diff -urN oldtree/Documentation/acpi/sony_acpi.txt newtree/Documentation/acpi/sony_acpi.txt
--- oldtree/Documentation/acpi/sony_acpi.txt	1970-01-01 00:00:00.000000000 +0000
+++ newtree/Documentation/acpi/sony_acpi.txt	2006-01-29 11:18:10.805646088 +0000
@@ -0,0 +1,87 @@
+ACPI Sony Notebook Control Driver (SNC) Readme
+----------------------------------------------
+	Copyright (C) 2004-2005 Stelian Pop <stelian@popies.net>
+
+This mini-driver drives the ACPI SNC device present in the
+ACPI BIOS of the Sony Vaio laptops.
+
+It gives access to some extra laptop functionality. In
+its current form, this driver is mainly useful for controlling the
+screen brightness, but it may do more in the future.
+
+You should probably start by trying the sonypi driver, and try
+sony_acpi only if sonypi doesn't work for you.
+
+Usage:
+------
+
+Loading the sony_acpi module will create a /proc/acpi/sony/
+directory populated with a couple of files.
+
+You then read/write integer values from/to those files by using
+standard UNIX tools.
+
+The files are:
+	brightness		current screen brightness
+	brightness_default	screen brightness which will be set
+				when the laptop is rebooted
+	cdpower			power on/off the internal CD drive
+
+Note that some files may be missing if they are not supported
+by your particular laptop model.
+
+Example usage:
+	# echo "1" > /proc/acpi/sony/brightness
+sets the lowest screen brightness,
+	# echo "8" > /proc/acpi/sony/brightness
+sets the highest screen brightness,
+	# cat /proc/acpi/sony/brightness
+retrieves the current screen brightness.
+
+Development:
+------------
+
+If you want to help with the development of this driver (and
+you are not afraid of the side effects that doing strange things
+with your ACPI BIOS could have on your laptop), load the driver
+with the option 'debug=1'.
+
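+For example, if the driver is built as a module:
+	# modprobe sony_acpi debug=1
+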
+REPEAT: DON'T DO THIS IF YOU DON'T LIKE RISKY BUSINESS.
+
+In your kernel logs you will find the list of all ACPI methods
+the SNC device has on your laptop. You can see the GBRT/SBRT methods
+used to get/set the brightness, but there are others.
+
+I HAVE NO IDEA WHAT THOSE METHODS DO.
+
+The sony_acpi driver creates, for some of those methods (the most
+common ones found on several Vaio models), an entry under
+/proc/acpi/sony/, just like the 'brightness' one. You can create
+other entries corresponding to your own laptop methods by further
+editing the source (see the 'sony_acpi_values' table, and add a new
+structure to this table with your get/set method names).
+
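+For example, a hypothetical pair of GFOO/SFOO get/set methods would be
+hooked up with a new table entry like this (same fields as the
+existing entries):
+
+	{
+		.name		= "foo",
+		.acpiget	= "GFOO",
+		.acpiset	= "SFOO",
+		.min		= -1,
+		.max		= -1,
+		.debug		= 1,
+	},
+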
+Your mission, should you accept it, is to try finding out what
+those entries are for, by reading/writing random values from/to those
+files and observing the impact on your laptop.
+
+Should you find anything interesting, please report it back to me,
+I will not disavow all knowledge of your actions :)
+
+Bugs/Limitations:
+-----------------
+
+* This driver is not based on official documentation from Sony
+  (because there is none), so there is no guarantee this driver
+  will work at all, or do the right thing. Although this hasn't
+  happened to me, this driver could do very bad things to your
+  laptop, including permanent damage.
+
+* The sony_acpi and sonypi drivers do not interact at all. In the
+  future, sonypi could use sony_acpi to do (part of) its business.
+
+* spicctrl, which is the userspace tool used to communicate with the
+  sonypi driver (through /dev/sonypi), does not try to use the
+  sony_acpi driver. In the future, spicctrl could try sonypi first,
+  and if it isn't present, try sony_acpi instead.
+
diff -urN oldtree/Makefile newtree/Makefile
--- oldtree/Makefile	2006-01-28 19:08:42.000000000 +0000
+++ newtree/Makefile	2006-01-29 11:18:52.647285192 +0000
@@ -1,8 +1,8 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 15
-EXTRAVERSION =
-NAME=Sliding Snow Leopard
+EXTRAVERSION = -no2
+NAME=Whirling Whirlpool
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
diff -urN oldtree/arch/i386/Kconfig newtree/arch/i386/Kconfig
--- oldtree/arch/i386/Kconfig	2006-01-28 19:08:42.000000000 +0000
+++ newtree/arch/i386/Kconfig	2006-01-29 11:18:10.806645936 +0000
@@ -618,13 +618,10 @@
 	default y
 
 config REGPARM
-	bool "Use register arguments (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
-	default n
+	bool "Use register arguments"
 	help
 	Compile the kernel with -mregparm=3. This uses a different ABI
 	and passes the first three arguments of a function call in registers.
-	This will probably break binary only modules.
 
 	This feature is only enabled for gcc-3.0 and later - earlier compilers
 	generate incorrect output with certain kernel constructs when
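
For reference, REGPARM switches the whole kernel to gcc's regparm(3)
calling convention.  A sketch of the effect:

	/* With -mregparm=3 every function behaves as if declared: */
	int __attribute__((regparm(3))) f(int a, int b, int c);
	/* a, b and c arrive in %eax, %edx and %ecx; any further
	 * arguments are passed on the stack as before.  Binary-only
	 * modules built against the stack-based ABI break, which is
	 * why this option used to be EXPERIMENTAL. */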
diff -urN oldtree/arch/i386/Kconfig.debug newtree/arch/i386/Kconfig.debug
--- oldtree/arch/i386/Kconfig.debug	2006-01-28 19:08:42.000000000 +0000
+++ newtree/arch/i386/Kconfig.debug	2006-01-29 11:18:20.568161960 +0000
@@ -43,14 +43,15 @@
 	  of memory corruptions.
 
 config 4KSTACKS
-	bool "Use 4Kb for kernel stacks instead of 8Kb"
-	depends on DEBUG_KERNEL
+	bool "Use 4Kb + 4Kb for kernel stacks instead of 8Kb" if DEBUG_KERNEL
+	default y
 	help
 	  If you say Y here the kernel will use a 4Kb stacksize for the
 	  kernel stack attached to each process/thread. This facilitates
 	  running more threads on a system and also reduces the pressure
 	  on the VM subsystem for higher order allocations. This option
-	  will also use IRQ stacks to compensate for the reduced stackspace.
+	  will also use separate 4Kb IRQ stacks to compensate for the
+	  reduced stackspace.
 
 config X86_FIND_SMP_CONFIG
 	bool
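
With 4KSTACKS, interrupts no longer run on the interrupted task's
stack: each CPU gets separate 4Kb hard-IRQ and soft-IRQ stacks, which
is what the "4Kb + 4Kb" in the new prompt refers to.  On i386 these
look roughly like (see arch/i386/kernel/irq.c):

	union irq_ctx {
		struct thread_info	tinfo;
		u32			stack[THREAD_SIZE/sizeof(u32)];
	};

	static union irq_ctx *hardirq_ctx[NR_CPUS];
	static union irq_ctx *softirq_ctx[NR_CPUS];

do_IRQ() switches the stack pointer over to hardirq_ctx[cpu] when it
finds itself running on a task stack.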
diff -urN oldtree/arch/i386/Makefile newtree/arch/i386/Makefile
--- oldtree/arch/i386/Makefile	2006-01-28 19:08:42.000000000 +0000
+++ newtree/arch/i386/Makefile	2006-01-29 11:18:10.807645784 +0000
@@ -42,9 +42,9 @@
 GCC_VERSION			:= $(call cc-version)
 cflags-$(CONFIG_REGPARM) 	+= $(shell if [ $(GCC_VERSION) -ge 0300 ] ; then echo "-mregparm=3"; fi ;)
 
-# Disable unit-at-a-time mode, it makes gcc use a lot more stack
-# due to the lack of sharing of stacklots.
-CFLAGS += $(call cc-option,-fno-unit-at-a-time)
+# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
+# a lot more stack due to the lack of sharing of stacklots:
+CFLAGS				+= $(shell if [ $(call cc-version) -lt 0400 ] ; then echo $(call cc-option,-fno-unit-at-a-time); fi ;)
 
 CFLAGS += $(cflags-y)
 
diff -urN oldtree/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c newtree/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
--- oldtree/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c	2006-01-29 11:18:20.616154664 +0000
@@ -52,6 +52,7 @@
 
 
 static int has_N44_O17_errata[NR_CPUS];
+static int has_N60_errata[NR_CPUS];
 static unsigned int stock_freq;
 static struct cpufreq_driver p4clockmod_driver;
 static unsigned int cpufreq_p4_get(unsigned int cpu);
@@ -226,6 +227,12 @@
 	case 0x0f12:
 		has_N44_O17_errata[policy->cpu] = 1;
 		dprintk("has errata -- disabling low frequencies\n");
+		break;
+
+	case 0x0f29:
+		has_N60_errata[policy->cpu] = 1;
+		dprintk("has errata -- disabling frequencies lower than 2 GHz\n");
+		break;
 	}
 	
 	/* get max frequency */
@@ -237,6 +244,8 @@
 	for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
 		if ((i<2) && (has_N44_O17_errata[policy->cpu]))
 			p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+		else if (has_N60_errata[policy->cpu] && p4clockmod_table[i].frequency < 2000000)
+			p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
 		else
 			p4clockmod_table[i].frequency = (stock_freq * i)/8;
 	}
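
A worked example of the N60 handling: on a 2.4 GHz part, stock_freq
is 2400000 kHz and the modulation steps come out as stock_freq * i / 8,
i.e. 300, 600, 900, 1200, 1500, 1800 and 2100 MHz.  With
has_N60_errata set, every step below 2000000 kHz becomes
CPUFREQ_ENTRY_INVALID, leaving only the 2100 MHz step and full speed.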
diff -urN oldtree/arch/i386/kernel/cpu/cpufreq/powernow-k8.c newtree/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
--- oldtree/arch/i386/kernel/cpu/cpufreq/powernow-k8.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/arch/i386/kernel/cpu/cpufreq/powernow-k8.c	2006-01-29 11:18:20.631152384 +0000
@@ -976,7 +976,7 @@
 }
 
 /* per CPU init entry point to the driver */
-static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
+static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	struct powernow_k8_data *data;
 	cpumask_t oldmask = CPU_MASK_ALL;
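
These __init -> __cpuinit conversions matter for CPU hotplug: __init
text is discarded after boot, so a CPU that comes online later must
not call into it.  At this point <linux/init.h> defines __cpuinit
roughly as:

	#ifdef CONFIG_HOTPLUG_CPU
	#define __cpuinit
	#else
	#define __cpuinit	__init
	#endif

so the per-CPU init paths are only discarded when no CPU can appear
after boot.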
@@ -1134,7 +1134,7 @@
 };
 
 /* driver entry point for init */
-static int __init powernowk8_init(void)
+static int __cpuinit powernowk8_init(void)
 {
 	unsigned int i, supported_cpus = 0;
 
diff -urN oldtree/arch/i386/lib/usercopy.c newtree/arch/i386/lib/usercopy.c
--- oldtree/arch/i386/lib/usercopy.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/arch/i386/lib/usercopy.c	2006-01-29 11:18:20.621153904 +0000
@@ -425,15 +425,121 @@
 		       : "eax", "edx", "memory");
 	return size;
 }
+
+/*
+ * Non-temporal-hint version of __copy_user_zeroing_intel: it uses
+ * movnti stores so large copies do not pollute the CPU cache.
+ * hyoshiok@miraclelinux.com
+ */
+
+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
+				const void __user *from, unsigned long size)
+{
+	int d0, d1;
+
+	__asm__ __volatile__(
+	       "        .align 2,0x90\n"
+	       "0:      movl 32(%4), %%eax\n"
+	       "        cmpl $67, %0\n"
+	       "        jbe 2f\n"
+	       "1:      movl 64(%4), %%eax\n"
+	       "        .align 2,0x90\n"
+	       "2:      movl 0(%4), %%eax\n"
+	       "21:     movl 4(%4), %%edx\n"
+	       "        movnti %%eax, 0(%3)\n"
+	       "        movnti %%edx, 4(%3)\n"
+	       "3:      movl 8(%4), %%eax\n"
+	       "31:     movl 12(%4),%%edx\n"
+	       "        movnti %%eax, 8(%3)\n"
+	       "        movnti %%edx, 12(%3)\n"
+	       "4:      movl 16(%4), %%eax\n"
+	       "41:     movl 20(%4), %%edx\n"
+	       "        movnti %%eax, 16(%3)\n"
+	       "        movnti %%edx, 20(%3)\n"
+	       "10:     movl 24(%4), %%eax\n"
+	       "51:     movl 28(%4), %%edx\n"
+	       "        movnti %%eax, 24(%3)\n"
+	       "        movnti %%edx, 28(%3)\n"
+	       "11:     movl 32(%4), %%eax\n"
+	       "61:     movl 36(%4), %%edx\n"
+	       "        movnti %%eax, 32(%3)\n"
+	       "        movnti %%edx, 36(%3)\n"
+	       "12:     movl 40(%4), %%eax\n"
+	       "71:     movl 44(%4), %%edx\n"
+	       "        movnti %%eax, 40(%3)\n"
+	       "        movnti %%edx, 44(%3)\n"
+	       "13:     movl 48(%4), %%eax\n"
+	       "81:     movl 52(%4), %%edx\n"
+	       "        movnti %%eax, 48(%3)\n"
+	       "        movnti %%edx, 52(%3)\n"
+	       "14:     movl 56(%4), %%eax\n"
+	       "91:     movl 60(%4), %%edx\n"
+	       "        movnti %%eax, 56(%3)\n"
+	       "        movnti %%edx, 60(%3)\n"
+	       "        addl $-64, %0\n"
+	       "        addl $64, %4\n"
+	       "        addl $64, %3\n"
+	       "        cmpl $63, %0\n"
+	       "        ja  0b\n"
+	       "        sfence \n"
+	       "5:      movl  %0, %%eax\n"
+	       "        shrl  $2, %0\n"
+	       "        andl $3, %%eax\n"
+	       "        cld\n"
+	       "6:      rep; movsl\n"
+	       "        movl %%eax,%0\n"
+	       "7:      rep; movsb\n"
+	       "8:\n"
+	       ".section .fixup,\"ax\"\n"
+	       "9:      lea 0(%%eax,%0,4),%0\n"
+	       "16:     pushl %0\n"
+	       "        pushl %%eax\n"
+	       "        xorl %%eax,%%eax\n"
+	       "        rep; stosb\n"
+	       "        popl %%eax\n"
+	       "        popl %0\n"
+	       "        jmp 8b\n"
+	       ".previous\n"
+	       ".section __ex_table,\"a\"\n"
+	       "	.align 4\n"
+	       "	.long 0b,16b\n"
+	       "	.long 1b,16b\n"
+	       "	.long 2b,16b\n"
+	       "	.long 21b,16b\n"
+	       "	.long 3b,16b\n"
+	       "	.long 31b,16b\n"
+	       "	.long 4b,16b\n"
+	       "	.long 41b,16b\n"
+	       "	.long 10b,16b\n"
+	       "	.long 51b,16b\n"
+	       "	.long 11b,16b\n"
+	       "	.long 61b,16b\n"
+	       "	.long 12b,16b\n"
+	       "	.long 71b,16b\n"
+	       "	.long 13b,16b\n"
+	       "	.long 81b,16b\n"
+	       "	.long 14b,16b\n"
+	       "	.long 91b,16b\n"
+	       "	.long 6b,9b\n"
+	       "        .long 7b,16b\n"
+	       ".previous"
+	       : "=&c"(size), "=&D" (d0), "=&S" (d1)
+	       :  "1"(to), "2"(from), "0"(size)
+	       : "eax", "edx", "memory");
+	return size;
+}
+
 #else
+
 /*
  * Leave these declared but undefined.  They should not be any references to
  * them
  */
-unsigned long
-__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size);
-unsigned long
-__copy_user_intel(void __user *to, const void *from, unsigned long size);
+unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
+					unsigned long size);
+unsigned long __copy_user_intel(void __user *to, const void *from,
+					unsigned long size);
+unsigned long __copy_user_zeroing_intel_nocache(void *to,
+				const void __user *from, unsigned long size);
 #endif /* CONFIG_X86_INTEL_USERCOPY */
 
 /* Generic arbitrary sized copy.  */
@@ -515,8 +621,8 @@
 		: "memory");						\
 } while (0)
 
-
-unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n)
+unsigned long __copy_to_user_ll(void __user *to, const void *from,
+				unsigned long n)
 {
 	BUG_ON((long) n < 0);
 #ifndef CONFIG_X86_WP_WORKS_OK
@@ -576,8 +682,8 @@
 }
 EXPORT_SYMBOL(__copy_to_user_ll);
 
-unsigned long
-__copy_from_user_ll(void *to, const void __user *from, unsigned long n)
+unsigned long __copy_from_user_ll(void *to, const void __user *from,
+					unsigned long n)
 {
 	BUG_ON((long)n < 0);
 	if (movsl_is_ok(to, from, n))
@@ -588,6 +694,21 @@
 }
 EXPORT_SYMBOL(__copy_from_user_ll);
 
+unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
+					unsigned long n)
+{
+	BUG_ON((long)n < 0);
+#ifdef CONFIG_X86_INTEL_USERCOPY
+	if (n > 64 && cpu_has_xmm2)
+		n = __copy_user_zeroing_intel_nocache(to, from, n);
+	else
+		__copy_user_zeroing(to, from, n);
+#else
+	__copy_user_zeroing(to, from, n);
+#endif
+	return n;
+}
+
 /**
  * copy_to_user: - Copy a block of data into user space.
  * @to:   Destination address, in user space.
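
Before drivers or the pagecache code can use
__copy_from_user_ll_nocache(), it still needs a checked wrapper; a
sketch of what that could look like (the name and its final home in
uaccess.h are assumptions, not part of this hunk):

	static inline unsigned long
	copy_from_user_nocache(void *to, const void __user *from,
				unsigned long n)
	{
		might_sleep();
		if (access_ok(VERIFY_READ, from, n))
			n = __copy_from_user_ll_nocache(to, from, n);
		return n;
	}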
diff -urN oldtree/arch/i386/mm/init.c newtree/arch/i386/mm/init.c
--- oldtree/arch/i386/mm/init.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/arch/i386/mm/init.c	2006-01-29 11:18:20.625153296 +0000
@@ -268,7 +268,7 @@
 	pkmap_page_table = pte;	
 }
 
-static void __devinit free_new_highpage(struct page *page)
+static void __meminit free_new_highpage(struct page *page)
 {
 	set_page_count(page, 1);
 	__free_page(page);
diff -urN oldtree/arch/x86_64/lib/Makefile newtree/arch/x86_64/lib/Makefile
--- oldtree/arch/x86_64/lib/Makefile	2006-01-28 19:08:42.000000000 +0000
+++ newtree/arch/x86_64/lib/Makefile	2006-01-29 11:18:20.542165912 +0000
@@ -4,7 +4,7 @@
 
 CFLAGS_csum-partial.o := -funroll-loops
 
-obj-y := io.o
+obj-y := io.o iomap_copy.o
 
 lib-y := csum-partial.o csum-copy.o csum-wrappers.o delay.o \
 	usercopy.o getuser.o putuser.o  \
diff -urN oldtree/arch/x86_64/lib/iomap_copy.S newtree/arch/x86_64/lib/iomap_copy.S
--- oldtree/arch/x86_64/lib/iomap_copy.S	1970-01-01 00:00:00.000000000 +0000
+++ newtree/arch/x86_64/lib/iomap_copy.S	2006-01-29 11:18:20.542165912 +0000
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2006 PathScale, Inc.  All Rights Reserved.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/*
+ * override generic version in lib/iomap_copy.c
+ */
+ 	.globl __iowrite32_copy
+	.p2align 4
+__iowrite32_copy:
+	movl %edx,%ecx
+	rep movsd
+	ret
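
The generic version this overrides is a plain 32-bit copy loop,
roughly (lib/iomap_copy.c):

	void __iowrite32_copy(void __iomem *to, const void *from,
				size_t count)
	{
		u32 __iomem *dst = to;
		const u32 *src = from;
		const u32 *end = src + count;

		while (src < end)
			__raw_writel(*src++, dst++);
	}

On x86-64 the arguments already sit where rep movsd wants them: the
destination in %rdi, the source in %rsi and the count in %rdx (only
the low half, %edx, is needed), so the override is three instructions.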
diff -urN oldtree/block/ll_rw_blk.c newtree/block/ll_rw_blk.c
--- oldtree/block/ll_rw_blk.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/block/ll_rw_blk.c	2006-01-29 11:18:10.808645632 +0000
@@ -1472,6 +1472,7 @@
  **/
 void generic_unplug_device(request_queue_t *q)
 {
+	might_sleep();
 	spin_lock_irq(q->queue_lock);
 	__generic_unplug_device(q);
 	spin_unlock_irq(q->queue_lock);
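
The might_sleep() is an assertion that callers are in sleep-capable
process context: generic_unplug_device() re-enables interrupts via
spin_unlock_irq(), so calling it from atomic or interrupt-disabled
context was always a bug.  With the kernel's sleep-in-atomic
debugging enabled, such a caller now warns at runtime, e.g.:

	/* hypothetical broken caller, now caught: */
	spin_lock_irqsave(&lock, flags);
	generic_unplug_device(q);	/* warns: "sleeping function
					   called from invalid context" */
	spin_unlock_irqrestore(&lock, flags);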
@@ -2829,7 +2830,7 @@
  * bi_sector for remaps as it sees fit.  So the values of these fields
  * should NOT be depended on after the call to generic_make_request.
  */
-void generic_make_request(struct bio *bio)
+static inline void __generic_make_request(struct bio *bio)
 {
 	request_queue_t *q;
 	sector_t maxsector;
@@ -2896,6 +2897,57 @@
 	} while (ret);
 }
 
+/*
+ * We only want one ->make_request_fn to be active at a time,
+ * else stack usage with stacked devices could be a problem.
+ * So use current->bio_{list,tail} to keep a list of requests
+ * submitted by a make_request_fn function.
+ * current->bio_tail is also used as a flag to say if
+ * generic_make_request is currently active in this task or not.
+ * If it is NULL, then no make_request is active.  If it is non-NULL,
+ * then a make_request is active, and new requests should be added
+ * at the tail
+ */
+void generic_make_request(struct bio *bio)
+{
+	if (current->bio_tail) {
+		/* make_request is active */
+		*(current->bio_tail) = bio;
+		bio->bi_next = NULL;
+		current->bio_tail = &bio->bi_next;
+		return;
+	}
+	/* The following loop may be a bit non-obvious, and so deserves some
+	 * explanation.
+	 * Before entering the loop, bio->bi_next is NULL (as all callers
+	 * ensure that) so we have a list with a single bio.
+	 * We pretend that we have just taken it off a longer list, so
+	 * we assign bio_list to the next (which is NULL) and bio_tail
+	 * to &bio_list, thus initialising the bio_list of new bios to be
+	 * added.  __generic_make_request may indeed add some more bios
+	 * through a recursive call to generic_make_request.  If it
+	 * did, we find a non-NULL value in bio_list and re-enter the loop
+	 * from the top.  In this case we really did just take the bio
+	 * off the top of the list (no pretending) and so fix up bio_list and
+	 * bio_tail or bi_next, and call into __generic_make_request again.
+	 *
+	 * The loop was structured like this to make only one call to
+	 * __generic_make_request (which is important as it is large and
+	 * inlined) and to keep the structure simple.
+	 */
+	BUG_ON(bio->bi_next);
+	do {
+		current->bio_list = bio->bi_next;
+		if (bio->bi_next == NULL)
+			current->bio_tail = &current->bio_list;
+		else
+			bio->bi_next = NULL;
+		__generic_make_request(bio);
+		bio = current->bio_list;
+	} while (bio);
+	current->bio_tail = NULL; /* deactivate */
+}
+
 EXPORT_SYMBOL(generic_make_request);
 
 /**
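
For current->bio_list and current->bio_tail to exist, the same series
has to extend struct task_struct; something along these lines (field
names taken from the code above, exact placement in sched.h assumed):

	/* in struct task_struct: */
	struct bio *bio_list;	/* bios queued while ->make_request_fn runs */
	struct bio **bio_tail;	/* &last->bi_next, NULL when inactive */

Both must start out NULL in every new task, since a non-NULL bio_tail
is how generic_make_request() detects that it is already active in
the current task.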
diff -urN oldtree/drivers/acpi/Kconfig newtree/drivers/acpi/Kconfig
--- oldtree/drivers/acpi/Kconfig	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/acpi/Kconfig	2006-01-29 11:18:10.809645480 +0000
@@ -233,6 +233,19 @@
 	  If you have a legacy free Toshiba laptop (such as the Libretto L1
 	  series), say Y.
 
+config ACPI_SONY
+	tristate "Sony Laptop Extras"
+	depends on X86 && ACPI
+	  ---help---
+	  This mini-driver drives the ACPI SNC device present in the
+	  ACPI BIOS of the Sony Vaio laptops.
+
+	  It gives access to some extra laptop functionality. In
+	  its current form, the only thing this driver does is let
+	  the user set or query the screen brightness.
+
+	  Read <file:Documentation/acpi/sony_acpi.txt> for more information.
+
 config ACPI_CUSTOM_DSDT
 	bool "Include Custom DSDT"
 	depends on !STANDALONE
diff -urN oldtree/drivers/acpi/Makefile newtree/drivers/acpi/Makefile
--- oldtree/drivers/acpi/Makefile	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/acpi/Makefile	2006-01-29 11:18:10.810645328 +0000
@@ -55,5 +55,6 @@
 obj-$(CONFIG_ACPI_ASUS)		+= asus_acpi.o
 obj-$(CONFIG_ACPI_IBM)		+= ibm_acpi.o
 obj-$(CONFIG_ACPI_TOSHIBA)	+= toshiba_acpi.o
+obj-$(CONFIG_ACPI_SONY)		+= sony_acpi.o
 obj-y				+= scan.o motherboard.o
 obj-$(CONFIG_ACPI_HOTPLUG_MEMORY)	+= acpi_memhotplug.o
diff -urN oldtree/drivers/acpi/asus_acpi.c newtree/drivers/acpi/asus_acpi.c
--- oldtree/drivers/acpi/asus_acpi.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/acpi/asus_acpi.c	2006-01-29 11:18:20.543165760 +0000
@@ -490,13 +490,13 @@
  */
 
 /* Generic LED functions */
-static int read_led(const char *ledname, int ledmask)
+static int read_led(const char *ledname, int ledmask, int invert)
 {
 	if (ledname) {
 		int led_status;
 
 		if (read_acpi_int(NULL, ledname, &led_status))
-			return led_status;
+			return (invert) ? !led_status : led_status;
 		else
 			printk(KERN_WARNING "Asus ACPI: Error reading LED "
 			       "status\n");
@@ -552,7 +552,7 @@
 	       void *data)
 {
 	return sprintf(page, "%d\n",
-		       read_led(hotk->methods->mled_status, MLED_ON));
+		       read_led(hotk->methods->mled_status, MLED_ON, 0));
 }
 
 static int
@@ -570,7 +570,7 @@
 	       void *data)
 {
 	return sprintf(page, "%d\n",
-		       read_led(hotk->methods->wled_status, WLED_ON));
+		       read_led(hotk->methods->wled_status, WLED_ON, 1));
 }
 
 static int
@@ -588,7 +588,7 @@
 	       void *data)
 {
 	return sprintf(page, "%d\n",
-		       read_led(hotk->methods->tled_status, TLED_ON));
+		       read_led(hotk->methods->tled_status, TLED_ON, 0));
 }
 
 static int
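
The new 'invert' argument lets read_led() cope with ACPI methods that
report a LED state active-low; that is presumably why the wled reader
above now passes 1 where mled and tled pass 0.  The added logic in
isolation (led_value() is an invented stand-in name):

	#include <stdio.h>

	/* sketch of the inversion added to read_led() */
	static int led_value(int led_status, int invert)
	{
		return invert ? !led_status : led_status;
	}

	int main(void)
	{
		printf("wled: %d\n", led_value(0, 1));	/* reports on */
		printf("mled: %d\n", led_value(1, 0));	/* reports on */
		return 0;
	}
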
diff -urN oldtree/drivers/acpi/sony_acpi.c newtree/drivers/acpi/sony_acpi.c
--- oldtree/drivers/acpi/sony_acpi.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/drivers/acpi/sony_acpi.c	2006-01-29 11:18:10.811645176 +0000
@@ -0,0 +1,391 @@
+/*
+ * ACPI Sony Notebook Control Driver (SNC)
+ *
+ * Copyright (C) 2004-2005 Stelian Pop <stelian@popies.net>
+ *
+ * Parts of this driver inspired from asus_acpi.c and ibm_acpi.c
+ * which are copyrighted by their respective authors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_bus.h>
+#include <asm/uaccess.h>
+
+#define ACPI_SNC_CLASS		"sony"
+#define ACPI_SNC_HID		"SNY5001"
+#define ACPI_SNC_DRIVER_NAME	"ACPI Sony Notebook Control Driver v0.2"
+
+#define LOG_PFX			KERN_WARNING "sony_acpi: "
+
+MODULE_AUTHOR("Stelian Pop");
+MODULE_DESCRIPTION(ACPI_SNC_DRIVER_NAME);
+MODULE_LICENSE("GPL");
+
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "set this to 1 (and RTFM) if you want to help "
+			"the development of this driver");
+
+static int sony_acpi_add(struct acpi_device *device);
+static int sony_acpi_remove(struct acpi_device *device, int type);
+
+static struct acpi_driver sony_acpi_driver = {
+	.name	= ACPI_SNC_DRIVER_NAME,
+	.class	= ACPI_SNC_CLASS,
+	.ids	= ACPI_SNC_HID,
+	.ops	= {
+			.add	= sony_acpi_add,
+			.remove	= sony_acpi_remove,
+		  },
+};
+
+static acpi_handle sony_acpi_handle;
+static struct proc_dir_entry *sony_acpi_dir;
+
+static struct sony_acpi_value {
+	char			*name;	 /* name of the entry */
+	struct proc_dir_entry 	*proc;	 /* /proc entry */
+	char			*acpiget;/* name of the ACPI get function */
+	char			*acpiset;/* name of the ACPI set function */
+	int 			min;	 /* minimum allowed value or -1 */
+	int			max;	 /* maximum allowed value or -1 */
+	int			debug;	 /* active only in debug mode ? */
+} sony_acpi_values[] = {
+	{
+		.name		= "brightness",
+		.acpiget	= "GBRT",
+		.acpiset	= "SBRT",
+		.min		= 1,
+		.max		= 8,
+		.debug		= 0,
+	},
+	{
+		.name		= "brightness_default",
+		.acpiget	= "GPBR",
+		.acpiset	= "SPBR",
+		.min		= 1,
+		.max		= 8,
+		.debug		= 0,
+	},
+	{
+		.name		= "cdpower",
+		.acpiget	= "GCDP",
+		.acpiset	= "SCDP",
+		.min		= -1,
+		.max		= -1,
+		.debug		= 0,
+	},
+	{
+		.name		= "PID",
+		.acpiget	= "GPID",
+		.debug		= 1,
+	},
+	{
+		.name		= "CTR",
+		.acpiget	= "GCTR",
+		.acpiset	= "SCTR",
+		.min		= -1,
+		.max		= -1,
+		.debug		= 1,
+	},
+	{
+		.name		= "PCR",
+		.acpiget	= "GPCR",
+		.acpiset	= "SPCR",
+		.min		= -1,
+		.max		= -1,
+		.debug		= 1,
+	},
+	{
+		.name		= "CMI",
+		.acpiget	= "GCMI",
+		.acpiset	= "SCMI",
+		.min		= -1,
+		.max		= -1,
+		.debug		= 1,
+	},
+	{
+		.name		= NULL,
+	}
+};
+
+static int acpi_callgetfunc(acpi_handle handle, char *name, int *result)
+{
+	struct acpi_buffer output;
+	union acpi_object out_obj;
+	acpi_status status;
+
+	output.length = sizeof(out_obj);
+	output.pointer = &out_obj;
+
+	status = acpi_evaluate_object(handle, name, NULL, &output);
+	if ((status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER)) {
+		*result = out_obj.integer.value;
+		return 0;
+	}
+
+	printk(LOG_PFX "acpi_callgetfunc failed\n");
+
+	return -1;
+}
+
+static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
+			    int *result)
+{
+	struct acpi_object_list params;
+	union acpi_object in_obj;
+	struct acpi_buffer output;
+	union acpi_object out_obj;
+	acpi_status status;
+
+	params.count = 1;
+	params.pointer = &in_obj;
+	in_obj.type = ACPI_TYPE_INTEGER;
+	in_obj.integer.value = value;
+
+	output.length = sizeof(out_obj);
+	output.pointer = &out_obj;
+
+	status = acpi_evaluate_object(handle, name, &params, &output);
+	if (status == AE_OK) {
+		if (result != NULL) {
+			if (out_obj.type != ACPI_TYPE_INTEGER) {
+				printk(LOG_PFX "acpi_evaluate_object bad "
+				       "return type\n");
+				return -1;
+			}
+			*result = out_obj.integer.value;
+		}
+		return 0;
+	}
+
+	printk(LOG_PFX "acpi_evaluate_object failed\n");
+
+	return -1;
+}
+
+static int parse_buffer(const char __user *buffer, unsigned long count,
+			int *val)
+{
+	char s[32];
+	int ret;
+
+	if (count > 31)
+		return -EINVAL;
+	if (copy_from_user(s, buffer, count))
+		return -EFAULT;
+	s[count] = '\0';
+	ret = simple_strtoul(s, NULL, 10);
+	*val = ret;
+	return 0;
+}
+
+static int sony_acpi_read(char *page, char **start, off_t off, int count,
+			  int *eof, void *data)
+{
+	struct sony_acpi_value *item = data;
+	int value;
+
+	if (!item->acpiget)
+		return -EIO;
+
+	if (acpi_callgetfunc(sony_acpi_handle, item->acpiget, &value) < 0)
+		return -EIO;
+
+	return sprintf(page, "%d\n", value);
+}
+
+static int sony_acpi_write(struct file *file, const char __user *buffer,
+			   unsigned long count, void *data)
+{
+	struct sony_acpi_value *item = data;
+	int result;
+	int value;
+
+	if (!item->acpiset)
+		return -EIO;
+
+	if ((result = parse_buffer(buffer, count, &value)) < 0)
+		return result;
+
+	if (item->min != -1 && value < item->min)
+		return -EINVAL;
+	if (item->max != -1 && value > item->max)
+		return -EINVAL;
+
+	if (acpi_callsetfunc(sony_acpi_handle, item->acpiset, value, NULL) < 0)
+		return -EIO;
+
+	return count;
+}
+
+static void sony_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+	printk(LOG_PFX "sony_acpi_notify\n");
+}
+
+static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
+				      void *context, void **return_value)
+{
+	struct acpi_namespace_node *node;
+	union acpi_operand_object *operand;
+
+	node = (struct acpi_namespace_node *) handle;
+	operand = (union acpi_operand_object *) node->object;
+
+	printk(LOG_PFX "method: name: %4.4s, args %X\n", node->name.ascii,
+	       (u32) operand->method.param_count);
+
+	return AE_OK;
+}
+
+static int sony_acpi_add(struct acpi_device *device)
+{
+	acpi_status status;
+	int result;
+	struct sony_acpi_value *item;
+
+	sony_acpi_handle = device->handle;
+
+	acpi_driver_data(device) = NULL;
+	acpi_device_dir(device) = sony_acpi_dir;
+
+	if (debug) {
+		status = acpi_walk_namespace(ACPI_TYPE_METHOD, sony_acpi_handle,
+					     1, sony_walk_callback, NULL, NULL);
+		if (ACPI_FAILURE(status)) {
+			printk(LOG_PFX "unable to walk acpi resources\n");
+			result = -ENODEV;
+			goto outwalk;
+		}
+
+		status = acpi_install_notify_handler(sony_acpi_handle,
+						     ACPI_DEVICE_NOTIFY,
+						     sony_acpi_notify,
+						     NULL);
+		if (ACPI_FAILURE(status)) {
+			printk(LOG_PFX "unable to install notify handler\n");
+			result = -ENODEV;
+			goto outnotify;
+		}
+	}
+
+	for (item = sony_acpi_values; item->name; ++item) {
+		acpi_handle handle;
+
+		if (!debug && item->debug)
+			continue;
+
+		if (item->acpiget &&
+		    ACPI_FAILURE(acpi_get_handle(sony_acpi_handle,
+						 item->acpiget, &handle)))
+			continue;
+
+		if (item->acpiset &&
+		    ACPI_FAILURE(acpi_get_handle(sony_acpi_handle,
+						 item->acpiset, &handle)))
+			continue;
+
+		item->proc = create_proc_entry(item->name, 0600,
+					       acpi_device_dir(device));
+		if (!item->proc) {
+			printk(LOG_PFX "unable to create proc entry\n");
+			result = -EIO;
+			goto outproc;
+		}
+
+		item->proc->read_proc = sony_acpi_read;
+		item->proc->write_proc = sony_acpi_write;
+		item->proc->data = item;
+		item->proc->owner = THIS_MODULE;
+	}
+
+	printk(KERN_INFO ACPI_SNC_DRIVER_NAME " successfully installed\n");
+
+	return 0;
+
+outproc:
+	if (debug) {
+		status = acpi_remove_notify_handler(sony_acpi_handle,
+						    ACPI_DEVICE_NOTIFY,
+						    sony_acpi_notify);
+		if (ACPI_FAILURE(status))
+			printk(LOG_PFX "unable to remove notify handler\n");
+	}
+outnotify:
+	for (item = sony_acpi_values; item->name; ++item)
+		if (item->proc)
+			remove_proc_entry(item->name, acpi_device_dir(device));
+outwalk:
+	return result;
+}
+
+static int sony_acpi_remove(struct acpi_device *device, int type)
+{
+	acpi_status status;
+	struct sony_acpi_value *item;
+
+	if (debug) {
+		status = acpi_remove_notify_handler(sony_acpi_handle,
+						    ACPI_DEVICE_NOTIFY,
+						    sony_acpi_notify);
+		if (ACPI_FAILURE(status))
+			printk(LOG_PFX "unable to remove notify handler\n");
+	}
+
+	for (item = sony_acpi_values; item->name; ++item)
+		if (item->proc)
+			remove_proc_entry(item->name, acpi_device_dir(device));
+
+	printk(KERN_INFO ACPI_SNC_DRIVER_NAME " successfully removed\n");
+
+	return 0;
+}
+
+static int __init sony_acpi_init(void)
+{
+	int result;
+
+	sony_acpi_dir = proc_mkdir("sony", acpi_root_dir);
+	if (!sony_acpi_dir) {
+		printk(LOG_PFX "unable to create /proc entry\n");
+		return -ENODEV;
+	}
+	sony_acpi_dir->owner = THIS_MODULE;
+
+	result = acpi_bus_register_driver(&sony_acpi_driver);
+	if (result < 0) {
+		remove_proc_entry("sony", acpi_root_dir);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static void __exit sony_acpi_exit(void)
+{
+	acpi_bus_unregister_driver(&sony_acpi_driver);
+	remove_proc_entry("sony", acpi_root_dir);
+}
+
+module_init(sony_acpi_init);
+module_exit(sony_acpi_exit);
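
The sony_acpi_values[] table above is NULL-terminated and walked the
same way in the add, remove and error paths, so extending the driver
for a new method is a one-entry change.  A stand-alone sketch of that
walk, with a purely hypothetical extra entry (GXYZ/SXYZ are invented
names, the kind of per-model method the debug mode is meant to turn up):

	#include <stdio.h>

	struct value {			/* simplified sony_acpi_value */
		char *name, *acpiget, *acpiset;
		int min, max, debug;
	};

	static struct value table[] = {
		{ .name = "brightness", .acpiget = "GBRT",
		  .acpiset = "SBRT", .min = 1, .max = 8 },
		{ .name = "xyz", .acpiget = "GXYZ",	/* hypothetical */
		  .acpiset = "SXYZ", .min = -1, .max = -1, .debug = 1 },
		{ .name = NULL }	/* terminator */
	};

	int main(void)
	{
		struct value *item;

		for (item = table; item->name; ++item)
			printf("%s: get=%s set=%s\n", item->name,
			       item->acpiget, item->acpiset);
		return 0;
	}
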
diff -urN oldtree/drivers/base/power/suspend.c newtree/drivers/base/power/suspend.c
--- oldtree/drivers/base/power/suspend.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/base/power/suspend.c	2006-01-29 11:18:20.546165304 +0000
@@ -92,6 +92,8 @@
 		get_device(dev);
 		up(&dpm_list_sem);
 
+		printk(KERN_DEBUG "Suspending device %s\n", kobject_name(&dev->kobj));
+
 		error = suspend_device(dev, state);
 
 		down(&dpm_list_sem);
diff -urN oldtree/drivers/cdrom/cdrom.c newtree/drivers/cdrom/cdrom.c
--- oldtree/drivers/cdrom/cdrom.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/cdrom/cdrom.c	2006-01-29 11:18:20.571161504 +0000
@@ -407,7 +407,6 @@
 	ENSURE(get_mcn, CDC_MCN);
 	ENSURE(reset, CDC_RESET);
 	ENSURE(audio_ioctl, CDC_PLAY_AUDIO);
-	ENSURE(dev_ioctl, CDC_IOCTLS);
 	ENSURE(generic_packet, CDC_GENERIC_PACKET);
 	cdi->mc_flags = 0;
 	cdo->n_minors = 0;
@@ -2196,395 +2195,587 @@
 	return cdrom_read_cdda_old(cdi, ubuf, lba, nframes);	
 }
 
-/* Just about every imaginable ioctl is supported in the Uniform layer
- * these days. ATAPI / SCSI specific code now mainly resides in
- * mmc_ioct().
- */
-int cdrom_ioctl(struct file * file, struct cdrom_device_info *cdi,
-		struct inode *ip, unsigned int cmd, unsigned long arg)
+static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi,
+		void __user *argp)
 {
-	struct cdrom_device_ops *cdo = cdi->ops;
+	struct cdrom_multisession ms_info;
+	u8 requested_format;
 	int ret;
 
-	/* Try the generic SCSI command ioctl's first.. */
-	ret = scsi_cmd_ioctl(file, ip->i_bdev->bd_disk, cmd, (void __user *)arg);
-	if (ret != -ENOTTY)
+	cdinfo(CD_DO_IOCTL, "entering CDROMMULTISESSION\n");
+
+	if (!(cdi->ops->capability & CDC_MULTI_SESSION))
+		return -ENOSYS;
+
+	if (copy_from_user(&ms_info, argp, sizeof(ms_info)))
+		return -EFAULT;
+
+	requested_format = ms_info.addr_format;
+	if (requested_format != CDROM_MSF && requested_format != CDROM_LBA)
+		return -EINVAL;
+	ms_info.addr_format = CDROM_LBA;
+
+	ret = cdi->ops->get_last_session(cdi, &ms_info);
+	if (ret)
 		return ret;
 
-	/* the first few commands do not deal with audio drive_info, but
-	   only with routines in cdrom device operations. */
-	switch (cmd) {
-	case CDROMMULTISESSION: {
-		struct cdrom_multisession ms_info;
-		u_char requested_format;
-		cdinfo(CD_DO_IOCTL, "entering CDROMMULTISESSION\n"); 
-                if (!(cdo->capability & CDC_MULTI_SESSION))
-                        return -ENOSYS;
-		IOCTL_IN(arg, struct cdrom_multisession, ms_info);
-		requested_format = ms_info.addr_format;
-		if (!((requested_format == CDROM_MSF) ||
-			(requested_format == CDROM_LBA)))
-				return -EINVAL;
-		ms_info.addr_format = CDROM_LBA;
-		if ((ret=cdo->get_last_session(cdi, &ms_info)))
+	sanitize_format(&ms_info.addr, &ms_info.addr_format, requested_format);
+
+	if (copy_to_user(argp, &ms_info, sizeof(ms_info)))
+		return -EFAULT;
+
+	cdinfo(CD_DO_IOCTL, "CDROMMULTISESSION successful\n");
+	return 0;
+}
+
+static int cdrom_ioctl_eject(struct cdrom_device_info *cdi)
+{
+	cdinfo(CD_DO_IOCTL, "entering CDROMEJECT\n");
+
+	if (!CDROM_CAN(CDC_OPEN_TRAY))
+		return -ENOSYS;
+	if (cdi->use_count != 1 || keeplocked)
+		return -EBUSY;
+	if (CDROM_CAN(CDC_LOCK)) {
+		int ret = cdi->ops->lock_door(cdi, 0);
+		if (ret)
 			return ret;
-		sanitize_format(&ms_info.addr, &ms_info.addr_format,
-				requested_format);
-		IOCTL_OUT(arg, struct cdrom_multisession, ms_info);
-		cdinfo(CD_DO_IOCTL, "CDROMMULTISESSION successful\n"); 
-		return 0;
-		}
+	}
 
-	case CDROMEJECT: {
-		cdinfo(CD_DO_IOCTL, "entering CDROMEJECT\n"); 
-		if (!CDROM_CAN(CDC_OPEN_TRAY))
-			return -ENOSYS;
-		if (cdi->use_count != 1 || keeplocked)
-			return -EBUSY;
-		if (CDROM_CAN(CDC_LOCK))
-			if ((ret=cdo->lock_door(cdi, 0)))
-				return ret;
+	return cdi->ops->tray_move(cdi, 1);
+}
 
-		return cdo->tray_move(cdi, 1);
-		}
+static int cdrom_ioctl_closetray(struct cdrom_device_info *cdi)
+{
+	cdinfo(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n");
 
-	case CDROMCLOSETRAY: {
-		cdinfo(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n"); 
-		if (!CDROM_CAN(CDC_CLOSE_TRAY))
-			return -ENOSYS;
-		return cdo->tray_move(cdi, 0);
-		}
+	if (!CDROM_CAN(CDC_CLOSE_TRAY))
+		return -ENOSYS;
+	return cdi->ops->tray_move(cdi, 0);
+}
 
-	case CDROMEJECT_SW: {
-		cdinfo(CD_DO_IOCTL, "entering CDROMEJECT_SW\n"); 
-		if (!CDROM_CAN(CDC_OPEN_TRAY))
-			return -ENOSYS;
-		if (keeplocked)
-			return -EBUSY;
-		cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT);
-		if (arg)
-			cdi->options |= CDO_AUTO_CLOSE | CDO_AUTO_EJECT;
-		return 0;
-		}
+static int cdrom_ioctl_eject_sw(struct cdrom_device_info *cdi,
+		unsigned long arg)
+{
+	cdinfo(CD_DO_IOCTL, "entering CDROMEJECT_SW\n");
 
-	case CDROM_MEDIA_CHANGED: {
-		struct cdrom_changer_info *info;
-		int changed;
+	if (!CDROM_CAN(CDC_OPEN_TRAY))
+		return -ENOSYS;
+	if (keeplocked)
+		return -EBUSY;
 
-		cdinfo(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n"); 
-		if (!CDROM_CAN(CDC_MEDIA_CHANGED))
+	cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT);
+	if (arg)
+		cdi->options |= CDO_AUTO_CLOSE | CDO_AUTO_EJECT;
+	return 0;
+}
+
+static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
+		unsigned long arg)
+{
+	struct cdrom_changer_info *info;
+	int ret;
+
+	cdinfo(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n");
+
+	if (!CDROM_CAN(CDC_MEDIA_CHANGED))
+		return -ENOSYS;
+
+	/* cannot select disc or select current disc */
+	if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
+		return media_changed(cdi, 1);
+
+	if ((unsigned int)arg >= cdi->capacity)
+		return -EINVAL;
+
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	ret = cdrom_read_mech_status(cdi, info);
+	if (!ret)
+		ret = info->slots[arg].change;
+	kfree(info);
+	return ret;
+}
+
+static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi,
+		unsigned long arg)
+{
+	cdinfo(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n");
+
+	/*
+	 * Options need to be in sync with capability.
+	 * Too late for that, so we have to check each one separately.
+	 */
+	switch (arg) {
+	case CDO_USE_FFLAGS:
+	case CDO_CHECK_TYPE:
+		break;
+	case CDO_LOCK:
+		if (!CDROM_CAN(CDC_LOCK))
+			return -ENOSYS;
+		break;
+	case 0:
+		return cdi->options;
+	/* default is basically CDO_[AUTO_CLOSE|AUTO_EJECT] */
+	default:
+		if (!CDROM_CAN(arg))
 			return -ENOSYS;
+	}
+	cdi->options |= (int) arg;
+	return cdi->options;
+}
+
+static int cdrom_ioctl_clear_options(struct cdrom_device_info *cdi,
+		unsigned long arg)
+{
+	cdinfo(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n");
+
+	cdi->options &= ~(int) arg;
+	return cdi->options;
+}
 
-		/* cannot select disc or select current disc */
-		if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
-			return media_changed(cdi, 1);
+static int cdrom_ioctl_select_speed(struct cdrom_device_info *cdi,
+		unsigned long arg)
+{
+	cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n");
+
+	if (!CDROM_CAN(CDC_SELECT_SPEED))
+		return -ENOSYS;
+	return cdi->ops->select_speed(cdi, arg);
+}
+
+static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi,
+		unsigned long arg)
+{
+	cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n");
 
-		if ((unsigned int)arg >= cdi->capacity)
+	if (!CDROM_CAN(CDC_SELECT_DISC))
+		return -ENOSYS;
+
+	if (arg != CDSL_CURRENT && arg != CDSL_NONE) {
+		if ((int)arg >= cdi->capacity)
 			return -EINVAL;
+	}
 
-		info = kmalloc(sizeof(*info), GFP_KERNEL);
-		if (!info)
-			return -ENOMEM;
+	/*
+	 * ->select_disc is a hook to allow a driver-specific way of
+	 * selecting a disc.  However, since there is no equivalent hook for
+	 * cdrom_slot_status this may not actually be useful...
+	 */
+	if (cdi->ops->select_disc)
+		return cdi->ops->select_disc(cdi, arg);
 
-		if ((ret = cdrom_read_mech_status(cdi, info))) {
-			kfree(info);
-			return ret;
-		}
+	cdinfo(CD_CHANGER, "Using generic cdrom_select_disc()\n");
+	return cdrom_select_disc(cdi, arg);
+}
 
-		changed = info->slots[arg].change;
-		kfree(info);
-		return changed;
-		}
+static int cdrom_ioctl_reset(struct cdrom_device_info *cdi,
+		struct block_device *bdev)
+{
+	cdinfo(CD_DO_IOCTL, "entering CDROM_RESET\n");
 
-	case CDROM_SET_OPTIONS: {
-		cdinfo(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n"); 
-		/* options need to be in sync with capability. too late for
-		   that, so we have to check each one separately... */
-		switch (arg) {
-		case CDO_USE_FFLAGS:
-		case CDO_CHECK_TYPE:
-			break;
-		case CDO_LOCK:
-			if (!CDROM_CAN(CDC_LOCK))
-				return -ENOSYS;
-			break;
-		case 0:
-			return cdi->options;
-		/* default is basically CDO_[AUTO_CLOSE|AUTO_EJECT] */
-		default:
-			if (!CDROM_CAN(arg))
-				return -ENOSYS;
-		}
-		cdi->options |= (int) arg;
-		return cdi->options;
-		}
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+	if (!CDROM_CAN(CDC_RESET))
+		return -ENOSYS;
+	invalidate_bdev(bdev, 0);
+	return cdi->ops->reset(cdi);
+}
 
-	case CDROM_CLEAR_OPTIONS: {
-		cdinfo(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n"); 
-		cdi->options &= ~(int) arg;
-		return cdi->options;
-		}
+static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi,
+		unsigned long arg)
+{
+	cdinfo(CD_DO_IOCTL, "%socking door.\n", arg ? "L" : "Unl");
 
-	case CDROM_SELECT_SPEED: {
-		cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n"); 
-		if (!CDROM_CAN(CDC_SELECT_SPEED))
-			return -ENOSYS;
-		return cdo->select_speed(cdi, arg);
-		}
+	if (!CDROM_CAN(CDC_LOCK))
+		return -EDRIVE_CANT_DO_THIS;
 
-	case CDROM_SELECT_DISC: {
-		cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n"); 
-		if (!CDROM_CAN(CDC_SELECT_DISC))
-			return -ENOSYS;
+	keeplocked = arg ? 1 : 0;
 
-                if ((arg != CDSL_CURRENT) && (arg != CDSL_NONE))
-			if ((int)arg >= cdi->capacity)
-				return -EINVAL;
-
-		/* cdo->select_disc is a hook to allow a driver-specific
-		 * way of seleting disc.  However, since there is no
-		 * equiv hook for cdrom_slot_status this may not 
-		 * actually be useful...
-		 */
-		if (cdo->select_disc != NULL)
-			return cdo->select_disc(cdi, arg);
-
-		/* no driver specific select_disc(), call our own */
-		cdinfo(CD_CHANGER, "Using generic cdrom_select_disc()\n"); 
-		return cdrom_select_disc(cdi, arg);
-		}
-
-	case CDROMRESET: {
-		if (!capable(CAP_SYS_ADMIN))
-			return -EACCES;
-		cdinfo(CD_DO_IOCTL, "entering CDROM_RESET\n");
-		if (!CDROM_CAN(CDC_RESET))
-			return -ENOSYS;
-		invalidate_bdev(ip->i_bdev, 0);
-		return cdo->reset(cdi);
-		}
+	/*
+	 * Don't unlock the door on multiple opens by default, but allow
+	 * root to do so.
+	 */
+	if (cdi->use_count != 1 && !arg && !capable(CAP_SYS_ADMIN))
+		return -EBUSY;
+	return cdi->ops->lock_door(cdi, arg);
+}
 
-	case CDROM_LOCKDOOR: {
-		cdinfo(CD_DO_IOCTL, "%socking door.\n", arg ? "L" : "Unl");
-		if (!CDROM_CAN(CDC_LOCK))
-			return -EDRIVE_CANT_DO_THIS;
-		keeplocked = arg ? 1 : 0;
-		/* don't unlock the door on multiple opens,but allow root
-		 * to do so */
-		if ((cdi->use_count != 1) && !arg && !capable(CAP_SYS_ADMIN))
-			return -EBUSY;
-		return cdo->lock_door(cdi, arg);
-		}
+static int cdrom_ioctl_debug(struct cdrom_device_info *cdi,
+		unsigned long arg)
+{
+	cdinfo(CD_DO_IOCTL, "%sabling debug.\n", arg ? "En" : "Dis");
 
-	case CDROM_DEBUG: {
-		if (!capable(CAP_SYS_ADMIN))
-			return -EACCES;
-		cdinfo(CD_DO_IOCTL, "%sabling debug.\n", arg ? "En" : "Dis");
-		debug = arg ? 1 : 0;
-		return debug;
-		}
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+	debug = arg ? 1 : 0;
+	return debug;
+}
 
-	case CDROM_GET_CAPABILITY: {
-		cdinfo(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n");
-		return (cdo->capability & ~cdi->mask);
-		}
+static int cdrom_ioctl_get_capability(struct cdrom_device_info *cdi)
+{
+	cdinfo(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n");
+	return (cdi->ops->capability & ~cdi->mask);
+}
 
-/* The following function is implemented, although very few audio
+/*
+ * The following function is implemented, although very few audio
  * discs give Universal Product Code information, which should just be
  * the Medium Catalog Number on the box.  Note, that the way the code
  * is written on the CD is /not/ uniform across all discs!
  */
-	case CDROM_GET_MCN: {
-		struct cdrom_mcn mcn;
-		cdinfo(CD_DO_IOCTL, "entering CDROM_GET_MCN\n"); 
-		if (!(cdo->capability & CDC_MCN))
-			return -ENOSYS;
-		if ((ret=cdo->get_mcn(cdi, &mcn)))
-			return ret;
-		IOCTL_OUT(arg, struct cdrom_mcn, mcn);
-		cdinfo(CD_DO_IOCTL, "CDROM_GET_MCN successful\n"); 
-		return 0;
-		}
+static int cdrom_ioctl_get_mcn(struct cdrom_device_info *cdi,
+		void __user *argp)
+{
+	struct cdrom_mcn mcn;
+	int ret;
 
-	case CDROM_DRIVE_STATUS: {
-		cdinfo(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n"); 
-		if (!(cdo->capability & CDC_DRIVE_STATUS))
-			return -ENOSYS;
-		if (!CDROM_CAN(CDC_SELECT_DISC))
-			return cdo->drive_status(cdi, CDSL_CURRENT);
-                if ((arg == CDSL_CURRENT) || (arg == CDSL_NONE)) 
-			return cdo->drive_status(cdi, CDSL_CURRENT);
-		if (((int)arg >= cdi->capacity))
-			return -EINVAL;
-		return cdrom_slot_status(cdi, arg);
-		}
+	cdinfo(CD_DO_IOCTL, "entering CDROM_GET_MCN\n");
 
-	/* Ok, this is where problems start.  The current interface for the
-	   CDROM_DISC_STATUS ioctl is flawed.  It makes the false assumption
-	   that CDs are all CDS_DATA_1 or all CDS_AUDIO, etc.  Unfortunatly,
-	   while this is often the case, it is also very common for CDs to
-	   have some tracks with data, and some tracks with audio.  Just 
-	   because I feel like it, I declare the following to be the best
-	   way to cope.  If the CD has ANY data tracks on it, it will be
-	   returned as a data CD.  If it has any XA tracks, I will return
-	   it as that.  Now I could simplify this interface by combining these 
-	   returns with the above, but this more clearly demonstrates
-	   the problem with the current interface.  Too bad this wasn't 
-	   designed to use bitmasks...         -Erik 
-
-	   Well, now we have the option CDS_MIXED: a mixed-type CD. 
-	   User level programmers might feel the ioctl is not very useful.
-	   					---david
-	*/
-	case CDROM_DISC_STATUS: {
-		tracktype tracks;
-		cdinfo(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n"); 
-		cdrom_count_tracks(cdi, &tracks);
-		if (tracks.error) 
-			return(tracks.error);
-
-		/* Policy mode on */
-		if (tracks.audio > 0) {
-			if (tracks.data==0 && tracks.cdi==0 && tracks.xa==0) 
-				return CDS_AUDIO;
-			else
-				return CDS_MIXED;
-		}
-		if (tracks.cdi > 0) return CDS_XA_2_2;
-		if (tracks.xa > 0) return CDS_XA_2_1;
-		if (tracks.data > 0) return CDS_DATA_1;
-		/* Policy mode off */
+	if (!(cdi->ops->capability & CDC_MCN))
+		return -ENOSYS;
+	ret = cdi->ops->get_mcn(cdi, &mcn);
+	if (ret)
+		return ret;
 
-		cdinfo(CD_WARNING,"This disc doesn't have any tracks I recognize!\n");
-		return CDS_NO_INFO;
-		}
+	if (copy_to_user(argp, &mcn, sizeof(mcn)))
+		return -EFAULT;
+	cdinfo(CD_DO_IOCTL, "CDROM_GET_MCN successful\n");
+	return 0;
+}
 
-	case CDROM_CHANGER_NSLOTS: {
-		cdinfo(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n"); 
-		return cdi->capacity;
-		}
+static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
+		unsigned long arg)
+{
+	cdinfo(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n");
+
+	if (!(cdi->ops->capability & CDC_DRIVE_STATUS))
+		return -ENOSYS;
+	if (!CDROM_CAN(CDC_SELECT_DISC) ||
+	    (arg == CDSL_CURRENT || arg == CDSL_NONE))
+		return cdi->ops->drive_status(cdi, CDSL_CURRENT);
+	if ((int)arg >= cdi->capacity)
+		return -EINVAL;
+	return cdrom_slot_status(cdi, arg);
+}
+
+/*
+ * Ok, this is where problems start.  The current interface for the
+ * CDROM_DISC_STATUS ioctl is flawed.  It makes the false assumption that
+ * CDs are all CDS_DATA_1 or all CDS_AUDIO, etc.  Unfortunately, while this
+ * is often the case, it is also very common for CDs to have some tracks
+ * with data, and some tracks with audio.  Just because I feel like it,
+ * I declare the following to be the best way to cope.  If the CD has ANY
+ * data tracks on it, it will be returned as a data CD.  If it has any XA
+ * tracks, I will return it as that.  Now I could simplify this interface
+ * by combining these returns with the above, but this more clearly
+ * demonstrates the problem with the current interface.  Too bad this
+ * wasn't designed to use bitmasks...         -Erik
+ *
+ * Well, now we have the option CDS_MIXED: a mixed-type CD.
+ * User level programmers might feel the ioctl is not very useful.
+ *					---david
+ */
+static int cdrom_ioctl_disc_status(struct cdrom_device_info *cdi)
+{
+	tracktype tracks;
+
+	cdinfo(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n");
+
+	cdrom_count_tracks(cdi, &tracks);
+	if (tracks.error)
+		return tracks.error;
+
+	/* Policy mode on */
+	if (tracks.audio > 0) {
+		if (!tracks.data && !tracks.cdi && !tracks.xa)
+			return CDS_AUDIO;
+		else
+			return CDS_MIXED;
 	}
 
-	/* use the ioctls that are implemented through the generic_packet()
-	   interface. this may look at bit funny, but if -ENOTTY is
-	   returned that particular ioctl is not implemented and we
-	   let it go through the device specific ones. */
+	if (tracks.cdi > 0)
+		return CDS_XA_2_2;
+	if (tracks.xa > 0)
+		return CDS_XA_2_1;
+	if (tracks.data > 0)
+		return CDS_DATA_1;
+	/* Policy mode off */
+
+	cdinfo(CD_WARNING, "This disc doesn't have any tracks I recognize!\n");
+	return CDS_NO_INFO;
+}
+
+static int cdrom_ioctl_changer_nslots(struct cdrom_device_info *cdi)
+{
+	cdinfo(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n");
+	return cdi->capacity;
+}
+
+static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi,
+		void __user *argp)
+{
+	struct cdrom_subchnl q;
+	u8 requested, back;
+	int ret;
+
+	/* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/
+
+	if (!CDROM_CAN(CDC_PLAY_AUDIO))
+		return -ENOSYS;
+	if (copy_from_user(&q, argp, sizeof(q)))
+		return -EFAULT;
+
+	requested = q.cdsc_format;
+	if (requested != CDROM_MSF && requested != CDROM_LBA)
+		return -EINVAL;
+	q.cdsc_format = CDROM_MSF;
+
+	ret = cdi->ops->audio_ioctl(cdi, CDROMSUBCHNL, &q);
+	if (ret)
+		return ret;
+
+	back = q.cdsc_format; /* local copy */
+	sanitize_format(&q.cdsc_absaddr, &back, requested);
+	sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
+
+	if (copy_to_user(argp, &q, sizeof(q)))
+		return -EFAULT;
+	/* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
+	return 0;
+}
+
+static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi,
+		void __user *argp)
+{
+	struct cdrom_tochdr header;
+	int ret;
+
+	/* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */
+
+	if (!CDROM_CAN(CDC_PLAY_AUDIO))
+		return -ENOSYS;
+	if (copy_from_user(&header, argp, sizeof(header)))
+		return -EFAULT;
+
+	ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(argp, &header, sizeof(header)))
+		return -EFAULT;
+	/* cdinfo(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */
+	return 0;
+}
+
+static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi,
+		void __user *argp)
+{
+	struct cdrom_tocentry entry;
+	u8 requested_format;
+	int ret;
+
+	/* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */
+
+	if (!CDROM_CAN(CDC_PLAY_AUDIO))
+		return -ENOSYS;
+	if (copy_from_user(&entry, argp, sizeof(entry)))
+		return -EFAULT;
+
+	requested_format = entry.cdte_format;
+	if (requested_format != CDROM_MSF && requested_format != CDROM_LBA)
+		return -EINVAL;
+	/* make interface to low-level uniform */
+	entry.cdte_format = CDROM_MSF;
+	ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry);
+	if (ret)
+		return ret;
+	sanitize_format(&entry.cdte_addr, &entry.cdte_format, requested_format);
+
+	if (copy_to_user(argp, &entry, sizeof(entry)))
+		return -EFAULT;
+	/* cdinfo(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */
+	return 0;
+}
+
+static int cdrom_ioctl_play_msf(struct cdrom_device_info *cdi,
+		void __user *argp)
+{
+	struct cdrom_msf msf;
+
+	cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
+
+	if (!CDROM_CAN(CDC_PLAY_AUDIO))
+		return -ENOSYS;
+	if (copy_from_user(&msf, argp, sizeof(msf)))
+		return -EFAULT;
+	return cdi->ops->audio_ioctl(cdi, CDROMPLAYMSF, &msf);
+}
+
+static int cdrom_ioctl_play_trkind(struct cdrom_device_info *cdi,
+		void __user *argp)
+{
+	struct cdrom_ti ti;
+	int ret;
+
+	cdinfo(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n");
+
+	if (!CDROM_CAN(CDC_PLAY_AUDIO))
+		return -ENOSYS;
+	if (copy_from_user(&ti, argp, sizeof(ti)))
+		return -EFAULT;
+
+	ret = check_for_audio_disc(cdi, cdi->ops);
+	if (ret)
+		return ret;
+	return cdi->ops->audio_ioctl(cdi, CDROMPLAYTRKIND, &ti);
+}
+
+static int cdrom_ioctl_volctrl(struct cdrom_device_info *cdi,
+		void __user *argp)
+{
+	struct cdrom_volctrl volume;
+
+	cdinfo(CD_DO_IOCTL, "entering CDROMVOLCTRL\n");
+
+	if (!CDROM_CAN(CDC_PLAY_AUDIO))
+		return -ENOSYS;
+	if (copy_from_user(&volume, argp, sizeof(volume)))
+		return -EFAULT;
+	return cdi->ops->audio_ioctl(cdi, CDROMVOLCTRL, &volume);
+}
+
+static int cdrom_ioctl_volread(struct cdrom_device_info *cdi,
+		void __user *argp)
+{
+	struct cdrom_volctrl volume;
+	int ret;
+
+	cdinfo(CD_DO_IOCTL, "entering CDROMVOLREAD\n");
+
+	if (!CDROM_CAN(CDC_PLAY_AUDIO))
+		return -ENOSYS;
+
+	ret = cdi->ops->audio_ioctl(cdi, CDROMVOLREAD, &volume);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(argp, &volume, sizeof(volume)))
+		return -EFAULT;
+	return 0;
+}
+
+static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi,
+		unsigned int cmd)
+{
+	int ret;
+
+	cdinfo(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n");
+
+	if (!CDROM_CAN(CDC_PLAY_AUDIO))
+		return -ENOSYS;
+	ret = check_for_audio_disc(cdi, cdi->ops);
+	if (ret)
+		return ret;
+	return cdi->ops->audio_ioctl(cdi, cmd, NULL);
+}
+
+/*
+ * Just about every imaginable ioctl is supported in the Uniform layer
+ * these days.
+ * ATAPI / SCSI specific code now mainly resides in mmc_ioctl().
+ */
+int cdrom_ioctl(struct file * file, struct cdrom_device_info *cdi,
+		struct inode *ip, unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	int ret;
+
+	/*
+	 * Try the generic SCSI command ioctl's first.
+	 */
+	ret = scsi_cmd_ioctl(file, ip->i_bdev->bd_disk, cmd, argp);
+	if (ret != -ENOTTY)
+		return ret;
+
+	switch (cmd) {
+	case CDROMMULTISESSION:
+		return cdrom_ioctl_multisession(cdi, argp);
+	case CDROMEJECT:
+		return cdrom_ioctl_eject(cdi);
+	case CDROMCLOSETRAY:
+		return cdrom_ioctl_closetray(cdi);
+	case CDROMEJECT_SW:
+		return cdrom_ioctl_eject_sw(cdi, arg);
+	case CDROM_MEDIA_CHANGED:
+		return cdrom_ioctl_media_changed(cdi, arg);
+	case CDROM_SET_OPTIONS:
+		return cdrom_ioctl_set_options(cdi, arg);
+	case CDROM_CLEAR_OPTIONS:
+		return cdrom_ioctl_clear_options(cdi, arg);
+	case CDROM_SELECT_SPEED:
+		return cdrom_ioctl_select_speed(cdi, arg);
+	case CDROM_SELECT_DISC:
+		return cdrom_ioctl_select_disc(cdi, arg);
+	case CDROMRESET:
+		return cdrom_ioctl_reset(cdi, ip->i_bdev);
+	case CDROM_LOCKDOOR:
+		return cdrom_ioctl_lock_door(cdi, arg);
+	case CDROM_DEBUG:
+		return cdrom_ioctl_debug(cdi, arg);
+	case CDROM_GET_CAPABILITY:
+		return cdrom_ioctl_get_capability(cdi);
+	case CDROM_GET_MCN:
+		return cdrom_ioctl_get_mcn(cdi, argp);
+	case CDROM_DRIVE_STATUS:
+		return cdrom_ioctl_drive_status(cdi, arg);
+	case CDROM_DISC_STATUS:
+		return cdrom_ioctl_disc_status(cdi);
+	case CDROM_CHANGER_NSLOTS:
+		return cdrom_ioctl_changer_nslots(cdi);
+	}
+
+	/*
+	 * Use the ioctls that are implemented through the generic_packet()
+	 * interface.  This may look a bit funny, but if -ENOTTY is
+	 * returned then that particular ioctl is not implemented and we
+	 * let it go through the device-specific ones.
+	 */
 	if (CDROM_CAN(CDC_GENERIC_PACKET)) {
 		ret = mmc_ioctl(cdi, cmd, arg);
-		if (ret != -ENOTTY) {
+		if (ret != -ENOTTY)
 			return ret;
-		}
 	}
 
-	/* note: most of the cdinfo() calls are commented out here,
-	   because they fill up the sys log when CD players poll
-	   the drive. */
+	/*
+	 * Note: most of the cdinfo() calls are commented out here,
+	 * because they fill up the sys log when CD players poll
+	 * the drive.
+	 */
 	switch (cmd) {
-	case CDROMSUBCHNL: {
-		struct cdrom_subchnl q;
-		u_char requested, back;
-		if (!CDROM_CAN(CDC_PLAY_AUDIO))
-			return -ENOSYS;
-		/* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/ 
-		IOCTL_IN(arg, struct cdrom_subchnl, q);
-		requested = q.cdsc_format;
-		if (!((requested == CDROM_MSF) ||
-		      (requested == CDROM_LBA)))
-			return -EINVAL;
-		q.cdsc_format = CDROM_MSF;
-		if ((ret=cdo->audio_ioctl(cdi, cmd, &q)))
-			return ret;
-		back = q.cdsc_format; /* local copy */
-		sanitize_format(&q.cdsc_absaddr, &back, requested);
-		sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
-		IOCTL_OUT(arg, struct cdrom_subchnl, q);
-		/* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ 
-		return 0;
-		}
-	case CDROMREADTOCHDR: {
-		struct cdrom_tochdr header;
-		if (!CDROM_CAN(CDC_PLAY_AUDIO))
-			return -ENOSYS;
-		/* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */ 
-		IOCTL_IN(arg, struct cdrom_tochdr, header);
-		if ((ret=cdo->audio_ioctl(cdi, cmd, &header)))
-			return ret;
-		IOCTL_OUT(arg, struct cdrom_tochdr, header);
-		/* cdinfo(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */ 
-		return 0;
-		}
-	case CDROMREADTOCENTRY: {
-		struct cdrom_tocentry entry;
-		u_char requested_format;
-		if (!CDROM_CAN(CDC_PLAY_AUDIO))
-			return -ENOSYS;
-		/* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */ 
-		IOCTL_IN(arg, struct cdrom_tocentry, entry);
-		requested_format = entry.cdte_format;
-		if (!((requested_format == CDROM_MSF) || 
-			(requested_format == CDROM_LBA)))
-				return -EINVAL;
-		/* make interface to low-level uniform */
-		entry.cdte_format = CDROM_MSF;
-		if ((ret=cdo->audio_ioctl(cdi, cmd, &entry)))
-			return ret;
-		sanitize_format(&entry.cdte_addr,
-		&entry.cdte_format, requested_format);
-		IOCTL_OUT(arg, struct cdrom_tocentry, entry);
-		/* cdinfo(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */ 
-		return 0;
-		}
-	case CDROMPLAYMSF: {
-		struct cdrom_msf msf;
-		if (!CDROM_CAN(CDC_PLAY_AUDIO))
-			return -ENOSYS;
-		cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); 
-		IOCTL_IN(arg, struct cdrom_msf, msf);
-		return cdo->audio_ioctl(cdi, cmd, &msf);
-		}
-	case CDROMPLAYTRKIND: {
-		struct cdrom_ti ti;
-		if (!CDROM_CAN(CDC_PLAY_AUDIO))
-			return -ENOSYS;
-		cdinfo(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n"); 
-		IOCTL_IN(arg, struct cdrom_ti, ti);
-		CHECKAUDIO;
-		return cdo->audio_ioctl(cdi, cmd, &ti);
-		}
-	case CDROMVOLCTRL: {
-		struct cdrom_volctrl volume;
-		if (!CDROM_CAN(CDC_PLAY_AUDIO))
-			return -ENOSYS;
-		cdinfo(CD_DO_IOCTL, "entering CDROMVOLCTRL\n"); 
-		IOCTL_IN(arg, struct cdrom_volctrl, volume);
-		return cdo->audio_ioctl(cdi, cmd, &volume);
-		}
-	case CDROMVOLREAD: {
-		struct cdrom_volctrl volume;
-		if (!CDROM_CAN(CDC_PLAY_AUDIO))
-			return -ENOSYS;
-		cdinfo(CD_DO_IOCTL, "entering CDROMVOLREAD\n"); 
-		if ((ret=cdo->audio_ioctl(cdi, cmd, &volume)))
-			return ret;
-		IOCTL_OUT(arg, struct cdrom_volctrl, volume);
-		return 0;
-		}
+	case CDROMSUBCHNL:
+		return cdrom_ioctl_get_subchnl(cdi, argp);
+	case CDROMREADTOCHDR:
+		return cdrom_ioctl_read_tochdr(cdi, argp);
+	case CDROMREADTOCENTRY:
+		return cdrom_ioctl_read_tocentry(cdi, argp);
+	case CDROMPLAYMSF:
+		return cdrom_ioctl_play_msf(cdi, argp);
+	case CDROMPLAYTRKIND:
+		return cdrom_ioctl_play_trkind(cdi, argp);
+	case CDROMVOLCTRL:
+		return cdrom_ioctl_volctrl(cdi, argp);
+	case CDROMVOLREAD:
+		return cdrom_ioctl_volread(cdi, argp);
 	case CDROMSTART:
 	case CDROMSTOP:
 	case CDROMPAUSE:
-	case CDROMRESUME: {
-		if (!CDROM_CAN(CDC_PLAY_AUDIO))
-			return -ENOSYS;
-		cdinfo(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n"); 
-		CHECKAUDIO;
-		return cdo->audio_ioctl(cdi, cmd, NULL);
-		}
-	} /* switch */
+	case CDROMRESUME:
+		return cdrom_ioctl_audioctl(cdi, cmd);
+	}
 
-	/* do the device specific ioctls */
-	if (CDROM_CAN(CDC_IOCTLS))
-		return cdo->dev_ioctl(cdi, cmd, arg);
-	
 	return -ENOSYS;
 }
 
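
Every command the refactored cdrom_ioctl() dispatches remains reachable
unchanged from user space.  A minimal test program against the uniform
layer (the /dev/cdrom path is an assumption; point it at your drive):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/cdrom.h>

	int main(int argc, char **argv)
	{
		const char *dev = argc > 1 ? argv[1] : "/dev/cdrom";
		int fd = open(dev, O_RDONLY | O_NONBLOCK);

		if (fd < 0) {
			perror(dev);
			return 1;
		}
		/* both requests are routed through cdrom_ioctl() above */
		printf("capability mask: 0x%x\n",
		       ioctl(fd, CDROM_GET_CAPABILITY, 0));
		printf("drive status: %d\n",
		       ioctl(fd, CDROM_DRIVE_STATUS, CDSL_CURRENT));
		close(fd);
		return 0;
	}
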
diff -urN oldtree/drivers/cdrom/cdu31a.c newtree/drivers/cdrom/cdu31a.c
--- oldtree/drivers/cdrom/cdu31a.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/cdrom/cdu31a.c	2006-01-29 11:18:20.573161200 +0000
@@ -2668,7 +2668,7 @@
 	return retval;
 }
 
-static int scd_dev_ioctl(struct cdrom_device_info *cdi,
+static int scd_read_audio(struct cdrom_device_info *cdi,
 			 unsigned int cmd, unsigned long arg)
 {
 	void __user *argp = (void __user *)arg;
@@ -2894,11 +2894,10 @@
 	.get_mcn		= scd_get_mcn,
 	.reset			= scd_reset,
 	.audio_ioctl		= scd_audio_ioctl,
-	.dev_ioctl		= scd_dev_ioctl,
 	.capability		= CDC_OPEN_TRAY | CDC_CLOSE_TRAY | CDC_LOCK |
 				  CDC_SELECT_SPEED | CDC_MULTI_SESSION |
 				  CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO |
-				  CDC_RESET | CDC_IOCTLS | CDC_DRIVE_STATUS,
+				  CDC_RESET | CDC_DRIVE_STATUS,
 	.n_minors		= 1,
 };
 
@@ -2936,6 +2935,9 @@
 		case CDROMCLOSETRAY:
 			retval = scd_tray_move(&scd_info, 0);
 			break;
+		case CDROMREADAUDIO:
+			retval = scd_read_audio(&scd_info, CDROMREADAUDIO, arg);
+			break;
 		default:
 			retval = cdrom_ioctl(file, &scd_info, inode, cmd, arg);
 	}
diff -urN oldtree/drivers/cdrom/cm206.c newtree/drivers/cdrom/cm206.c
--- oldtree/drivers/cdrom/cm206.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/cdrom/cm206.c	2006-01-29 11:18:20.599157248 +0000
@@ -1157,32 +1157,6 @@
 	}
 }
 
-/* Ioctl. These ioctls are specific to the cm206 driver. I have made
-   some driver statistics accessible through ioctl calls.
- */
-
-static int cm206_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
-		       unsigned long arg)
-{
-	switch (cmd) {
-#ifdef STATISTICS
-	case CM206CTL_GET_STAT:
-		if (arg >= NR_STATS)
-			return -EINVAL;
-		else
-			return cd->stats[arg];
-	case CM206CTL_GET_LAST_STAT:
-		if (arg >= NR_STATS)
-			return -EINVAL;
-		else
-			return cd->last_stat[arg];
-#endif
-	default:
-		debug(("Unknown ioctl call 0x%x\n", cmd));
-		return -EINVAL;
-	}
-}
-
 static int cm206_media_changed(struct cdrom_device_info *cdi, int disc_nr)
 {
 	if (cd != NULL) {
@@ -1321,11 +1295,10 @@
 	.get_mcn		= cm206_get_upc,
 	.reset			= cm206_reset,
 	.audio_ioctl		= cm206_audio_ioctl,
-	.dev_ioctl		= cm206_ioctl,
 	.capability		= CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK |
 				  CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
 				  CDC_MCN | CDC_PLAY_AUDIO | CDC_SELECT_SPEED |
-				  CDC_IOCTLS | CDC_DRIVE_STATUS,
+				  CDC_DRIVE_STATUS,
 	.n_minors		= 1,
 };
 
@@ -1350,6 +1323,21 @@
 static int cm206_block_ioctl(struct inode *inode, struct file *file,
 				unsigned cmd, unsigned long arg)
 {
+	switch (cmd) {
+#ifdef STATISTICS
+	case CM206CTL_GET_STAT:
+		if (arg >= NR_STATS)
+			return -EINVAL;
+		return cd->stats[arg];
+	case CM206CTL_GET_LAST_STAT:
+		if (arg >= NR_STATS)
+			return -EINVAL;
+		return cd->last_stat[arg];
+#endif
+	default:
+		break;
+	}
+
 	return cdrom_ioctl(file, &cm206_info, inode, cmd, arg);
 }
 
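
cdu31a and cm206 are converted the same way: now that dev_ioctl is gone,
device-private commands are caught in the block ioctl handler and
everything else falls through to cdrom_ioctl().  The dispatch shape in
isolation (PRIVATE_CMD and both handlers are invented for illustration):

	#include <stdio.h>

	#define PRIVATE_CMD 0x1000	/* hypothetical, like CM206CTL_* */

	static int uniform_ioctl(unsigned int cmd)
	{
		printf("delegated 0x%x to the uniform layer\n", cmd);
		return 0;
	}

	static int block_ioctl(unsigned int cmd, unsigned long arg)
	{
		switch (cmd) {
		case PRIVATE_CMD:	/* handled locally */
			return (int)arg;
		default:
			break;
		}
		return uniform_ioctl(cmd);	/* fall through */
	}

	int main(void)
	{
		printf("private: %d\n", block_ioctl(PRIVATE_CMD, 42));
		block_ioctl(0x5301, 0);
		return 0;
	}
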
diff -urN oldtree/drivers/cdrom/sbpcd.c newtree/drivers/cdrom/sbpcd.c
--- oldtree/drivers/cdrom/sbpcd.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/cdrom/sbpcd.c	2006-01-29 11:18:20.604156488 +0000
@@ -4160,18 +4160,13 @@
 	return  0;
 }
 
-/*==========================================================================*/
-/*==========================================================================*/
-/*
- * ioctl support
- */
-static int sbpcd_dev_ioctl(struct cdrom_device_info *cdi, u_int cmd,
-		      u_long arg)
+static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd,
+		       void *arg)
 {
 	struct sbpcd_drive *p = cdi->handle;
-	int i;
+	int i, st, j;
 	
-	msg(DBG_IO2,"ioctl(%s, 0x%08lX, 0x%08lX)\n", cdi->name, cmd, arg);
+	msg(DBG_IO2,"ioctl(%s, 0x%08X, %p)\n", cdi->name, cmd, arg);
 	if (p->drv_id==-1) {
 		msg(DBG_INF, "ioctl: bad device: %s\n", cdi->name);
 		return (-ENXIO);             /* no such drive */
@@ -4183,1194 +4178,1192 @@
 	msg(DBG_IO2,"ioctl: device %s, request %04X\n",cdi->name,cmd);
 	switch (cmd) 		/* Sun-compatible */
 	{
-	case DDIOCSDBG:		/* DDI Debug */
-		if (!capable(CAP_SYS_ADMIN)) RETURN_UP(-EPERM);
-		i=sbpcd_dbg_ioctl(arg,1);
-		RETURN_UP(i);
-	case CDROMRESET:      /* hard reset the drive */
-		msg(DBG_IOC,"ioctl: CDROMRESET entered.\n");
-		i=DriveReset();
-		current_drive->audio_state=0;
-		RETURN_UP(i);
 		
-	case CDROMREADMODE1:
-		msg(DBG_IOC,"ioctl: CDROMREADMODE1 requested.\n");
+	case CDROMPAUSE:     /* Pause the drive */
+		msg(DBG_IOC,"ioctl: CDROMPAUSE entered.\n");
+		/* pause the drive unit when it is currently in PLAY mode,         */
+		/* or reset the starting and ending locations when in PAUSED mode. */
+		/* If applicable, at the next stopping point it reaches            */
+		/* the drive will discontinue playing.                             */
+		switch (current_drive->audio_state)
+		{
+		case audio_playing:
+			if (famL_drive) i=cc_ReadSubQ();
+			else i=cc_Pause_Resume(1);
+			if (i<0) RETURN_UP(-EIO);
+			if (famL_drive) i=cc_Pause_Resume(1);
+			else i=cc_ReadSubQ();
+			if (i<0) RETURN_UP(-EIO);
+			current_drive->pos_audio_start=current_drive->SubQ_run_tot;
+			current_drive->audio_state=audio_pausing;
+			RETURN_UP(0);
+		case audio_pausing:
+			i=cc_Seek(current_drive->pos_audio_start,1);
+			if (i<0) RETURN_UP(-EIO);
+			RETURN_UP(0);
+		default:
+			RETURN_UP(-EINVAL);
+		}
+
+	case CDROMRESUME: /* resume paused audio play */
+		msg(DBG_IOC,"ioctl: CDROMRESUME entered.\n");
+		/* resume playing audio tracks when a previous PLAY AUDIO call has  */
+		/* been paused with a PAUSE command.                                */
+		/* It will resume playing from the location saved in SubQ_run_tot.  */
+		if (current_drive->audio_state!=audio_pausing) RETURN_UP(-EINVAL);
+		if (famL_drive)
+			i=cc_PlayAudio(current_drive->pos_audio_start,
+				       current_drive->pos_audio_end);
+		else i=cc_Pause_Resume(3);
+		if (i<0) RETURN_UP(-EIO);
+		current_drive->audio_state=audio_playing;
+		RETURN_UP(0);
+
+	case CDROMPLAYMSF:
+		msg(DBG_IOC,"ioctl: CDROMPLAYMSF entered.\n");
 #ifdef SAFE_MIXED
 		if (current_drive->has_data>1) RETURN_UP(-EBUSY);
 #endif /* SAFE_MIXED */
-		cc_ModeSelect(CD_FRAMESIZE);
-		cc_ModeSense();
-		current_drive->mode=READ_M1;
+		if (current_drive->audio_state==audio_playing)
+		{
+			i=cc_Pause_Resume(1);
+			if (i<0) RETURN_UP(-EIO);
+			i=cc_ReadSubQ();
+			if (i<0) RETURN_UP(-EIO);
+			current_drive->pos_audio_start=current_drive->SubQ_run_tot;
+			i=cc_Seek(current_drive->pos_audio_start,1);
+		}
+		memcpy(&msf, (void *) arg, sizeof(struct cdrom_msf));
+		/* values come as msf-bin */
+		current_drive->pos_audio_start = (msf.cdmsf_min0<<16) |
+                        (msf.cdmsf_sec0<<8) |
+				msf.cdmsf_frame0;
+		current_drive->pos_audio_end = (msf.cdmsf_min1<<16) |
+			(msf.cdmsf_sec1<<8) |
+				msf.cdmsf_frame1;
+		msg(DBG_IOX,"ioctl: CDROMPLAYMSF %08X %08X\n",
+		    current_drive->pos_audio_start,current_drive->pos_audio_end);
+		i=cc_PlayAudio(current_drive->pos_audio_start,current_drive->pos_audio_end);
+		if (i<0)
+		{
+			msg(DBG_INF,"ioctl: cc_PlayAudio returns %d\n",i);
+			DriveReset();
+			current_drive->audio_state=0;
+			RETURN_UP(-EIO);
+		}
+		current_drive->audio_state=audio_playing;
 		RETURN_UP(0);
 		
-	case CDROMREADMODE2: /* not usable at the moment */
-		msg(DBG_IOC,"ioctl: CDROMREADMODE2 requested.\n");
+	case CDROMPLAYTRKIND: /* Play a track.  This currently ignores index. */
+		msg(DBG_IOC,"ioctl: CDROMPLAYTRKIND entered.\n");
 #ifdef SAFE_MIXED
 		if (current_drive->has_data>1) RETURN_UP(-EBUSY);
 #endif /* SAFE_MIXED */
-		cc_ModeSelect(CD_FRAMESIZE_RAW1);
-		cc_ModeSense();
-		current_drive->mode=READ_M2;
+		if (current_drive->audio_state==audio_playing)
+		{
+			msg(DBG_IOX,"CDROMPLAYTRKIND: already audio_playing.\n");
+#if 1
+			RETURN_UP(0); /* just let us play on */
+#else
+			RETURN_UP(-EINVAL); /* play on, but say "error" */
+#endif
+		}
+		memcpy(&ti,(void *) arg,sizeof(struct cdrom_ti));
+		msg(DBG_IOX,"ioctl: trk0: %d, ind0: %d, trk1:%d, ind1:%d\n",
+		    ti.cdti_trk0,ti.cdti_ind0,ti.cdti_trk1,ti.cdti_ind1);
+		if (ti.cdti_trk0<current_drive->n_first_track) RETURN_UP(-EINVAL);
+		if (ti.cdti_trk0>current_drive->n_last_track) RETURN_UP(-EINVAL);
+		if (ti.cdti_trk1<ti.cdti_trk0) ti.cdti_trk1=ti.cdti_trk0;
+		if (ti.cdti_trk1>current_drive->n_last_track) ti.cdti_trk1=current_drive->n_last_track;
+		current_drive->pos_audio_start=current_drive->TocBuffer[ti.cdti_trk0].address;
+		current_drive->pos_audio_end=current_drive->TocBuffer[ti.cdti_trk1+1].address;
+		i=cc_PlayAudio(current_drive->pos_audio_start,current_drive->pos_audio_end);
+		if (i<0)
+		{
+			msg(DBG_INF,"ioctl: cc_PlayAudio returns %d\n",i);
+			DriveReset();
+			current_drive->audio_state=0;
+			RETURN_UP(-EIO);
+		}
+		current_drive->audio_state=audio_playing;
 		RETURN_UP(0);
 		
-	case CDROMAUDIOBUFSIZ: /* configure the audio buffer size */
-		msg(DBG_IOC,"ioctl: CDROMAUDIOBUFSIZ entered.\n");
-		if (current_drive->sbp_audsiz>0)
-			vfree(current_drive->aud_buf);
-		current_drive->aud_buf=NULL;
-		current_drive->sbp_audsiz=arg;
+	case CDROMREADTOCHDR:        /* Read the table of contents header */
+		msg(DBG_IOC,"ioctl: CDROMREADTOCHDR entered.\n");
+		tochdr.cdth_trk0=current_drive->n_first_track;
+		tochdr.cdth_trk1=current_drive->n_last_track;
+		memcpy((void *) arg, &tochdr, sizeof(struct cdrom_tochdr));
+		RETURN_UP(0);
 		
-		if (current_drive->sbp_audsiz>16)
-		{
-			current_drive->sbp_audsiz = 0;
-			RETURN_UP(current_drive->sbp_audsiz);
-		}
-	
-		if (current_drive->sbp_audsiz>0)
+	case CDROMREADTOCENTRY:      /* Read an entry in the table of contents */
+		msg(DBG_IOC,"ioctl: CDROMREADTOCENTRY entered.\n");
+		memcpy(&tocentry, (void *) arg, sizeof(struct cdrom_tocentry));
+		i=tocentry.cdte_track;
+		if (i==CDROM_LEADOUT) i=current_drive->n_last_track+1;
+		else if (i<current_drive->n_first_track||i>current_drive->n_last_track)
+			RETURN_UP(-EINVAL);
+		tocentry.cdte_adr=current_drive->TocBuffer[i].ctl_adr&0x0F;
+		tocentry.cdte_ctrl=(current_drive->TocBuffer[i].ctl_adr>>4)&0x0F;
+		tocentry.cdte_datamode=current_drive->TocBuffer[i].format;
+		if (tocentry.cdte_format==CDROM_MSF) /* MSF-bin required */
 		{
-			current_drive->aud_buf=(u_char *) vmalloc(current_drive->sbp_audsiz*CD_FRAMESIZE_RAW);
-			if (current_drive->aud_buf==NULL)
-			{
-				msg(DBG_INF,"audio buffer (%d frames) not available.\n",current_drive->sbp_audsiz);
-				current_drive->sbp_audsiz=0;
-			}
-			else msg(DBG_INF,"audio buffer size: %d frames.\n",current_drive->sbp_audsiz);
+			tocentry.cdte_addr.msf.minute=(current_drive->TocBuffer[i].address>>16)&0x00FF;
+			tocentry.cdte_addr.msf.second=(current_drive->TocBuffer[i].address>>8)&0x00FF;
+			tocentry.cdte_addr.msf.frame=current_drive->TocBuffer[i].address&0x00FF;
 		}
-		RETURN_UP(current_drive->sbp_audsiz);
-
-	case CDROMREADAUDIO:
-	{ /* start of CDROMREADAUDIO */
-		int i=0, j=0, frame, block=0;
-		u_int try=0;
-		u_long timeout;
-		u_char *p;
-		u_int data_tries = 0;
-		u_int data_waits = 0;
-		u_int data_retrying = 0;
-		int status_tries;
-		int error_flag;
+		else if (tocentry.cdte_format==CDROM_LBA) /* blk required */
+			tocentry.cdte_addr.lba=msf2blk(current_drive->TocBuffer[i].address);
+		else RETURN_UP(-EINVAL);
+		memcpy((void *) arg, &tocentry, sizeof(struct cdrom_tocentry));
+		RETURN_UP(0);
 		
-		msg(DBG_IOC,"ioctl: CDROMREADAUDIO entered.\n");
-		if (fam0_drive) RETURN_UP(-EINVAL);
-		if (famL_drive) RETURN_UP(-EINVAL);
-		if (famV_drive) RETURN_UP(-EINVAL);
-		if (famT_drive) RETURN_UP(-EINVAL);
+	case CDROMSTOP:      /* Spin down the drive */
+		msg(DBG_IOC,"ioctl: CDROMSTOP entered.\n");
 #ifdef SAFE_MIXED
 		if (current_drive->has_data>1) RETURN_UP(-EBUSY);
 #endif /* SAFE_MIXED */ 
-		if (current_drive->aud_buf==NULL) RETURN_UP(-EINVAL);
-		if (copy_from_user(&read_audio, (void __user *)arg,
-				   sizeof(struct cdrom_read_audio)))
-			RETURN_UP(-EFAULT);
-		if (read_audio.nframes < 0 || read_audio.nframes>current_drive->sbp_audsiz) RETURN_UP(-EINVAL);
-		if (!access_ok(VERIFY_WRITE, read_audio.buf,
-			      read_audio.nframes*CD_FRAMESIZE_RAW))
-                	RETURN_UP(-EFAULT);
-		
-		if (read_audio.addr_format==CDROM_MSF) /* MSF-bin specification of where to start */
-			block=msf2lba(&read_audio.addr.msf.minute);
-		else if (read_audio.addr_format==CDROM_LBA) /* lba specification of where to start */
-			block=read_audio.addr.lba;
-		else RETURN_UP(-EINVAL);
-#if 000
-		i=cc_SetSpeed(speed_150,0,0);
-		if (i) msg(DBG_AUD,"read_audio: SetSpeed error %d\n", i);
+		i=cc_Pause_Resume(1);
+		current_drive->audio_state=0;
+#if 0
+		cc_DriveReset();
 #endif
-		msg(DBG_AUD,"read_audio: lba: %d, msf: %06X\n",
-		    block, blk2msf(block));
-		msg(DBG_AUD,"read_audio: before cc_ReadStatus.\n");
-#if OLD_BUSY
-		while (busy_data) sbp_sleep(HZ/10); /* wait a bit */
-		busy_audio=1;
-#endif /* OLD_BUSY */ 
-		error_flag=0;
-		for (data_tries=5; data_tries>0; data_tries--)
-		{
-			msg(DBG_AUD,"data_tries=%d ...\n", data_tries);
-			current_drive->mode=READ_AU;
-			cc_ModeSelect(CD_FRAMESIZE_RAW);
-			cc_ModeSense();
-			for (status_tries=3; status_tries > 0; status_tries--)
-			{
-				flags_cmd_out |= f_respo3;
-				cc_ReadStatus();
-				if (sbp_status() != 0) break;
-				if (st_check) cc_ReadError();
-				sbp_sleep(1);    /* wait a bit, try again */
-			}
-			if (status_tries == 0)
-			{
-				msg(DBG_AUD,"read_audio: sbp_status: failed after 3 tries in line %d.\n", __LINE__);
-				continue;
-			}
-			msg(DBG_AUD,"read_audio: sbp_status: ok.\n");
-			
-			flags_cmd_out = f_putcmd | f_respo2 | f_ResponseStatus | f_obey_p_check;
-			if (fam0L_drive)
-			{
-				flags_cmd_out |= f_lopsta | f_getsta | f_bit1;
-				cmd_type=READ_M2;
-				drvcmd[0]=CMD0_READ_XA; /* "read XA frames", old drives */
-				drvcmd[1]=(block>>16)&0x000000ff;
-				drvcmd[2]=(block>>8)&0x000000ff;
-				drvcmd[3]=block&0x000000ff;
-				drvcmd[4]=0;
-				drvcmd[5]=read_audio.nframes; /* # of frames */
-				drvcmd[6]=0;
-			}
-			else if (fam1_drive)
-			{
-				drvcmd[0]=CMD1_READ; /* "read frames", new drives */
-				lba2msf(block,&drvcmd[1]); /* msf-bin format required */
-				drvcmd[4]=0;
-				drvcmd[5]=0;
-				drvcmd[6]=read_audio.nframes; /* # of frames */
-			}
-			else if (fam2_drive)
-			{
-				drvcmd[0]=CMD2_READ_XA2;
-				lba2msf(block,&drvcmd[1]); /* msf-bin format required */
-				drvcmd[4]=0;
-				drvcmd[5]=read_audio.nframes; /* # of frames */
-				drvcmd[6]=0x11; /* raw mode */
-			}
-			else if (famT_drive) /* CD-55A: not tested yet */
-			{
-			}
-			msg(DBG_AUD,"read_audio: before giving \"read\" command.\n");
-			flags_cmd_out=f_putcmd;
-			response_count=0;
-			i=cmd_out();
-			if (i<0) msg(DBG_INF,"error giving READ AUDIO command: %0d\n", i);
-			sbp_sleep(0);
-			msg(DBG_AUD,"read_audio: after giving \"read\" command.\n");
-			for (frame=1;frame<2 && !error_flag; frame++)
-			{
-				try=maxtim_data;
-				for (timeout=jiffies+9*HZ; ; )
-				{
-					for ( ; try!=0;try--)
-					{
-						j=inb(CDi_status);
-						if (!(j&s_not_data_ready)) break;
-						if (!(j&s_not_result_ready)) break;
-						if (fam0L_drive) if (j&s_attention) break;
-					}
-					if (try != 0 || time_after_eq(jiffies, timeout)) break;
-					if (data_retrying == 0) data_waits++;
-					data_retrying = 1;
-					sbp_sleep(1);
-					try = 1;
-				}
-				if (try==0)
-				{
-					msg(DBG_INF,"read_audio: sbp_data: CDi_status timeout.\n");
-					error_flag++;
-					break;
-				}
-				msg(DBG_AUD,"read_audio: sbp_data: CDi_status ok.\n");
-				if (j&s_not_data_ready)
-				{
-					msg(DBG_INF, "read_audio: sbp_data: DATA_READY timeout.\n");
-					error_flag++;
-					break;
-				}
-				msg(DBG_AUD,"read_audio: before reading data.\n");
-				error_flag=0;
-				p = current_drive->aud_buf;
-				if (sbpro_type==1) OUT(CDo_sel_i_d,1);
-				if (do_16bit)
-				{
-					u_short *p2 = (u_short *) p;
+		RETURN_UP(i);
 
-					for (; (u_char *) p2 < current_drive->aud_buf + read_audio.nframes*CD_FRAMESIZE_RAW;)
-				  	{
-						if ((inb_p(CDi_status)&s_not_data_ready)) continue;
+	case CDROMSTART:  /* Spin up the drive */
+		msg(DBG_IOC,"ioctl: CDROMSTART entered.\n");
+		cc_SpinUp();
+		current_drive->audio_state=0;
+		RETURN_UP(0);
 
-						/* get one sample */
-						*p2++ = inw_p(CDi_data);
-						*p2++ = inw_p(CDi_data);
-					}
-				} else {
-					for (; p < current_drive->aud_buf + read_audio.nframes*CD_FRAMESIZE_RAW;)
-				  	{
-						if ((inb_p(CDi_status)&s_not_data_ready)) continue;
+	case CDROMVOLCTRL:   /* Volume control */
+		msg(DBG_IOC,"ioctl: CDROMVOLCTRL entered.\n");
+		memcpy(&volctrl,(char *) arg,sizeof(volctrl));
+		current_drive->vol_chan0=0;
+		current_drive->vol_ctrl0=volctrl.channel0;
+		current_drive->vol_chan1=1;
+		current_drive->vol_ctrl1=volctrl.channel1;
+		i=cc_SetVolume();
+		RETURN_UP(0);
 
-						/* get one sample */
-						*p++ = inb_p(CDi_data);
-						*p++ = inb_p(CDi_data);
-						*p++ = inb_p(CDi_data);
-						*p++ = inb_p(CDi_data);
-					}
-				}
-				if (sbpro_type==1) OUT(CDo_sel_i_d,0);
-				data_retrying = 0;
-			}
-			msg(DBG_AUD,"read_audio: after reading data.\n");
-			if (error_flag)    /* must have been spurious D_RDY or (ATTN&&!D_RDY) */
-			{
-				msg(DBG_AUD,"read_audio: read aborted by drive\n");
-#if 0000
-				i=cc_DriveReset();                /* ugly fix to prevent a hang */
-#else
-				i=cc_ReadError();
-#endif
-				continue;
-			}
-			if (fam0L_drive)
-			{
-				i=maxtim_data;
-				for (timeout=jiffies+9*HZ; time_before(jiffies, timeout); timeout--)
-				{
-					for ( ;i!=0;i--)
-					{
-						j=inb(CDi_status);
-						if (!(j&s_not_data_ready)) break;
-						if (!(j&s_not_result_ready)) break;
-						if (j&s_attention) break;
-					}
-					if (i != 0 || time_after_eq(jiffies, timeout)) break;
-					sbp_sleep(0);
-					i = 1;
-				}
-				if (i==0) msg(DBG_AUD,"read_audio: STATUS TIMEOUT AFTER READ");
-				if (!(j&s_attention))
-				{
-					msg(DBG_AUD,"read_audio: sbp_data: timeout waiting DRV_ATTN - retrying\n");
-					i=cc_DriveReset();  /* ugly fix to prevent a hang */
-					continue;
-				}
+	case CDROMVOLREAD:   /* read Volume settings from drive */
+		msg(DBG_IOC,"ioctl: CDROMVOLREAD entered.\n");
+		st=cc_GetVolume();
+		if (st<0) RETURN_UP(st);
+		volctrl.channel0=current_drive->vol_ctrl0;
+		volctrl.channel1=current_drive->vol_ctrl1;
+		volctrl.channel2=0;
+		volctrl.channel3=0;
+		memcpy((void *)arg,&volctrl,sizeof(volctrl));
+		RETURN_UP(0);
+
+	case CDROMSUBCHNL:   /* Get subchannel info */
+		msg(DBG_IOS,"ioctl: CDROMSUBCHNL entered.\n");
+		/* Bogus, I can do better than this! --AJK
+		if ((st_spinning)||(!subq_valid)) {
+			i=cc_ReadSubQ();
+			if (i<0) RETURN_UP(-EIO);
+		}
+		*/
+		i=cc_ReadSubQ();
+		if (i<0) {
+			j=cc_ReadError(); /* clear out error status from drive */
+			current_drive->audio_state=CDROM_AUDIO_NO_STATUS;
+			/* get and set the disk state here,
+			probably not the right place, but who cares!
+			It makes it work properly! --AJK */
+			if (current_drive->CD_changed==0xFF) {
+				msg(DBG_000,"Disk change detected\n");
+				current_drive->diskstate_flags &= ~cd_size_bit;
 			}
-			do
-			{
-				if (fam0L_drive) cc_ReadStatus();
-				i=ResponseStatus();  /* builds status_bits, returns orig. status (old) or faked p_success (new) */
-				if (i<0) { msg(DBG_AUD,
-					       "read_audio: cc_ReadStatus error after read: %02X\n",
-					       current_drive->status_bits);
-					   continue; /* FIXME */
-				   }
+			RETURN_UP(-EIO);
+		}
+		if (current_drive->CD_changed==0xFF) {
+			/* reread the TOC because the disk has changed! --AJK */
+			msg(DBG_000,"Disk changed STILL detected, rereading TOC!\n");
+			i=DiskInfo();
+			if(i==0) {
+				current_drive->CD_changed=0x00; /* cd has changed, proceed, */
+				RETURN_UP(-EIO); /* and get TOC, etc on next try! --AJK */
+			} else {
+				RETURN_UP(-EIO); /* we weren't ready yet! --AJK */
 			}
-			while ((fam0L_drive)&&(!st_check)&&(!(i&p_success)));
-			if (st_check)
-			{
-				i=cc_ReadError();
-				msg(DBG_AUD,"read_audio: cc_ReadError was necessary after read: %02X\n",i);
-				continue;
+		}
+		memcpy(&SC, (void *) arg, sizeof(struct cdrom_subchnl));
+		/*
+			This virtual crap is very bogus!
+			It doesn't detect when the cd is done playing audio!
+			Let's do this right with proper hardware register reading!
+		*/
+		cc_ReadStatus();
+		i=ResponseStatus();
+		msg(DBG_000,"Drive Status: door_locked =%d.\n", st_door_locked);
+		msg(DBG_000,"Drive Status: door_closed =%d.\n", st_door_closed);
+		msg(DBG_000,"Drive Status: caddy_in =%d.\n", st_caddy_in);
+		msg(DBG_000,"Drive Status: disk_ok =%d.\n", st_diskok);
+		msg(DBG_000,"Drive Status: spinning =%d.\n", st_spinning);
+		msg(DBG_000,"Drive Status: busy =%d.\n", st_busy);
+		/* st_busy indicates if it's _ACTUALLY_ playing audio */
+		switch (current_drive->audio_state)
+		{
+		case audio_playing:
+			if(st_busy==0) {
+				/* CD has stopped playing audio --AJK */
+				current_drive->audio_state=audio_completed;
+				SC.cdsc_audiostatus=CDROM_AUDIO_COMPLETED;
+			} else {
+				SC.cdsc_audiostatus=CDROM_AUDIO_PLAY;
 			}
-			if (copy_to_user(read_audio.buf,
-					 current_drive->aud_buf,
-					 read_audio.nframes * CD_FRAMESIZE_RAW))
-				RETURN_UP(-EFAULT);
-			msg(DBG_AUD,"read_audio: copy_to_user done.\n");
+			break;
+		case audio_pausing:
+			SC.cdsc_audiostatus=CDROM_AUDIO_PAUSED;
+			break;
+		case audio_completed:
+			SC.cdsc_audiostatus=CDROM_AUDIO_COMPLETED;
+			break;
+		default:
+			SC.cdsc_audiostatus=CDROM_AUDIO_NO_STATUS;
 			break;
 		}
-		cc_ModeSelect(CD_FRAMESIZE);
-		cc_ModeSense();
-		current_drive->mode=READ_M1;
-#if OLD_BUSY
-		busy_audio=0;
-#endif /* OLD_BUSY */ 
-		if (data_tries == 0)
+		SC.cdsc_adr=current_drive->SubQ_ctl_adr;
+		SC.cdsc_ctrl=current_drive->SubQ_ctl_adr>>4;
+		SC.cdsc_trk=bcd2bin(current_drive->SubQ_trk);
+		SC.cdsc_ind=bcd2bin(current_drive->SubQ_pnt_idx);
+		if (SC.cdsc_format==CDROM_LBA)
 		{
-			msg(DBG_AUD,"read_audio: failed after 5 tries in line %d.\n", __LINE__);
-			RETURN_UP(-EIO);
+			SC.cdsc_absaddr.lba=msf2blk(current_drive->SubQ_run_tot);
+			SC.cdsc_reladdr.lba=msf2blk(current_drive->SubQ_run_trk);
 		}
-		msg(DBG_AUD,"read_audio: successful return.\n");
+		else /* not only if (SC.cdsc_format==CDROM_MSF) */
+		{
+			SC.cdsc_absaddr.msf.minute=(current_drive->SubQ_run_tot>>16)&0x00FF;
+			SC.cdsc_absaddr.msf.second=(current_drive->SubQ_run_tot>>8)&0x00FF;
+			SC.cdsc_absaddr.msf.frame=current_drive->SubQ_run_tot&0x00FF;
+			SC.cdsc_reladdr.msf.minute=(current_drive->SubQ_run_trk>>16)&0x00FF;
+			SC.cdsc_reladdr.msf.second=(current_drive->SubQ_run_trk>>8)&0x00FF;
+			SC.cdsc_reladdr.msf.frame=current_drive->SubQ_run_trk&0x00FF;
+		}
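+		/*
+		 * SubQ_run_tot/SubQ_run_trk pack a binary MSF address as
+		 * 0x00MMSSFF, which is what the shifts above unpack: e.g.
+		 * 0x00023A21 yields minute 2, second 0x3A (58), frame
+		 * 0x21 (33).  (Illustrative comment only.)
+		 */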
+		memcpy((void *) arg, &SC, sizeof(struct cdrom_subchnl));
+		msg(DBG_IOS,"CDROMSUBCHNL: %1X %02X %08X %08X %02X %02X %06X %06X\n",
+		    SC.cdsc_format,SC.cdsc_audiostatus,
+		    SC.cdsc_adr,SC.cdsc_ctrl,
+		    SC.cdsc_trk,SC.cdsc_ind,
+		    SC.cdsc_absaddr,SC.cdsc_reladdr);
 		RETURN_UP(0);
-	} /* end of CDROMREADAUDIO */
-		
+
 	default:
 		msg(DBG_IOC,"ioctl: unknown function request %04X\n", cmd);
 		RETURN_UP(-EINVAL);
 	} /* end switch(cmd) */
 }
+/*==========================================================================*/
+/*
+ *  Take care of the different block sizes between cdrom and Linux.
+ */
+static void sbp_transfer(struct request *req)
+{
+	long offs;
 
-static int sbpcd_audio_ioctl(struct cdrom_device_info *cdi, u_int cmd,
-		       void * arg)
+	while ( (req->nr_sectors > 0) &&
+	       (req->sector/4 >= current_drive->sbp_first_frame) &&
+	       (req->sector/4 <= current_drive->sbp_last_frame) )
+	{
+		offs = (req->sector - current_drive->sbp_first_frame * 4) * 512;
+		memcpy(req->buffer, current_drive->sbp_buf + offs, 512);
+		req->nr_sectors--;
+		req->sector++;
+		req->buffer += 512;
+	}
+}
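+/*
+ *  Worked example for the mapping above (illustrative comment only):
+ *  a CD frame is 2048 bytes = four 512-byte Linux sectors, hence the
+ *  sector/4 tests.  With sbp_first_frame == 100, a request for sector
+ *  403 hits buffered frame 403/4 == 100 and copies 512 bytes from
+ *  offset (403 - 100*4) * 512 == 1536 within the frame buffer.
+ */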
+/*==========================================================================*/
+/*
+ *  special end_request for sbpcd to solve CURRENT==NULL bug. (GTL)
+ *  GTL = Gonzalo Tornaria <tornaria@cmat.edu.uy>
+ *
+ *  This is a kludge so we don't need to modify end_request.
+ *  We put the req we take out after INIT_REQUEST in the requests list,
+ *  so that end_request will discard it.
+ *
+ *  The bug could be present in other block devices, perhaps we
+ *  should modify INIT_REQUEST and end_request instead, and
+ *  change every block device..
+ *
+ *  Could be a race here?? Could e.g. a timer interrupt schedule() us?
+ *  If so, we should copy end_request here, and do it right.. (or
+ *  modify end_request and the block devices).
+ *
+ *  In any case, the race here would be much smaller than it was, and
+ *  I couldn't reproduce it.
+ *
+ *  The race could be: suppose CURRENT==NULL. We put our req in the list,
+ *  and we are scheduled. Other process takes over, and gets into
+ *  do_sbpcd_request. It sees CURRENT!=NULL (it is == to our req), so it
+ *  proceeds. It ends, so CURRENT is now NULL.. Now we awake somewhere in
+ *  end_request, but now CURRENT==NULL... oops!
+ *
+ */
+#undef DEBUG_GTL
+
+/*==========================================================================*/
+/*
+ *  I/O request routine, called from Linux kernel.
+ */
+static void do_sbpcd_request(request_queue_t * q)
 {
-	struct sbpcd_drive *p = cdi->handle;
-	int i, st, j;
-	
-	msg(DBG_IO2,"ioctl(%s, 0x%08lX, 0x%08p)\n", cdi->name, cmd, arg);
-	if (p->drv_id==-1) {
-		msg(DBG_INF, "ioctl: bad device: %s\n", cdi->name);
-		return (-ENXIO);             /* no such drive */
+	u_int block;
+	u_int nsect;
+	int status_tries, data_tries;
+	struct request *req;
+	struct sbpcd_drive *p;
+#ifdef DEBUG_GTL
+	static int xx_nr=0;
+	int xnr;
+#endif
+
+ request_loop:
+#ifdef DEBUG_GTL
+	xnr=++xx_nr;
+
+	req = elv_next_request(q);
+
+	if (!req)
+	{
+		printk( "do_sbpcd_request[%di](NULL), Pid:%d, Time:%li\n",
+			xnr, current->pid, jiffies);
+		printk( "do_sbpcd_request[%do](NULL) end 0 (null), Time:%li\n",
+			xnr, jiffies);
+		return;
 	}
+
+	printk(" do_sbpcd_request[%di](%p:%ld+%ld), Pid:%d, Time:%li\n",
+		xnr, req, req->sector, req->nr_sectors, current->pid, jiffies);
+#endif
+
+	req = elv_next_request(q);	/* take out our request so no other */
+	if (!req)
+		return;
+
+	if (req->sector == -1)
+		end_request(req, 0);
+	spin_unlock_irq(q->queue_lock);
+
 	down(&ioctl_read_sem);
+	if (rq_data_dir(elv_next_request(q)) != READ)
+	{
+		msg(DBG_INF, "bad cmd %d\n", req->cmd[0]);
+		goto err_done;
+	}
+	p = req->rq_disk->private_data;
+#if OLD_BUSY
+	while (busy_audio) sbp_sleep(HZ); /* wait a bit */
+	busy_data=1;
+#endif /* OLD_BUSY */
+
+	if (p->audio_state==audio_playing) goto err_done;
 	if (p != current_drive)
 		switch_drive(p);
-	
-	msg(DBG_IO2,"ioctl: device %s, request %04X\n",cdi->name,cmd);
-	switch (cmd) 		/* Sun-compatible */
+
+	block = req->sector; /* always numbered as 512-byte-pieces */
+	nsect = req->nr_sectors; /* always counted as 512-byte-pieces */
+
+	msg(DBG_BSZ,"read sector %d (%d sectors)\n", block, nsect);
+#if 0
+	msg(DBG_MUL,"read LBA %d\n", block/4);
+#endif
+
+	sbp_transfer(req);
+	/* if we satisfied the request from the buffer, we're done. */
+	if (req->nr_sectors == 0)
 	{
-		
-	case CDROMPAUSE:     /* Pause the drive */
-		msg(DBG_IOC,"ioctl: CDROMPAUSE entered.\n");
-		/* pause the drive unit when it is currently in PLAY mode,         */
-		/* or reset the starting and ending locations when in PAUSED mode. */
-		/* If applicable, at the next stopping point it reaches            */
-		/* the drive will discontinue playing.                             */
-		switch (current_drive->audio_state)
-		{
-		case audio_playing:
-			if (famL_drive) i=cc_ReadSubQ();
-			else i=cc_Pause_Resume(1);
-			if (i<0) RETURN_UP(-EIO);
-			if (famL_drive) i=cc_Pause_Resume(1);
-			else i=cc_ReadSubQ();
-			if (i<0) RETURN_UP(-EIO);
-			current_drive->pos_audio_start=current_drive->SubQ_run_tot;
-			current_drive->audio_state=audio_pausing;
-			RETURN_UP(0);
-		case audio_pausing:
-			i=cc_Seek(current_drive->pos_audio_start,1);
-			if (i<0) RETURN_UP(-EIO);
-			RETURN_UP(0);
-		default:
-			RETURN_UP(-EINVAL);
-		}
-		
-	case CDROMRESUME: /* resume paused audio play */
-		msg(DBG_IOC,"ioctl: CDROMRESUME entered.\n");
-		/* resume playing audio tracks when a previous PLAY AUDIO call has  */
-		/* been paused with a PAUSE command.                                */
-		/* It will resume playing from the location saved in SubQ_run_tot.  */
-		if (current_drive->audio_state!=audio_pausing) RETURN_UP(-EINVAL);
-		if (famL_drive)
-			i=cc_PlayAudio(current_drive->pos_audio_start,
-				       current_drive->pos_audio_end);
-		else i=cc_Pause_Resume(3);
-		if (i<0) RETURN_UP(-EIO);
-		current_drive->audio_state=audio_playing;
-		RETURN_UP(0);
-		
-	case CDROMPLAYMSF:
-		msg(DBG_IOC,"ioctl: CDROMPLAYMSF entered.\n");
-#ifdef SAFE_MIXED
-		if (current_drive->has_data>1) RETURN_UP(-EBUSY);
-#endif /* SAFE_MIXED */ 
-		if (current_drive->audio_state==audio_playing)
+#ifdef DEBUG_GTL
+		printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 2, Time:%li\n",
+			xnr, req, req->sector, req->nr_sectors, jiffies);
+#endif
+		up(&ioctl_read_sem);
+		spin_lock_irq(q->queue_lock);
+		end_request(req, 1);
+		goto request_loop;
+	}
+
+#ifdef FUTURE
+	i=prepare(0,0); /* at the moment not really a hassle check, but ... */
+	if (i!=0)
+		msg(DBG_INF,"\"prepare\" tells error %d -- ignored\n", i);
+#endif /* FUTURE */
+
+	if (!st_spinning) cc_SpinUp();
+
+	for (data_tries=n_retries; data_tries > 0; data_tries--)
+	{
+		for (status_tries=3; status_tries > 0; status_tries--)
 		{
-			i=cc_Pause_Resume(1);
-			if (i<0) RETURN_UP(-EIO);
-			i=cc_ReadSubQ();
-			if (i<0) RETURN_UP(-EIO);
-			current_drive->pos_audio_start=current_drive->SubQ_run_tot;
-			i=cc_Seek(current_drive->pos_audio_start,1);
+			flags_cmd_out |= f_respo3;
+			cc_ReadStatus();
+			if (sbp_status() != 0) break;
+			if (st_check) cc_ReadError();
+			sbp_sleep(1);    /* wait a bit, try again */
 		}
-		memcpy(&msf, (void *) arg, sizeof(struct cdrom_msf));
-		/* values come as msf-bin */
-		current_drive->pos_audio_start = (msf.cdmsf_min0<<16) |
-                        (msf.cdmsf_sec0<<8) |
-				msf.cdmsf_frame0;
-		current_drive->pos_audio_end = (msf.cdmsf_min1<<16) |
-			(msf.cdmsf_sec1<<8) |
-				msf.cdmsf_frame1;
-		msg(DBG_IOX,"ioctl: CDROMPLAYMSF %08X %08X\n",
-		    current_drive->pos_audio_start,current_drive->pos_audio_end);
-		i=cc_PlayAudio(current_drive->pos_audio_start,current_drive->pos_audio_end);
-		if (i<0)
+		if (status_tries == 0)
 		{
-			msg(DBG_INF,"ioctl: cc_PlayAudio returns %d\n",i);
-			DriveReset();
-			current_drive->audio_state=0;
-			RETURN_UP(-EIO);
+			msg(DBG_INF,"sbp_status: failed after 3 tries in line %d\n", __LINE__);
+			break;
 		}
-		current_drive->audio_state=audio_playing;
-		RETURN_UP(0);
 		
-	case CDROMPLAYTRKIND: /* Play a track.  This currently ignores index. */
-		msg(DBG_IOC,"ioctl: CDROMPLAYTRKIND entered.\n");
-#ifdef SAFE_MIXED
-		if (current_drive->has_data>1) RETURN_UP(-EBUSY);
-#endif /* SAFE_MIXED */ 
-		if (current_drive->audio_state==audio_playing)
+		sbp_read_cmd(req);
+		sbp_sleep(0);
+		if (sbp_data(req) != 0)
 		{
-			msg(DBG_IOX,"CDROMPLAYTRKIND: already audio_playing.\n");
-#if 1
-			RETURN_UP(0); /* just let us play on */
-#else
-			RETURN_UP(-EINVAL); /* play on, but say "error" */
+#ifdef SAFE_MIXED
+			current_drive->has_data=2; /* is really a data disk */
+#endif /* SAFE_MIXED */
+#ifdef DEBUG_GTL
+			printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 3, Time:%li\n",
+				xnr, req, req->sector, req->nr_sectors, jiffies);
 #endif
+			up(&ioctl_read_sem);
+			spin_lock_irq(q->queue_lock);
+			end_request(req, 1);
+			goto request_loop;
 		}
-		memcpy(&ti,(void *) arg,sizeof(struct cdrom_ti));
-		msg(DBG_IOX,"ioctl: trk0: %d, ind0: %d, trk1:%d, ind1:%d\n",
-		    ti.cdti_trk0,ti.cdti_ind0,ti.cdti_trk1,ti.cdti_ind1);
-		if (ti.cdti_trk0<current_drive->n_first_track) RETURN_UP(-EINVAL);
-		if (ti.cdti_trk0>current_drive->n_last_track) RETURN_UP(-EINVAL);
-		if (ti.cdti_trk1<ti.cdti_trk0) ti.cdti_trk1=ti.cdti_trk0;
-		if (ti.cdti_trk1>current_drive->n_last_track) ti.cdti_trk1=current_drive->n_last_track;
-		current_drive->pos_audio_start=current_drive->TocBuffer[ti.cdti_trk0].address;
-		current_drive->pos_audio_end=current_drive->TocBuffer[ti.cdti_trk1+1].address;
-		i=cc_PlayAudio(current_drive->pos_audio_start,current_drive->pos_audio_end);
-		if (i<0)
-		{
-			msg(DBG_INF,"ioctl: cc_PlayAudio returns %d\n",i);
-			DriveReset();
-			current_drive->audio_state=0;
-			RETURN_UP(-EIO);
-		}
-		current_drive->audio_state=audio_playing;
-		RETURN_UP(0);
-		
-	case CDROMREADTOCHDR:        /* Read the table of contents header */
-		msg(DBG_IOC,"ioctl: CDROMREADTOCHDR entered.\n");
-		tochdr.cdth_trk0=current_drive->n_first_track;
-		tochdr.cdth_trk1=current_drive->n_last_track;
-		memcpy((void *) arg, &tochdr, sizeof(struct cdrom_tochdr));
-		RETURN_UP(0);
-		
-	case CDROMREADTOCENTRY:      /* Read an entry in the table of contents */
-		msg(DBG_IOC,"ioctl: CDROMREADTOCENTRY entered.\n");
-		memcpy(&tocentry, (void *) arg, sizeof(struct cdrom_tocentry));
-		i=tocentry.cdte_track;
-		if (i==CDROM_LEADOUT) i=current_drive->n_last_track+1;
-		else if (i<current_drive->n_first_track||i>current_drive->n_last_track)
-                  RETURN_UP(-EINVAL);
-		tocentry.cdte_adr=current_drive->TocBuffer[i].ctl_adr&0x0F;
-		tocentry.cdte_ctrl=(current_drive->TocBuffer[i].ctl_adr>>4)&0x0F;
-		tocentry.cdte_datamode=current_drive->TocBuffer[i].format;
-		if (tocentry.cdte_format==CDROM_MSF) /* MSF-bin required */
-		{
-			tocentry.cdte_addr.msf.minute=(current_drive->TocBuffer[i].address>>16)&0x00FF;
-			tocentry.cdte_addr.msf.second=(current_drive->TocBuffer[i].address>>8)&0x00FF;
-			tocentry.cdte_addr.msf.frame=current_drive->TocBuffer[i].address&0x00FF;
-		}
-		else if (tocentry.cdte_format==CDROM_LBA) /* blk required */
-			tocentry.cdte_addr.lba=msf2blk(current_drive->TocBuffer[i].address);
-		else RETURN_UP(-EINVAL);
-		memcpy((void *) arg, &tocentry, sizeof(struct cdrom_tocentry));
-		RETURN_UP(0);
-		
-	case CDROMSTOP:      /* Spin down the drive */
-		msg(DBG_IOC,"ioctl: CDROMSTOP entered.\n");
-#ifdef SAFE_MIXED
-		if (current_drive->has_data>1) RETURN_UP(-EBUSY);
-#endif /* SAFE_MIXED */ 
-		i=cc_Pause_Resume(1);
-		current_drive->audio_state=0;
-#if 0
-		cc_DriveReset();
+	}
+
+ err_done:
+#if OLD_BUSY
+	busy_data=0;
+#endif /* OLD_BUSY */
+#ifdef DEBUG_GTL
+	printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 4 (error), Time:%li\n",
+		xnr, req, req->sector, req->nr_sectors, jiffies);
 #endif
-		RETURN_UP(i);
-		
-	case CDROMSTART:  /* Spin up the drive */
-		msg(DBG_IOC,"ioctl: CDROMSTART entered.\n");
-		cc_SpinUp();
-		current_drive->audio_state=0;
-		RETURN_UP(0);
-		
-	case CDROMVOLCTRL:   /* Volume control */
-		msg(DBG_IOC,"ioctl: CDROMVOLCTRL entered.\n");
-		memcpy(&volctrl,(char *) arg,sizeof(volctrl));
-		current_drive->vol_chan0=0;
-		current_drive->vol_ctrl0=volctrl.channel0;
-		current_drive->vol_chan1=1;
-		current_drive->vol_ctrl1=volctrl.channel1;
-		i=cc_SetVolume();
-		RETURN_UP(0);
-		
-	case CDROMVOLREAD:   /* read Volume settings from drive */
-		msg(DBG_IOC,"ioctl: CDROMVOLREAD entered.\n");
-		st=cc_GetVolume();
-		if (st<0) RETURN_UP(st);
-		volctrl.channel0=current_drive->vol_ctrl0;
-		volctrl.channel1=current_drive->vol_ctrl1;
-		volctrl.channel2=0;
-		volctrl.channel2=0;
-		memcpy((void *)arg,&volctrl,sizeof(volctrl));
-		RETURN_UP(0);
+	up(&ioctl_read_sem);
+	sbp_sleep(0);    /* wait a bit, try again */
+	spin_lock_irq(q->queue_lock);
+	end_request(req, 0);
+	goto request_loop;
+}
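+/*
+ *  Locking pattern of the loop above (illustrative comment only):
+ *  the block layer calls us with q->queue_lock held; it is dropped
+ *  before anything that may sleep (down(), sbp_sleep()) and re-taken
+ *  before every end_request(), i.e. each exit path ends with
+ *      up(&ioctl_read_sem);
+ *      spin_lock_irq(q->queue_lock);
+ *      end_request(req, uptodate);
+ */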
+/*==========================================================================*/
+/*
+ *  build and send the READ command.
+ */
+static void sbp_read_cmd(struct request *req)
+{
+#undef OLD
 
-	case CDROMSUBCHNL:   /* Get subchannel info */
-		msg(DBG_IOS,"ioctl: CDROMSUBCHNL entered.\n");
-		/* Bogus, I can do better than this! --AJK
-		if ((st_spinning)||(!subq_valid)) {
-			i=cc_ReadSubQ();
-			if (i<0) RETURN_UP(-EIO);
-		}
-		*/
-		i=cc_ReadSubQ();
-		if (i<0) {
-			j=cc_ReadError(); /* clear out error status from drive */
-			current_drive->audio_state=CDROM_AUDIO_NO_STATUS;
-			/* get and set the disk state here, 
-			probably not the right place, but who cares!
-			It makes it work properly! --AJK */
-			if (current_drive->CD_changed==0xFF) {
-				msg(DBG_000,"Disk changed detect\n");
-				current_drive->diskstate_flags &= ~cd_size_bit;
-			}
-			RETURN_UP(-EIO);
-		}
-		if (current_drive->CD_changed==0xFF) {
-			/* reread the TOC because the disk has changed! --AJK */
-			msg(DBG_000,"Disk changed STILL detected, rereading TOC!\n");
-			i=DiskInfo();
-			if(i==0) {
-				current_drive->CD_changed=0x00; /* cd has changed, procede, */
-				RETURN_UP(-EIO); /* and get TOC, etc on next try! --AJK */
-			} else {
-				RETURN_UP(-EIO); /* we weren't ready yet! --AJK */
-			}
-		}
-		memcpy(&SC, (void *) arg, sizeof(struct cdrom_subchnl));
-		/* 
-			This virtual crap is very bogus! 
-			It doesn't detect when the cd is done playing audio!
-			Lets do this right with proper hardware register reading!
-		*/
-		cc_ReadStatus();
-		i=ResponseStatus();
-		msg(DBG_000,"Drive Status: door_locked =%d.\n", st_door_locked);
-		msg(DBG_000,"Drive Status: door_closed =%d.\n", st_door_closed);
-		msg(DBG_000,"Drive Status: caddy_in =%d.\n", st_caddy_in);
-		msg(DBG_000,"Drive Status: disk_ok =%d.\n", st_diskok);
-		msg(DBG_000,"Drive Status: spinning =%d.\n", st_spinning);
-		msg(DBG_000,"Drive Status: busy =%d.\n", st_busy);
-		/* st_busy indicates if it's _ACTUALLY_ playing audio */
-		switch (current_drive->audio_state)
+	int i;
+	int block;
+
+	current_drive->sbp_first_frame=current_drive->sbp_last_frame=-1;      /* purge buffer */
+	current_drive->sbp_current = 0;
+	block=req->sector/4;
+	if (block+current_drive->sbp_bufsiz <= current_drive->CDsize_frm)
+		current_drive->sbp_read_frames = current_drive->sbp_bufsiz;
+	else
+	{
+		current_drive->sbp_read_frames=current_drive->CDsize_frm-block;
+		/* avoid reading past end of data */
+		if (current_drive->sbp_read_frames < 1)
 		{
-		case audio_playing:
-			if(st_busy==0) {
-				/* CD has stopped playing audio --AJK */
-				current_drive->audio_state=audio_completed;
-				SC.cdsc_audiostatus=CDROM_AUDIO_COMPLETED;
-			} else {
-				SC.cdsc_audiostatus=CDROM_AUDIO_PLAY;
-			}
-			break;
-		case audio_pausing:
-			SC.cdsc_audiostatus=CDROM_AUDIO_PAUSED;
-			break;
-		case audio_completed:
-			SC.cdsc_audiostatus=CDROM_AUDIO_COMPLETED;
-			break;
-		default:
-			SC.cdsc_audiostatus=CDROM_AUDIO_NO_STATUS;
-			break;
+			msg(DBG_INF,"requested frame %d, CD size %d ???\n",
+			    block, current_drive->CDsize_frm);
+			current_drive->sbp_read_frames=1;
 		}
-		SC.cdsc_adr=current_drive->SubQ_ctl_adr;
-		SC.cdsc_ctrl=current_drive->SubQ_ctl_adr>>4;
-		SC.cdsc_trk=bcd2bin(current_drive->SubQ_trk);
-		SC.cdsc_ind=bcd2bin(current_drive->SubQ_pnt_idx);
-		if (SC.cdsc_format==CDROM_LBA)
+	}
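+	/*
+	 * Clamp example (illustrative comment only): with a 4-frame buffer
+	 * and CDsize_frm == 1002, a read starting at frame 1000 is trimmed
+	 * to 1002 - 1000 == 2 frames instead of running past the disk end.
+	 */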
+
+	flags_cmd_out = f_putcmd | f_respo2 | f_ResponseStatus | f_obey_p_check;
+	clr_cmdbuf();
+	if (famV_drive)
+	{
+		drvcmd[0]=CMDV_READ;
+		lba2msf(block,&drvcmd[1]); /* msf-bcd format required */
+		bin2bcdx(&drvcmd[1]);
+		bin2bcdx(&drvcmd[2]);
+		bin2bcdx(&drvcmd[3]);
+		drvcmd[4]=current_drive->sbp_read_frames>>8;
+		drvcmd[5]=current_drive->sbp_read_frames&0xff;
+		drvcmd[6]=0x02; /* flag "msf-bcd" */
+	}
+	else if (fam0L_drive)
+	{
+		flags_cmd_out |= f_lopsta | f_getsta | f_bit1;
+		if (current_drive->xa_byte==0x20)
 		{
-			SC.cdsc_absaddr.lba=msf2blk(current_drive->SubQ_run_tot);
-			SC.cdsc_reladdr.lba=msf2blk(current_drive->SubQ_run_trk);
+			cmd_type=READ_M2;
+			drvcmd[0]=CMD0_READ_XA; /* "read XA frames", old drives */
+			drvcmd[1]=(block>>16)&0x0ff;
+			drvcmd[2]=(block>>8)&0x0ff;
+			drvcmd[3]=block&0x0ff;
+			drvcmd[4]=(current_drive->sbp_read_frames>>8)&0x0ff;
+			drvcmd[5]=current_drive->sbp_read_frames&0x0ff;
 		}
-		else /* not only if (SC.cdsc_format==CDROM_MSF) */
+		else
 		{
-			SC.cdsc_absaddr.msf.minute=(current_drive->SubQ_run_tot>>16)&0x00FF;
-			SC.cdsc_absaddr.msf.second=(current_drive->SubQ_run_tot>>8)&0x00FF;
-			SC.cdsc_absaddr.msf.frame=current_drive->SubQ_run_tot&0x00FF;
-			SC.cdsc_reladdr.msf.minute=(current_drive->SubQ_run_trk>>16)&0x00FF;
-			SC.cdsc_reladdr.msf.second=(current_drive->SubQ_run_trk>>8)&0x00FF;
-			SC.cdsc_reladdr.msf.frame=current_drive->SubQ_run_trk&0x00FF;
+			drvcmd[0]=CMD0_READ; /* "read frames", old drives */
+			if (current_drive->drv_type>=drv_201)
+			{
+				lba2msf(block,&drvcmd[1]); /* msf-bcd format required */
+				bin2bcdx(&drvcmd[1]);
+				bin2bcdx(&drvcmd[2]);
+				bin2bcdx(&drvcmd[3]);
+			}
+			else
+			{
+				drvcmd[1]=(block>>16)&0x0ff;
+				drvcmd[2]=(block>>8)&0x0ff;
+				drvcmd[3]=block&0x0ff;
+			}
+			drvcmd[4]=(current_drive->sbp_read_frames>>8)&0x0ff;
+			drvcmd[5]=current_drive->sbp_read_frames&0x0ff;
+			drvcmd[6]=(current_drive->drv_type<drv_201)?0:2; /* flag "lba or msf-bcd format" */
 		}
-		memcpy((void *) arg, &SC, sizeof(struct cdrom_subchnl));
-		msg(DBG_IOS,"CDROMSUBCHNL: %1X %02X %08X %08X %02X %02X %06X %06X\n",
-		    SC.cdsc_format,SC.cdsc_audiostatus,
-		    SC.cdsc_adr,SC.cdsc_ctrl,
-		    SC.cdsc_trk,SC.cdsc_ind,
-		    SC.cdsc_absaddr,SC.cdsc_reladdr);
-		RETURN_UP(0);
-		
-	default:
-		msg(DBG_IOC,"ioctl: unknown function request %04X\n", cmd);
-		RETURN_UP(-EINVAL);
-	} /* end switch(cmd) */
-}
-/*==========================================================================*/
-/*
- *  Take care of the different block sizes between cdrom and Linux.
- */
-static void sbp_transfer(struct request *req)
-{
-	long offs;
-	
-	while ( (req->nr_sectors > 0) &&
-	       (req->sector/4 >= current_drive->sbp_first_frame) &&
-	       (req->sector/4 <= current_drive->sbp_last_frame) )
+	}
+	else if (fam1_drive)
 	{
-		offs = (req->sector - current_drive->sbp_first_frame * 4) * 512;
-		memcpy(req->buffer, current_drive->sbp_buf + offs, 512);
-		req->nr_sectors--;
-		req->sector++;
-		req->buffer += 512;
+		drvcmd[0]=CMD1_READ;
+		lba2msf(block,&drvcmd[1]); /* msf-bin format required */
+		drvcmd[5]=(current_drive->sbp_read_frames>>8)&0x0ff;
+		drvcmd[6]=current_drive->sbp_read_frames&0x0ff;
 	}
-}
-/*==========================================================================*/
-/*
- *  special end_request for sbpcd to solve CURRENT==NULL bug. (GTL)
- *  GTL = Gonzalo Tornaria <tornaria@cmat.edu.uy>
- *
- *  This is a kludge so we don't need to modify end_request.
- *  We put the req we take out after INIT_REQUEST in the requests list,
- *  so that end_request will discard it. 
- *
- *  The bug could be present in other block devices, perhaps we
- *  should modify INIT_REQUEST and end_request instead, and
- *  change every block device.. 
- *
- *  Could be a race here?? Could e.g. a timer interrupt schedule() us?
- *  If so, we should copy end_request here, and do it right.. (or
- *  modify end_request and the block devices).
- *
- *  In any case, the race here would be much small than it was, and
- *  I couldn't reproduce..
- *
- *  The race could be: suppose CURRENT==NULL. We put our req in the list,
- *  and we are scheduled. Other process takes over, and gets into
- *  do_sbpcd_request. It sees CURRENT!=NULL (it is == to our req), so
- *  proceeds. It ends, so CURRENT is now NULL.. Now we awake somewhere in
- *  end_request, but now CURRENT==NULL... oops!
- *
- */
-#undef DEBUG_GTL
-
+	else if (fam2_drive)
+	{
+		drvcmd[0]=CMD2_READ;
+		lba2msf(block,&drvcmd[1]); /* msf-bin format required */
+		drvcmd[4]=(current_drive->sbp_read_frames>>8)&0x0ff;
+		drvcmd[5]=current_drive->sbp_read_frames&0x0ff;
+		drvcmd[6]=0x02;
+	}
+	else if (famT_drive)
+	{
+		drvcmd[0]=CMDT_READ;
+		drvcmd[2]=(block>>24)&0x0ff;
+		drvcmd[3]=(block>>16)&0x0ff;
+		drvcmd[4]=(block>>8)&0x0ff;
+		drvcmd[5]=block&0x0ff;
+		drvcmd[7]=(current_drive->sbp_read_frames>>8)&0x0ff;
+		drvcmd[8]=current_drive->sbp_read_frames&0x0ff;
+	}
+	flags_cmd_out=f_putcmd;
+	response_count=0;
+	i=cmd_out();
+	if (i<0) msg(DBG_INF,"error giving READ command: %0d\n", i);
+	return;
+}
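+/*
+ *  Address-format example for the commands built above (illustrative
+ *  comment only, assuming the usual 150-frame/2-second lead-in offset
+ *  added by lba2msf): LBA 4000 becomes 4150 frames, i.e. MSF 00:55:25
+ *  (4150 == 0*60*75 + 55*75 + 25); on the msf-bcd drives bin2bcdx()
+ *  then recodes the bytes to BCD, giving 0x00 0x55 0x25.
+ */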
 /*==========================================================================*/
 /*
- *  I/O request routine, called from Linux kernel.
+ *  Check the completion of the read-data command.  On success, read
+ *  up to current_drive->sbp_bufsiz * 2048 bytes of data from the disk
+ *  into the buffer.
  */
-static void do_sbpcd_request(request_queue_t * q)
+static int sbp_data(struct request *req)
 {
-	u_int block;
-	u_int nsect;
-	int status_tries, data_tries;
-	struct request *req;
-	struct sbpcd_drive *p;
-#ifdef DEBUG_GTL
-	static int xx_nr=0;
-	int xnr;
-#endif
-
- request_loop:
-#ifdef DEBUG_GTL
-	xnr=++xx_nr;
-
-	req = elv_next_request(q);
-
-	if (!req)
-	{
-		printk( "do_sbpcd_request[%di](NULL), Pid:%d, Time:%li\n",
-			xnr, current->pid, jiffies);
-		printk( "do_sbpcd_request[%do](NULL) end 0 (null), Time:%li\n",
-			xnr, jiffies);
-		return;
-	}
+	int i=0, j=0, l, frame;
+	u_int try=0;
+	u_long timeout;
+	u_char *p;
+	u_int data_tries = 0;
+	u_int data_waits = 0;
+	u_int data_retrying = 0;
+	int error_flag;
+	int xa_count;
+	int max_latency;
+	int success;
+	int wait;
+	int duration;
 
-	printk(" do_sbpcd_request[%di](%p:%ld+%ld), Pid:%d, Time:%li\n",
-		xnr, req, req->sector, req->nr_sectors, current->pid, jiffies);
+	error_flag=0;
+	success=0;
+#if LONG_TIMING
+	max_latency=9*HZ;
+#else
+	if (current_drive->f_multisession) max_latency=15*HZ;
+	else max_latency=5*HZ;
 #endif
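+	/*
+	 * max_latency is the per-frame DATA_READY watchdog armed on
+	 * data_timer below: 5 s for ordinary disks, 15 s for multisession
+	 * ones, or a flat 9 s with LONG_TIMING (illustrative comment only).
+	 */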
+	duration=jiffies;
+	for (frame=0;frame<current_drive->sbp_read_frames&&!error_flag; frame++)
+	{
+		SBPCD_CLI;
 
-	req = elv_next_request(q);	/* take out our request so no other */
-	if (!req)
-		return;
+		del_timer(&data_timer);
+		data_timer.expires=jiffies+max_latency;
+		timed_out_data=0;
+		add_timer(&data_timer);
+		while (!timed_out_data)
+		{
+			if (current_drive->f_multisession) try=maxtim_data*4;
+			else try=maxtim_data;
+			msg(DBG_000,"sbp_data: CDi_status loop: try=%d.\n",try);
+			for ( ; try!=0;try--)
+			{
+				j=inb(CDi_status);
+				if (!(j&s_not_data_ready)) break;
+				if (!(j&s_not_result_ready)) break;
+				if (fam0LV_drive) if (j&s_attention) break;
+			}
+			if (!(j&s_not_data_ready)) goto data_ready;
+			if (try==0)
+			{
+				if (data_retrying == 0) data_waits++;
+				data_retrying = 1;
+				msg(DBG_000,"sbp_data: CDi_status loop: sleeping.\n");
+				sbp_sleep(1);
+				try = 1;
+			}
+		}
+		msg(DBG_INF,"sbp_data: CDi_status loop expired.\n");
+	data_ready:
+		del_timer(&data_timer);
 
-	if (req -> sector == -1)
-		end_request(req, 0);
-	spin_unlock_irq(q->queue_lock);
+		if (timed_out_data)
+		{
+			msg(DBG_INF,"sbp_data: CDi_status timeout (timed_out_data) (%02X).\n", j);
+			error_flag++;
+		}
+		if (try==0)
+		{
+			msg(DBG_INF,"sbp_data: CDi_status timeout (try=0) (%02X).\n", j);
+			error_flag++;
+		}
+		if (!(j&s_not_result_ready))
+		{
+			msg(DBG_INF, "sbp_data: RESULT_READY where DATA_READY awaited (%02X).\n", j);
+			response_count=20;
+			j=ResponseInfo();
+			j=inb(CDi_status);
+		}
+		if (j&s_not_data_ready)
+		{
+			if ((current_drive->ored_ctl_adr&0x40)==0)
+				msg(DBG_INF, "CD contains no data tracks.\n");
+			else msg(DBG_INF, "sbp_data: DATA_READY timeout (%02X).\n", j);
+			error_flag++;
+		}
+		SBPCD_STI;
+		if (error_flag) break;
 
-	down(&ioctl_read_sem);
-	if (rq_data_dir(elv_next_request(q)) != READ)
-	{
-		msg(DBG_INF, "bad cmd %d\n", req->cmd[0]);
-		goto err_done;
+		msg(DBG_000, "sbp_data: beginning to read.\n");
+		p = current_drive->sbp_buf + frame *  CD_FRAMESIZE;
+		if (sbpro_type==1) OUT(CDo_sel_i_d,1);
+		if (cmd_type==READ_M2) {
+			if (do_16bit) insw(CDi_data, xa_head_buf, CD_XA_HEAD>>1);
+			else insb(CDi_data, xa_head_buf, CD_XA_HEAD);
+		}
+		if (do_16bit) insw(CDi_data, p, CD_FRAMESIZE>>1);
+		else insb(CDi_data, p, CD_FRAMESIZE);
+		if (cmd_type==READ_M2) {
+			if (do_16bit) insw(CDi_data, xa_tail_buf, CD_XA_TAIL>>1);
+			else insb(CDi_data, xa_tail_buf, CD_XA_TAIL);
+		}
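+		/*
+		 * Sizing note (illustrative comment only): one 2048-byte
+		 * frame is CD_FRAMESIZE>>1 == 1024 16-bit port reads (or
+		 * 2048 8-bit ones); XA frames additionally carry the
+		 * CD_XA_HEAD/CD_XA_TAIL bytes moved around the payload.
+		 */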
+		current_drive->sbp_current++;
+		if (sbpro_type==1) OUT(CDo_sel_i_d,0);
+		if (cmd_type==READ_M2)
+		{
+			for (xa_count=0;xa_count<CD_XA_HEAD;xa_count++)
+				sprintf(&msgbuf[xa_count*3], " %02X", xa_head_buf[xa_count]);
+			msgbuf[xa_count*3]=0;
+			msg(DBG_XA1,"xa head:%s\n", msgbuf);
+		}
+		data_retrying = 0;
+		data_tries++;
+		if (data_tries >= 1000)
+		{
+			msg(DBG_INF,"sbp_data() statistics: %d waits in %d frames.\n", data_waits, data_tries);
+			data_waits = data_tries = 0;
+		}
 	}
-	p = req->rq_disk->private_data;
-#if OLD_BUSY
-	while (busy_audio) sbp_sleep(HZ); /* wait a bit */
-	busy_data=1;
-#endif /* OLD_BUSY */
-	
-	if (p->audio_state==audio_playing) goto err_done;
-	if (p != current_drive)
-		switch_drive(p);
-
-	block = req->sector; /* always numbered as 512-byte-pieces */
-	nsect = req->nr_sectors; /* always counted as 512-byte-pieces */
-	
-	msg(DBG_BSZ,"read sector %d (%d sectors)\n", block, nsect);
+	duration=jiffies-duration;
+	msg(DBG_TEA,"time to read %d frames: %d jiffies.\n",frame,duration);
+	if (famT_drive)
+	{
+		wait=8;
+		do
+		{
+			if (teac==2)
+			{
+				if ((i=CDi_stat_loop_T()) == -1) break;
+			}
+			else
+			{
+				sbp_sleep(1);
+				OUT(CDo_sel_i_d,0);
+				i=inb(CDi_status);
+			}
+			if (!(i&s_not_data_ready))
+			{
+				OUT(CDo_sel_i_d,1);
+				j=0;
+				do
+				{
+					if (do_16bit) i=inw(CDi_data);
+					else i=inb(CDi_data);
+					j++;
+					i=inb(CDi_status);
+				}
+				while (!(i&s_not_data_ready));
+				msg(DBG_TEA, "==========too much data (%d bytes/words)==============.\n", j);
+			}
+			if (!(i&s_not_result_ready))
+			{
+				OUT(CDo_sel_i_d,0);
+				l=0;
+				do
+				{
+					infobuf[l++]=inb(CDi_info);
+					i=inb(CDi_status);
+				}
+				while (!(i&s_not_result_ready));
+				if (infobuf[0]==0x00) success=1;
+#if 1
+				for (j=0;j<l;j++) sprintf(&msgbuf[j*3], " %02X", infobuf[j]);
+				msgbuf[j*3]=0;
+				msg(DBG_TEA,"sbp_data info response:%s\n", msgbuf);
+#endif
+				if (infobuf[0]==0x02)
+				{
+					error_flag++;
+					do
+					{
+						++recursion;
+						if (recursion>1) msg(DBG_TEA,"cmd_out_T READ_ERR recursion (sbp_data): %d !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",recursion);
+						else msg(DBG_TEA,"sbp_data: CMDT_READ_ERR necessary.\n");
+						clr_cmdbuf();
+						drvcmd[0]=CMDT_READ_ERR;
+						j=cmd_out_T(); /* !!! recursive here !!! */
+						--recursion;
+						sbp_sleep(1);
+					}
+					while (j<0);
+					current_drive->error_state=infobuf[2];
+					current_drive->b3=infobuf[3];
+					current_drive->b4=infobuf[4];
+				}
+				break;
+			}
+			else
+			{
 #if 0
-	msg(DBG_MUL,"read LBA %d\n", block/4);
+				msg(DBG_TEA, "============= waiting for result=================.\n");
+				sbp_sleep(1);
 #endif
-	
-	sbp_transfer(req);
-	/* if we satisfied the request from the buffer, we're done. */
-	if (req->nr_sectors == 0)
+			}
+		}
+		while (wait--);
+	}
+
+	if (error_flag) /* must have been spurious D_RDY or (ATTN&&!D_RDY) */
 	{
-#ifdef DEBUG_GTL
-		printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 2, Time:%li\n",
-			xnr, req, req->sector, req->nr_sectors, jiffies);
+		msg(DBG_TEA, "================error flag: %d=================.\n", error_flag);
+		msg(DBG_INF,"sbp_data: read aborted by drive.\n");
+#if 1
+		i=cc_DriveReset(); /* ugly fix to prevent a hang */
+#else
+		i=cc_ReadError();
 #endif
-		up(&ioctl_read_sem);
-		spin_lock_irq(q->queue_lock);
-		end_request(req, 1);
-		goto request_loop;
+		return (0);
 	}
-
-#ifdef FUTURE
-	i=prepare(0,0); /* at moment not really a hassle check, but ... */
-	if (i!=0)
-		msg(DBG_INF,"\"prepare\" tells error %d -- ignored\n", i);
-#endif /* FUTURE */ 
-	
-	if (!st_spinning) cc_SpinUp();
 	
-	for (data_tries=n_retries; data_tries > 0; data_tries--)
+	if (fam0LV_drive)
 	{
-		for (status_tries=3; status_tries > 0; status_tries--)
+		SBPCD_CLI;
+		i=maxtim_data;
+		for (timeout=jiffies+HZ; time_before(jiffies, timeout); timeout--)
 		{
-			flags_cmd_out |= f_respo3;
-			cc_ReadStatus();
-			if (sbp_status() != 0) break;
-			if (st_check) cc_ReadError();
-			sbp_sleep(1);    /* wait a bit, try again */
+			for ( ;i!=0;i--)
+			{
+				j=inb(CDi_status);
+				if (!(j&s_not_data_ready)) break;
+				if (!(j&s_not_result_ready)) break;
+				if (j&s_attention) break;
+			}
+			if (i != 0 || time_after_eq(jiffies, timeout)) break;
+			sbp_sleep(0);
+			i = 1;
 		}
-		if (status_tries == 0)
+		if (i==0) msg(DBG_INF,"status timeout after READ.\n");
+		if (!(j&s_attention))
 		{
-			msg(DBG_INF,"sbp_status: failed after 3 tries in line %d\n", __LINE__);
-			break;
+			msg(DBG_INF,"sbp_data: timeout waiting DRV_ATTN - retrying.\n");
+			i=cc_DriveReset();  /* ugly fix to prevent a hang */
+			SBPCD_STI;
+			return (0);
 		}
-		
-		sbp_read_cmd(req);
-		sbp_sleep(0);
-		if (sbp_data(req) != 0)
+		SBPCD_STI;
+	}
+
+#if 0
+	if (!success)
+#endif
+		do
 		{
-#ifdef SAFE_MIXED
-			current_drive->has_data=2; /* is really a data disk */
-#endif /* SAFE_MIXED */ 
-#ifdef DEBUG_GTL
-			printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 3, Time:%li\n",
-				xnr, req, req->sector, req->nr_sectors, jiffies);
+			if (fam0LV_drive) cc_ReadStatus();
+#if 1
+			if (famT_drive) msg(DBG_TEA, "================before ResponseStatus=================.\n");
 #endif
-			up(&ioctl_read_sem);
-			spin_lock_irq(q->queue_lock);
-			end_request(req, 1);
-			goto request_loop;
+			i=ResponseStatus();  /* builds status_bits, returns orig. status (old) or faked p_success (new) */
+#if 1
+			if (famT_drive)	msg(DBG_TEA, "================ResponseStatus: %d=================.\n", i);
+#endif
+			if (i<0)
+			{
+				msg(DBG_INF,"bad cc_ReadStatus after read: %02X\n", current_drive->status_bits);
+				return (0);
+			}
 		}
+		while ((fam0LV_drive)&&(!st_check)&&(!(i&p_success)));
+	if (st_check)
+	{
+		i=cc_ReadError();
+		msg(DBG_INF,"cc_ReadError was necessary after read: %d\n",i);
+		return (0);
+	}
+	if (fatal_err)
+	{
+		fatal_err=0;
+		current_drive->sbp_first_frame=current_drive->sbp_last_frame=-1;      /* purge buffer */
+		current_drive->sbp_current = 0;
+		msg(DBG_INF,"sbp_data: fatal_err - retrying.\n");
+		return (0);
 	}
 	
- err_done:
-#if OLD_BUSY
-	busy_data=0;
-#endif /* OLD_BUSY */
-#ifdef DEBUG_GTL
-	printk(" do_sbpcd_request[%do](%p:%ld+%ld) end 4 (error), Time:%li\n",
-		xnr, req, req->sector, req->nr_sectors, jiffies);
-#endif
-	up(&ioctl_read_sem);
-	sbp_sleep(0);    /* wait a bit, try again */
-	spin_lock_irq(q->queue_lock);
-	end_request(req, 0);
-	goto request_loop;
+	current_drive->sbp_first_frame = req->sector / 4;
+	current_drive->sbp_last_frame = current_drive->sbp_first_frame + current_drive->sbp_read_frames - 1;
+	sbp_transfer(req);
+	return (1);
 }
 /*==========================================================================*/
-/*
- *  build and send the READ command.
- */
-static void sbp_read_cmd(struct request *req)
+
+static int sbpcd_block_open(struct inode *inode, struct file *file)
 {
-#undef OLD
+	struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data;
+	return cdrom_open(p->sbpcd_infop, inode, file);
+}
 
-	int i;
-	int block;
+static int sbpcd_block_release(struct inode *inode, struct file *file)
+{
+	struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data;
+	return cdrom_release(p->sbpcd_infop, file);
+}
+
+static int sbpcd_block_ioctl(struct inode *inode, struct file *file,
+				unsigned cmd, unsigned long arg)
+{
+	struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data;
+	struct cdrom_device_info *cdi = p->sbpcd_infop;
+	int ret, i;
+
+	ret = cdrom_ioctl(file, cdi, inode, cmd, arg);
+	if (ret != -ENOSYS)
+		return ret;
+
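+	/*
+	 * cdrom_ioctl() handles the generic requests; only when the common
+	 * layer returns -ENOSYS does the driver-private, Sun-compatible
+	 * switch below get a look in (illustrative comment only).
+	 */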
+	msg(DBG_IO2,"ioctl(%s, 0x%08lX, 0x%08lX)\n", cdi->name, cmd, arg);
+	if (p->drv_id==-1) {
+		msg(DBG_INF, "ioctl: bad device: %s\n", cdi->name);
+		return (-ENXIO);             /* no such drive */
+	}
+	down(&ioctl_read_sem);
+	if (p != current_drive)
+		switch_drive(p);
 	
-	current_drive->sbp_first_frame=current_drive->sbp_last_frame=-1;      /* purge buffer */
-	current_drive->sbp_current = 0;
-	block=req->sector/4;
-	if (block+current_drive->sbp_bufsiz <= current_drive->CDsize_frm)
-		current_drive->sbp_read_frames = current_drive->sbp_bufsiz;
-	else
+	msg(DBG_IO2,"ioctl: device %s, request %04X\n",cdi->name,cmd);
+	switch (cmd) 		/* Sun-compatible */
 	{
-		current_drive->sbp_read_frames=current_drive->CDsize_frm-block;
-		/* avoid reading past end of data */
-		if (current_drive->sbp_read_frames < 1)
+	case DDIOCSDBG:		/* DDI Debug */
+		if (!capable(CAP_SYS_ADMIN)) RETURN_UP(-EPERM);
+		i=sbpcd_dbg_ioctl(arg,1);
+		RETURN_UP(i);
+	case CDROMRESET:      /* hard reset the drive */
+		msg(DBG_IOC,"ioctl: CDROMRESET entered.\n");
+		i=DriveReset();
+		current_drive->audio_state=0;
+		RETURN_UP(i);
+
+	case CDROMREADMODE1:
+		msg(DBG_IOC,"ioctl: CDROMREADMODE1 requested.\n");
+#ifdef SAFE_MIXED
+		if (current_drive->has_data>1) RETURN_UP(-EBUSY);
+#endif /* SAFE_MIXED */
+		cc_ModeSelect(CD_FRAMESIZE);
+		cc_ModeSense();
+		current_drive->mode=READ_M1;
+		RETURN_UP(0);
+
+	case CDROMREADMODE2: /* not usable at the moment */
+		msg(DBG_IOC,"ioctl: CDROMREADMODE2 requested.\n");
+#ifdef SAFE_MIXED
+		if (current_drive->has_data>1) RETURN_UP(-EBUSY);
+#endif /* SAFE_MIXED */
+		cc_ModeSelect(CD_FRAMESIZE_RAW1);
+		cc_ModeSense();
+		current_drive->mode=READ_M2;
+		RETURN_UP(0);
+
+	case CDROMAUDIOBUFSIZ: /* configure the audio buffer size */
+		msg(DBG_IOC,"ioctl: CDROMAUDIOBUFSIZ entered.\n");
+		if (current_drive->sbp_audsiz>0)
+			vfree(current_drive->aud_buf);
+		current_drive->aud_buf=NULL;
+		current_drive->sbp_audsiz=arg;
+
+		if (current_drive->sbp_audsiz>16)
 		{
-			msg(DBG_INF,"requested frame %d, CD size %d ???\n",
-			    block, current_drive->CDsize_frm);
-			current_drive->sbp_read_frames=1;
+			current_drive->sbp_audsiz = 0;
+			RETURN_UP(current_drive->sbp_audsiz);
 		}
-	}
 	
-	flags_cmd_out = f_putcmd | f_respo2 | f_ResponseStatus | f_obey_p_check;
-	clr_cmdbuf();
-	if (famV_drive)
-	  {
-	    drvcmd[0]=CMDV_READ;
-	    lba2msf(block,&drvcmd[1]); /* msf-bcd format required */
-	    bin2bcdx(&drvcmd[1]);
-	    bin2bcdx(&drvcmd[2]);
-	    bin2bcdx(&drvcmd[3]);
-	    drvcmd[4]=current_drive->sbp_read_frames>>8;
-	    drvcmd[5]=current_drive->sbp_read_frames&0xff;
-	    drvcmd[6]=0x02; /* flag "msf-bcd" */
-	}
-	else if (fam0L_drive)
-	{
-		flags_cmd_out |= f_lopsta | f_getsta | f_bit1;
-		if (current_drive->xa_byte==0x20)
+		if (current_drive->sbp_audsiz>0)
 		{
-			cmd_type=READ_M2;
-			drvcmd[0]=CMD0_READ_XA; /* "read XA frames", old drives */
-			drvcmd[1]=(block>>16)&0x0ff;
-			drvcmd[2]=(block>>8)&0x0ff;
-			drvcmd[3]=block&0x0ff;
-			drvcmd[4]=(current_drive->sbp_read_frames>>8)&0x0ff;
-			drvcmd[5]=current_drive->sbp_read_frames&0x0ff;
+			current_drive->aud_buf=(u_char *) vmalloc(current_drive->sbp_audsiz*CD_FRAMESIZE_RAW);
+			if (current_drive->aud_buf==NULL)
+			{
+				msg(DBG_INF,"audio buffer (%d frames) not available.\n",current_drive->sbp_audsiz);
+				current_drive->sbp_audsiz=0;
+			}
+			else msg(DBG_INF,"audio buffer size: %d frames.\n",current_drive->sbp_audsiz);
 		}
-		else
+		RETURN_UP(current_drive->sbp_audsiz);
+
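+	/*
+	 * Example for CDROMAUDIOBUFSIZ (illustrative comment only):
+	 * requesting 8 frames vmalloc()s 8 * CD_FRAMESIZE_RAW == 8 * 2352
+	 * == 18816 bytes; requests above the 16-frame cap are refused and
+	 * a size of 0 is reported back.
+	 */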
+	case CDROMREADAUDIO:
+	{ /* start of CDROMREADAUDIO */
+		int i=0, j=0, frame, block=0;
+		u_int try=0;
+		u_long timeout;
+		u_char *p;
+		u_int data_tries = 0;
+		u_int data_waits = 0;
+		u_int data_retrying = 0;
+		int status_tries;
+		int error_flag;
+
+		msg(DBG_IOC,"ioctl: CDROMREADAUDIO entered.\n");
+		if (fam0_drive) RETURN_UP(-EINVAL);
+		if (famL_drive) RETURN_UP(-EINVAL);
+		if (famV_drive) RETURN_UP(-EINVAL);
+		if (famT_drive) RETURN_UP(-EINVAL);
+#ifdef SAFE_MIXED
+		if (current_drive->has_data>1) RETURN_UP(-EBUSY);
+#endif /* SAFE_MIXED */
+		if (current_drive->aud_buf==NULL) RETURN_UP(-EINVAL);
+		if (copy_from_user(&read_audio, (void __user *)arg,
+				   sizeof(struct cdrom_read_audio)))
+			RETURN_UP(-EFAULT);
+		if (read_audio.nframes < 0 || read_audio.nframes>current_drive->sbp_audsiz) RETURN_UP(-EINVAL);
+		if (!access_ok(VERIFY_WRITE, read_audio.buf,
+			      read_audio.nframes*CD_FRAMESIZE_RAW))
+			RETURN_UP(-EFAULT);
+
+		if (read_audio.addr_format==CDROM_MSF) /* MSF-bin specification of where to start */
+			block=msf2lba(&read_audio.addr.msf.minute);
+		else if (read_audio.addr_format==CDROM_LBA) /* lba specification of where to start */
+			block=read_audio.addr.lba;
+		else RETURN_UP(-EINVAL);
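+		/*
+		 * msf2lba() inverts the mapping used by lba2msf(): lba =
+		 * (minute*60 + second)*75 + frame - 150, so MSF 00:55:25
+		 * maps back to block 4000 (illustrative comment only,
+		 * assuming the usual 150-frame lead-in offset).
+		 */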
+#if 000
+		i=cc_SetSpeed(speed_150,0,0);
+		if (i) msg(DBG_AUD,"read_audio: SetSpeed error %d\n", i);
+#endif
+		msg(DBG_AUD,"read_audio: lba: %d, msf: %06X\n",
+		    block, blk2msf(block));
+		msg(DBG_AUD,"read_audio: before cc_ReadStatus.\n");
+#if OLD_BUSY
+		while (busy_data) sbp_sleep(HZ/10); /* wait a bit */
+		busy_audio=1;
+#endif /* OLD_BUSY */
+		error_flag=0;
+		for (data_tries=5; data_tries>0; data_tries--)
 		{
-			drvcmd[0]=CMD0_READ; /* "read frames", old drives */
-			if (current_drive->drv_type>=drv_201)
+			msg(DBG_AUD,"data_tries=%d ...\n", data_tries);
+			current_drive->mode=READ_AU;
+			cc_ModeSelect(CD_FRAMESIZE_RAW);
+			cc_ModeSense();
+			for (status_tries=3; status_tries > 0; status_tries--)
 			{
-				lba2msf(block,&drvcmd[1]); /* msf-bcd format required */
-				bin2bcdx(&drvcmd[1]);
-				bin2bcdx(&drvcmd[2]);
-				bin2bcdx(&drvcmd[3]);
+				flags_cmd_out |= f_respo3;
+				cc_ReadStatus();
+				if (sbp_status() != 0) break;
+				if (st_check) cc_ReadError();
+				sbp_sleep(1);    /* wait a bit, try again */
 			}
-			else
+			if (status_tries == 0)
 			{
-				drvcmd[1]=(block>>16)&0x0ff;
-				drvcmd[2]=(block>>8)&0x0ff;
-				drvcmd[3]=block&0x0ff;
+				msg(DBG_AUD,"read_audio: sbp_status: failed after 3 tries in line %d.\n", __LINE__);
+				continue;
 			}
-			drvcmd[4]=(current_drive->sbp_read_frames>>8)&0x0ff;
-			drvcmd[5]=current_drive->sbp_read_frames&0x0ff;
-			drvcmd[6]=(current_drive->drv_type<drv_201)?0:2; /* flag "lba or msf-bcd format" */
-		}
-	}
-	else if (fam1_drive)
-	{
-		drvcmd[0]=CMD1_READ;
-		lba2msf(block,&drvcmd[1]); /* msf-bin format required */
-		drvcmd[5]=(current_drive->sbp_read_frames>>8)&0x0ff;
-		drvcmd[6]=current_drive->sbp_read_frames&0x0ff;
-	}
-	else if (fam2_drive)
-	{
-		drvcmd[0]=CMD2_READ;
-		lba2msf(block,&drvcmd[1]); /* msf-bin format required */
-		drvcmd[4]=(current_drive->sbp_read_frames>>8)&0x0ff;
-		drvcmd[5]=current_drive->sbp_read_frames&0x0ff;
-		drvcmd[6]=0x02;
-	}
-	else if (famT_drive)
-	{
-		drvcmd[0]=CMDT_READ;
-		drvcmd[2]=(block>>24)&0x0ff;
-		drvcmd[3]=(block>>16)&0x0ff;
-		drvcmd[4]=(block>>8)&0x0ff;
-		drvcmd[5]=block&0x0ff;
-		drvcmd[7]=(current_drive->sbp_read_frames>>8)&0x0ff;
-		drvcmd[8]=current_drive->sbp_read_frames&0x0ff;
-	}
-	flags_cmd_out=f_putcmd;
-	response_count=0;
-	i=cmd_out();
-	if (i<0) msg(DBG_INF,"error giving READ command: %0d\n", i);
-	return;
-}
-/*==========================================================================*/
-/*
- *  Check the completion of the read-data command.  On success, read
- *  the current_drive->sbp_bufsiz * 2048 bytes of data from the disk into buffer.
- */
-static int sbp_data(struct request *req)
-{
-	int i=0, j=0, l, frame;
-	u_int try=0;
-	u_long timeout;
-	u_char *p;
-	u_int data_tries = 0;
-	u_int data_waits = 0;
-	u_int data_retrying = 0;
-	int error_flag;
-	int xa_count;
-	int max_latency;
-	int success;
-	int wait;
-	int duration;
-	
-	error_flag=0;
-	success=0;
-#if LONG_TIMING
-	max_latency=9*HZ;
-#else
-	if (current_drive->f_multisession) max_latency=15*HZ;
-	else max_latency=5*HZ;
-#endif
-	duration=jiffies;
-	for (frame=0;frame<current_drive->sbp_read_frames&&!error_flag; frame++)
-	{
-		SBPCD_CLI;
-		
-		del_timer(&data_timer);
-		data_timer.expires=jiffies+max_latency;
-		timed_out_data=0;
-		add_timer(&data_timer);
-		while (!timed_out_data) 
-		{
-			if (current_drive->f_multisession) try=maxtim_data*4;
-			else try=maxtim_data;
-			msg(DBG_000,"sbp_data: CDi_status loop: try=%d.\n",try);
-			for ( ; try!=0;try--)
+			msg(DBG_AUD,"read_audio: sbp_status: ok.\n");
+
+			flags_cmd_out = f_putcmd | f_respo2 | f_ResponseStatus | f_obey_p_check;
+			if (fam0L_drive)
 			{
-				j=inb(CDi_status);
-				if (!(j&s_not_data_ready)) break;
-				if (!(j&s_not_result_ready)) break;
-				if (fam0LV_drive) if (j&s_attention) break;
+				flags_cmd_out |= f_lopsta | f_getsta | f_bit1;
+				cmd_type=READ_M2;
+				drvcmd[0]=CMD0_READ_XA; /* "read XA frames", old drives */
+				drvcmd[1]=(block>>16)&0x000000ff;
+				drvcmd[2]=(block>>8)&0x000000ff;
+				drvcmd[3]=block&0x000000ff;
+				drvcmd[4]=0;
+				drvcmd[5]=read_audio.nframes; /* # of frames */
+				drvcmd[6]=0;
 			}
-			if (!(j&s_not_data_ready)) goto data_ready;
-			if (try==0)
+			else if (fam1_drive)
 			{
-				if (data_retrying == 0) data_waits++;
-				data_retrying = 1;
-				msg(DBG_000,"sbp_data: CDi_status loop: sleeping.\n");
-				sbp_sleep(1);
-				try = 1;
+				drvcmd[0]=CMD1_READ; /* "read frames", new drives */
+				lba2msf(block,&drvcmd[1]); /* msf-bin format required */
+				drvcmd[4]=0;
+				drvcmd[5]=0;
+				drvcmd[6]=read_audio.nframes; /* # of frames */
 			}
-		}
-		msg(DBG_INF,"sbp_data: CDi_status loop expired.\n");
-	data_ready:
-		del_timer(&data_timer);
-
-		if (timed_out_data)
-		{
-			msg(DBG_INF,"sbp_data: CDi_status timeout (timed_out_data) (%02X).\n", j);
-			error_flag++;
-		}
-		if (try==0)
-		{
-			msg(DBG_INF,"sbp_data: CDi_status timeout (try=0) (%02X).\n", j);
-			error_flag++;
-		}
-		if (!(j&s_not_result_ready))
-		{
-			msg(DBG_INF, "sbp_data: RESULT_READY where DATA_READY awaited (%02X).\n", j);
-			response_count=20;
-			j=ResponseInfo();
-			j=inb(CDi_status);
-		}
-		if (j&s_not_data_ready)
-		{
-			if ((current_drive->ored_ctl_adr&0x40)==0)
-				msg(DBG_INF, "CD contains no data tracks.\n");
-			else msg(DBG_INF, "sbp_data: DATA_READY timeout (%02X).\n", j);
-			error_flag++;
-		}
-		SBPCD_STI;
-		if (error_flag) break;
-
-		msg(DBG_000, "sbp_data: beginning to read.\n");
-		p = current_drive->sbp_buf + frame *  CD_FRAMESIZE;
-		if (sbpro_type==1) OUT(CDo_sel_i_d,1);
-		if (cmd_type==READ_M2) {
-                        if (do_16bit) insw(CDi_data, xa_head_buf, CD_XA_HEAD>>1);
-                        else insb(CDi_data, xa_head_buf, CD_XA_HEAD);
-		}
-		if (do_16bit) insw(CDi_data, p, CD_FRAMESIZE>>1);
-		else insb(CDi_data, p, CD_FRAMESIZE);
-		if (cmd_type==READ_M2) {
-                        if (do_16bit) insw(CDi_data, xa_tail_buf, CD_XA_TAIL>>1);
-                        else insb(CDi_data, xa_tail_buf, CD_XA_TAIL);
-		}
-		current_drive->sbp_current++;
-		if (sbpro_type==1) OUT(CDo_sel_i_d,0);
-		if (cmd_type==READ_M2)
-		{
-			for (xa_count=0;xa_count<CD_XA_HEAD;xa_count++)
-				sprintf(&msgbuf[xa_count*3], " %02X", xa_head_buf[xa_count]);
-			msgbuf[xa_count*3]=0;
-			msg(DBG_XA1,"xa head:%s\n", msgbuf);
-		}
-		data_retrying = 0;
-		data_tries++;
-		if (data_tries >= 1000)
-		{
-			msg(DBG_INF,"sbp_data() statistics: %d waits in %d frames.\n", data_waits, data_tries);
-			data_waits = data_tries = 0;
-		}
-	}
-	duration=jiffies-duration;
-	msg(DBG_TEA,"time to read %d frames: %d jiffies .\n",frame,duration);
-	if (famT_drive)
-	{
-		wait=8;
-		do
-		{
-			if (teac==2)
-                          {
-                            if ((i=CDi_stat_loop_T()) == -1) break;
-                          }
-                        else
-                          {
-                            sbp_sleep(1);
-                            OUT(CDo_sel_i_d,0); 
-                            i=inb(CDi_status);
-                          } 
-			if (!(i&s_not_data_ready))
+			else if (fam2_drive)
 			{
-				OUT(CDo_sel_i_d,1);
-				j=0;
-				do
+				drvcmd[0]=CMD2_READ_XA2;
+				lba2msf(block,&drvcmd[1]); /* msf-bin format required */
+				drvcmd[4]=0;
+				drvcmd[5]=read_audio.nframes; /* # of frames */
+				drvcmd[6]=0x11; /* raw mode */
+			}
+			else if (famT_drive) /* CD-55A: not tested yet */
+			{
+			}
+			msg(DBG_AUD,"read_audio: before giving \"read\" command.\n");
+			flags_cmd_out=f_putcmd;
+			response_count=0;
+			i=cmd_out();
+			if (i<0) msg(DBG_INF,"error giving READ AUDIO command: %0d\n", i);
+			sbp_sleep(0);
+			msg(DBG_AUD,"read_audio: after giving \"read\" command.\n");
+			for (frame=1;frame<2 && !error_flag; frame++)
+			{
+				try=maxtim_data;
+				for (timeout=jiffies+9*HZ; ; )
 				{
-					if (do_16bit) i=inw(CDi_data);
-					else i=inb(CDi_data);
-					j++;
-					i=inb(CDi_status);
+					for ( ; try!=0;try--)
+					{
+						j=inb(CDi_status);
+						if (!(j&s_not_data_ready)) break;
+						if (!(j&s_not_result_ready)) break;
+						if (fam0L_drive) if (j&s_attention) break;
+					}
+					if (try != 0 || time_after_eq(jiffies, timeout)) break;
+					if (data_retrying == 0) data_waits++;
+					data_retrying = 1;
+					sbp_sleep(1);
+					try = 1;
 				}
-				while (!(i&s_not_data_ready));
-				msg(DBG_TEA, "==========too much data (%d bytes/words)==============.\n", j);
-			}
-			if (!(i&s_not_result_ready))
-			{
-				OUT(CDo_sel_i_d,0);
-				l=0;
-				do
+				if (try==0)
 				{
-					infobuf[l++]=inb(CDi_info);
-					i=inb(CDi_status);
+					msg(DBG_INF,"read_audio: sbp_data: CDi_status timeout.\n");
+					error_flag++;
+					break;
 				}
-				while (!(i&s_not_result_ready));
-				if (infobuf[0]==0x00) success=1;
-#if 1
-				for (j=0;j<l;j++) sprintf(&msgbuf[j*3], " %02X", infobuf[j]);
-				msgbuf[j*3]=0;
-				msg(DBG_TEA,"sbp_data info response:%s\n", msgbuf);
-#endif
-				if (infobuf[0]==0x02)
+				msg(DBG_AUD,"read_audio: sbp_data: CDi_status ok.\n");
+				if (j&s_not_data_ready)
 				{
+					msg(DBG_INF, "read_audio: sbp_data: DATA_READY timeout.\n");
 					error_flag++;
-					do
-					{
-						++recursion;
-						if (recursion>1) msg(DBG_TEA,"cmd_out_T READ_ERR recursion (sbp_data): %d !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n",recursion);
-						else msg(DBG_TEA,"sbp_data: CMDT_READ_ERR necessary.\n");
-						clr_cmdbuf();
-						drvcmd[0]=CMDT_READ_ERR;
-						j=cmd_out_T(); /* !!! recursive here !!! */
-						--recursion;
-						sbp_sleep(1);
+					break;
+				}
+				msg(DBG_AUD,"read_audio: before reading data.\n");
+				error_flag=0;
+				p = current_drive->aud_buf;
+				if (sbpro_type==1) OUT(CDo_sel_i_d,1);
+				if (do_16bit)
+				{
+					u_short *p2 = (u_short *) p;
+
+					for (; (u_char *) p2 < current_drive->aud_buf + read_audio.nframes*CD_FRAMESIZE_RAW;)
+					{
+						if ((inb_p(CDi_status)&s_not_data_ready)) continue;
+
+						/* get one sample */
+						*p2++ = inw_p(CDi_data);
+						*p2++ = inw_p(CDi_data);
+					}
+				} else {
+					for (; p < current_drive->aud_buf + read_audio.nframes*CD_FRAMESIZE_RAW;)
+					{
+						if ((inb_p(CDi_status)&s_not_data_ready)) continue;
+
+						/* get one sample */
+						*p++ = inb_p(CDi_data);
+						*p++ = inb_p(CDi_data);
+						*p++ = inb_p(CDi_data);
+						*p++ = inb_p(CDi_data);
 					}
-					while (j<0);
-					current_drive->error_state=infobuf[2];
-					current_drive->b3=infobuf[3];
-					current_drive->b4=infobuf[4];
 				}
-				break;
+				if (sbpro_type==1) OUT(CDo_sel_i_d,0);
+				data_retrying = 0;
 			}
-			else
+			msg(DBG_AUD,"read_audio: after reading data.\n");
+			if (error_flag)    /* must have been spurious D_RDY or (ATTN&&!D_RDY) */
 			{
-#if 0
-				msg(DBG_TEA, "============= waiting for result=================.\n");
-				sbp_sleep(1);
-#endif
-			}
-		}
-		while (wait--);
-	}
-
-	if (error_flag) /* must have been spurious D_RDY or (ATTN&&!D_RDY) */
-	{
-		msg(DBG_TEA, "================error flag: %d=================.\n", error_flag);
-		msg(DBG_INF,"sbp_data: read aborted by drive.\n");
-#if 1
-		i=cc_DriveReset(); /* ugly fix to prevent a hang */
+				msg(DBG_AUD,"read_audio: read aborted by drive\n");
+#if 0000
+				i=cc_DriveReset();                /* ugly fix to prevent a hang */
 #else
-		i=cc_ReadError();
+				i=cc_ReadError();
 #endif
-		return (0);
-	}
-	
-	if (fam0LV_drive)
-	{
-		SBPCD_CLI;
-		i=maxtim_data;
-		for (timeout=jiffies+HZ; time_before(jiffies, timeout); timeout--)
-		{
-			for ( ;i!=0;i--)
+				continue;
+			}
+			if (fam0L_drive)
 			{
-				j=inb(CDi_status);
-				if (!(j&s_not_data_ready)) break;
-				if (!(j&s_not_result_ready)) break;
-				if (j&s_attention) break;
+				i=maxtim_data;
+				for (timeout=jiffies+9*HZ; time_before(jiffies, timeout); timeout--)
+				{
+					for ( ;i!=0;i--)
+					{
+						j=inb(CDi_status);
+						if (!(j&s_not_data_ready)) break;
+						if (!(j&s_not_result_ready)) break;
+						if (j&s_attention) break;
+					}
+					if (i != 0 || time_after_eq(jiffies, timeout)) break;
+					sbp_sleep(0);
+					i = 1;
+				}
+				if (i==0) msg(DBG_AUD,"read_audio: STATUS TIMEOUT AFTER READ\n");
+				if (!(j&s_attention))
+				{
+					msg(DBG_AUD,"read_audio: sbp_data: timeout waiting DRV_ATTN - retrying\n");
+					i=cc_DriveReset();  /* ugly fix to prevent a hang */
+					continue;
+				}
 			}
-			if (i != 0 || time_after_eq(jiffies, timeout)) break;
-			sbp_sleep(0);
-			i = 1;
-		}
-		if (i==0) msg(DBG_INF,"status timeout after READ.\n");
-		if (!(j&s_attention))
-		{
-			msg(DBG_INF,"sbp_data: timeout waiting DRV_ATTN - retrying.\n");
-			i=cc_DriveReset();  /* ugly fix to prevent a hang */
-			SBPCD_STI;
-			return (0);
-		}
-		SBPCD_STI;
-	}
-	
-#if 0
-	if (!success)
-#endif
-		do
-		{
-			if (fam0LV_drive) cc_ReadStatus();
-#if 1
-			if (famT_drive) msg(DBG_TEA, "================before ResponseStatus=================.\n", i);
-#endif
-			i=ResponseStatus();  /* builds status_bits, returns orig. status (old) or faked p_success (new) */
-#if 1
-			if (famT_drive)	msg(DBG_TEA, "================ResponseStatus: %d=================.\n", i);
-#endif
-			if (i<0)
+			do
 			{
-				msg(DBG_INF,"bad cc_ReadStatus after read: %02X\n", current_drive->status_bits);
-				return (0);
+				if (fam0L_drive) cc_ReadStatus();
+				i=ResponseStatus();  /* builds status_bits, returns orig. status (old) or faked p_success (new) */
+				if (i<0) { msg(DBG_AUD,
+					       "read_audio: cc_ReadStatus error after read: %02X\n",
+					       current_drive->status_bits);
+					   continue; /* FIXME */
+				   }
+			}
+			while ((fam0L_drive)&&(!st_check)&&(!(i&p_success)));
+			if (st_check)
+			{
+				i=cc_ReadError();
+				msg(DBG_AUD,"read_audio: cc_ReadError was necessary after read: %02X\n",i);
+				continue;
 			}
+			if (copy_to_user(read_audio.buf,
+					 current_drive->aud_buf,
+					 read_audio.nframes * CD_FRAMESIZE_RAW))
+				RETURN_UP(-EFAULT);
+			msg(DBG_AUD,"read_audio: copy_to_user done.\n");
+			break;
 		}
-		while ((fam0LV_drive)&&(!st_check)&&(!(i&p_success)));
-	if (st_check)
-	{
-		i=cc_ReadError();
-		msg(DBG_INF,"cc_ReadError was necessary after read: %d\n",i);
-		return (0);
-	}
-	if (fatal_err)
-	{
-		fatal_err=0;
-		current_drive->sbp_first_frame=current_drive->sbp_last_frame=-1;      /* purge buffer */
-		current_drive->sbp_current = 0;
-		msg(DBG_INF,"sbp_data: fatal_err - retrying.\n");
-		return (0);
-	}
-	
-	current_drive->sbp_first_frame = req -> sector / 4;
-	current_drive->sbp_last_frame = current_drive->sbp_first_frame + current_drive->sbp_read_frames - 1;
-	sbp_transfer(req);
-	return (1);
-}
-/*==========================================================================*/
-
-static int sbpcd_block_open(struct inode *inode, struct file *file)
-{
-	struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data;
-	return cdrom_open(p->sbpcd_infop, inode, file);
-}
-
-static int sbpcd_block_release(struct inode *inode, struct file *file)
-{
-	struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data;
-	return cdrom_release(p->sbpcd_infop, file);
-}
+		cc_ModeSelect(CD_FRAMESIZE);
+		cc_ModeSense();
+		current_drive->mode=READ_M1;
+#if OLD_BUSY
+		busy_audio=0;
+#endif /* OLD_BUSY */
+		if (data_tries == 0)
+		{
+			msg(DBG_AUD,"read_audio: failed after 5 tries in line %d.\n", __LINE__);
+			RETURN_UP(-EIO);
+		}
+		msg(DBG_AUD,"read_audio: successful return.\n");
+		RETURN_UP(0);
+	} /* end of CDROMREADAUDIO */
 
-static int sbpcd_block_ioctl(struct inode *inode, struct file *file,
-				unsigned cmd, unsigned long arg)
-{
-	struct sbpcd_drive *p = inode->i_bdev->bd_disk->private_data;
-	return cdrom_ioctl(file, p->sbpcd_infop, inode, cmd, arg);
+	default:
+		msg(DBG_IOC,"ioctl: unknown function request %04X\n", cmd);
+		RETURN_UP(-EINVAL);
+	} /* end switch(cmd) */
 }
 
 static int sbpcd_block_media_changed(struct gendisk *disk)
@@ -5478,10 +5471,9 @@
 	.get_mcn		= sbpcd_get_mcn,
 	.reset			= sbpcd_reset,
 	.audio_ioctl		= sbpcd_audio_ioctl,
-	.dev_ioctl		= sbpcd_dev_ioctl,
 	.capability		= CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK |
 				CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
-				CDC_MCN | CDC_PLAY_AUDIO | CDC_IOCTLS,
+				CDC_MCN | CDC_PLAY_AUDIO,
 	.n_minors		= 1,
 };
 
diff -urN oldtree/drivers/cdrom/viocd.c newtree/drivers/cdrom/viocd.c
--- oldtree/drivers/cdrom/viocd.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/cdrom/viocd.c	2006-01-29 11:18:20.605156336 +0000
@@ -629,7 +629,7 @@
 	.media_changed = viocd_media_changed,
 	.lock_door = viocd_lock_door,
 	.generic_packet = viocd_packet,
-	.capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_IOCTLS | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM
+	.capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK | CDC_SELECT_SPEED | CDC_SELECT_DISC | CDC_MULTI_SESSION | CDC_MCN | CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET | CDC_DRIVE_STATUS | CDC_GENERIC_PACKET | CDC_CD_R | CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_RAM
 };
 
 static int __init find_capability(const char *type)
diff -urN oldtree/drivers/char/agp/amd64-agp.c newtree/drivers/char/agp/amd64-agp.c
--- oldtree/drivers/char/agp/amd64-agp.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/char/agp/amd64-agp.c	2006-01-29 11:18:10.811645176 +0000
@@ -600,6 +600,26 @@
 	agp_put_bridge(bridge);
 }
 
+#ifdef CONFIG_PM
+
+static int agp_amd64_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+static int agp_amd64_resume(struct pci_dev *pdev)
+{
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
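+	/* the bridge loses its aperture/GART setup over suspend,
+	   so reprogram it */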
+	return amd_8151_configure();
+}
+
+#endif /* CONFIG_PM */
+
 static struct pci_device_id agp_amd64_pci_table[] = {
 	{
 	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
@@ -718,6 +738,10 @@
 	.id_table	= agp_amd64_pci_table,
 	.probe		= agp_amd64_probe,
 	.remove		= agp_amd64_remove,
+#ifdef CONFIG_PM
+	.suspend	= agp_amd64_suspend,
+	.resume		= agp_amd64_resume,
+#endif
 };
 
 
diff -urN oldtree/drivers/char/agp/nvidia-agp.c newtree/drivers/char/agp/nvidia-agp.c
--- oldtree/drivers/char/agp/nvidia-agp.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/char/agp/nvidia-agp.c	2006-01-29 11:18:20.615154816 +0000
@@ -11,6 +11,7 @@
 #include <linux/gfp.h>
 #include <linux/page-flags.h>
 #include <linux/mm.h>
+#include <linux/jiffies.h>
 #include "agp.h"
 
 /* NVIDIA registers */
@@ -256,7 +257,7 @@
 		do {
 			pci_read_config_dword(nvidia_private.dev_1,
 					NVIDIA_1_WBC, &wbc_reg);
-			if ((signed)(end - jiffies) <= 0) {
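+			/* time_before_eq() copes with jiffies wraparound */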
+			if (time_before_eq(end, jiffies)) {
 				printk(KERN_ERR PFX
 				    "TLB flush took more than 3 seconds.\n");
 			}
diff -urN oldtree/drivers/hwmon/hdaps.c newtree/drivers/hwmon/hdaps.c
--- oldtree/drivers/hwmon/hdaps.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/hwmon/hdaps.c	2006-01-29 11:18:10.812645024 +0000
@@ -36,7 +36,7 @@
 #include <asm/io.h>
 
 #define HDAPS_LOW_PORT		0x1600	/* first port used by hdaps */
-#define HDAPS_NR_PORTS		0x30	/* number of ports: 0x1600 - 0x162f */
+#define HDAPS_HI_PORT		0x162f	/* last port used by hdaps */
 
 #define HDAPS_PORT_STATE	0x1611	/* device state */
 #define HDAPS_PORT_YPOS		0x1612	/* y-axis position */
@@ -62,6 +62,12 @@
 #define HDAPS_INPUT_FUZZ	4	/* input event threshold */
 #define HDAPS_INPUT_FLAT	4
 
+static struct resource __initdata hdaps_iores = {
+	.flags	= IORESOURCE_IO,
+	.start	= HDAPS_LOW_PORT,
+	.end	= HDAPS_HI_PORT,
+};
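+/* The hdaps I/O ports are claimed through the platform device's
+   resources rather than an explicit request_region()/release_region()
+   pair. */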
+
 static struct timer_list hdaps_timer;
 static struct platform_device *pdev;
 static struct input_dev *hdaps_idev;
@@ -284,34 +290,6 @@
 }
 
 
-/* Device model stuff */
-
-static int hdaps_probe(struct platform_device *dev)
-{
-	int ret;
-
-	ret = hdaps_device_init();
-	if (ret)
-		return ret;
-
-	printk(KERN_INFO "hdaps: device successfully initialized.\n");
-	return 0;
-}
-
-static int hdaps_resume(struct platform_device *dev)
-{
-	return hdaps_device_init();
-}
-
-static struct platform_driver hdaps_driver = {
-	.probe = hdaps_probe,
-	.resume = hdaps_resume,
-	.driver	= {
-		.name = "hdaps",
-		.owner = THIS_MODULE,
-	},
-};
-
 /*
  * hdaps_calibrate - Set our "resting" values.  Callers must hold hdaps_sem.
  */
@@ -320,13 +298,13 @@
 	__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &rest_x, &rest_y);
 }
 
-static void hdaps_mousedev_poll(unsigned long unused)
+static void hdaps_inputdev_poll(unsigned long unused)
 {
 	int x, y;
 
 	/* Cannot sleep.  Try nonblockingly.  If we fail, try again later. */
 	if (down_trylock(&hdaps_sem)) {
-		mod_timer(&hdaps_timer,jiffies + HDAPS_POLL_PERIOD);
+		mod_timer(&hdaps_timer, jiffies + HDAPS_POLL_PERIOD);
 		return;
 	}
 
@@ -339,7 +317,7 @@
 
 	mod_timer(&hdaps_timer, jiffies + HDAPS_POLL_PERIOD);
 
-out:
+ out:
 	up(&hdaps_sem);
 }
 
@@ -473,6 +451,80 @@
 	.attrs = hdaps_attributes,
 };
 
+/* Device model stuff */
+
+static int __devinit hdaps_probe(struct platform_device *dev)
+{
+	int error;
+
+	/* initial calibrate for the input device */
+	hdaps_calibrate();
+
+	hdaps_idev = input_allocate_device();
+	if (!hdaps_idev)
+		return -ENOMEM;
+
+	/* initialize the input class */
+	hdaps_idev->name = "hdaps";
+	hdaps_idev->phys = "hdaps/input0";
+	hdaps_idev->cdev.dev = &dev->dev;
+	hdaps_idev->evbit[0] = BIT(EV_ABS);
+	input_set_abs_params(hdaps_idev, ABS_X,
+			-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
+	input_set_abs_params(hdaps_idev, ABS_Y,
+			-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
+
+	error = sysfs_create_group(&dev->dev.kobj, &hdaps_attribute_group);
+	if (error)
+		goto err_free_input_dev;
+
+	error = hdaps_device_init();
+	if (error)
+		goto err_sysfs_remove_group;
+
+	error = input_register_device(hdaps_idev);
+	if (error)
+		goto err_sysfs_remove_group;
+
+	/* start up our timer for the input device */
+	init_timer(&hdaps_timer);
+	hdaps_timer.function = hdaps_inputdev_poll;
+	hdaps_timer.expires = jiffies + HDAPS_POLL_PERIOD;
+	add_timer(&hdaps_timer);
+
+	return 0;
+
+ err_sysfs_remove_group:
+	sysfs_remove_group(&dev->dev.kobj, &hdaps_attribute_group);
+ err_free_input_dev:
+	input_free_device(hdaps_idev);
+	return error;
+}
+
+static int __devexit hdaps_remove(struct platform_device *dev)
+{
+	del_timer_sync(&hdaps_timer);
+	input_unregister_device(hdaps_idev);
+	sysfs_remove_group(&dev->dev.kobj, &hdaps_attribute_group);
+
+	return 0;
+}
+
+static int hdaps_resume(struct platform_device *dev)
+{
+	return hdaps_device_init();
+}
+
+static struct platform_driver hdaps_driver = {
+	.driver		= {
+		.name	= "hdaps",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= hdaps_probe,
+	.remove		= __devexit_p(hdaps_remove),
+	.resume		= hdaps_resume,
+};
+
 
 /* Module stuff */
 
@@ -511,7 +563,7 @@
 
 static int __init hdaps_init(void)
 {
-	int ret;
+	int error;
 
 	/* Note that DMI_MATCH(...,"ThinkPad T42") will match "ThinkPad T42p" */
 	struct dmi_system_id hdaps_whitelist[] = {
@@ -532,79 +584,43 @@
 
 	if (!dmi_check_system(hdaps_whitelist)) {
 		printk(KERN_WARNING "hdaps: supported laptop not found!\n");
-		ret = -ENXIO;
-		goto out;
+		return -ENODEV;
 	}
 
-	if (!request_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS, "hdaps")) {
-		ret = -ENXIO;
-		goto out;
+	error = platform_driver_register(&hdaps_driver);
+	if (error)
+		return error;
+
+	pdev = platform_device_alloc("hdaps", -1);
+	if (!pdev) {
+		error = -ENOMEM;
+		goto err_unregister_driver;
 	}
 
-	ret = platform_driver_register(&hdaps_driver);
-	if (ret)
-		goto out_region;
-
-	pdev = platform_device_register_simple("hdaps", -1, NULL, 0);
-	if (IS_ERR(pdev)) {
-		ret = PTR_ERR(pdev);
-		goto out_driver;
-	}
-
-	ret = sysfs_create_group(&pdev->dev.kobj, &hdaps_attribute_group);
-	if (ret)
-		goto out_device;
-
-	hdaps_idev = input_allocate_device();
-	if (!hdaps_idev) {
-		ret = -ENOMEM;
-		goto out_group;
-	}
-
-	/* initial calibrate for the input device */
-	hdaps_calibrate();
-
-	/* initialize the input class */
-	hdaps_idev->name = "hdaps";
-	hdaps_idev->cdev.dev = &pdev->dev;
-	hdaps_idev->evbit[0] = BIT(EV_ABS);
-	input_set_abs_params(hdaps_idev, ABS_X,
-			-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
-	input_set_abs_params(hdaps_idev, ABS_Y,
-			-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
-
-	input_register_device(hdaps_idev);
-
-	/* start up our timer for the input device */
-	init_timer(&hdaps_timer);
-	hdaps_timer.function = hdaps_mousedev_poll;
-	hdaps_timer.expires = jiffies + HDAPS_POLL_PERIOD;
-	add_timer(&hdaps_timer);
+	error = platform_device_add_resources(pdev, &hdaps_iores, 1);
+	if (error)
+		goto err_free_device;
+
+	error = platform_device_add(pdev);
+	if (error)
+		goto err_free_device;
 
 	printk(KERN_INFO "hdaps: driver successfully loaded.\n");
 	return 0;
 
-out_group:
-	sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
-out_device:
-	platform_device_unregister(pdev);
-out_driver:
+ err_free_device:
+	platform_device_put(pdev);
+ err_unregister_driver:
 	platform_driver_unregister(&hdaps_driver);
-out_region:
-	release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
-out:
-	printk(KERN_WARNING "hdaps: driver init failed (ret=%d)!\n", ret);
-	return ret;
+
+	printk(KERN_WARNING "hdaps: driver init failed (error=%d)!\n", error);
+	return error;
 }
 
 static void __exit hdaps_exit(void)
 {
-	del_timer_sync(&hdaps_timer);
-	input_unregister_device(hdaps_idev);
-	sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
 	platform_device_unregister(pdev);
 	platform_driver_unregister(&hdaps_driver);
-	release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
 
 	printk(KERN_INFO "hdaps: driver unloaded.\n");
 }
diff -urN oldtree/drivers/ide/ide-cd.c newtree/drivers/ide/ide-cd.c
--- oldtree/drivers/ide/ide-cd.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/ide/ide-cd.c	2006-01-29 11:18:20.608155880 +0000
@@ -2481,52 +2481,6 @@
 }
 
 static
-int ide_cdrom_dev_ioctl (struct cdrom_device_info *cdi,
-			 unsigned int cmd, unsigned long arg)
-{
-	struct packet_command cgc;
-	char buffer[16];
-	int stat;
-
-	init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN);
-
-	/* These will be moved into the Uniform layer shortly... */
-	switch (cmd) {
- 	case CDROMSETSPINDOWN: {
- 		char spindown;
- 
- 		if (copy_from_user(&spindown, (void __user *) arg, sizeof(char)))
-			return -EFAULT;
- 
-                if ((stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0)))
-			return stat;
-
- 		buffer[11] = (buffer[11] & 0xf0) | (spindown & 0x0f);
-
- 		return cdrom_mode_select(cdi, &cgc);
- 	} 
- 
- 	case CDROMGETSPINDOWN: {
- 		char spindown;
- 
-                if ((stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0)))
-			return stat;
- 
- 		spindown = buffer[11] & 0x0f;
- 
-		if (copy_to_user((void __user *) arg, &spindown, sizeof (char)))
-			return -EFAULT;
- 
- 		return 0;
- 	}
-  
-	default:
-		return -EINVAL;
-	}
-
-}
-
-static
 int ide_cdrom_audio_ioctl (struct cdrom_device_info *cdi,
 			   unsigned int cmd, void *arg)
 			   
@@ -2862,12 +2816,11 @@
 	.get_mcn		= ide_cdrom_get_mcn,
 	.reset			= ide_cdrom_reset,
 	.audio_ioctl		= ide_cdrom_audio_ioctl,
-	.dev_ioctl		= ide_cdrom_dev_ioctl,
 	.capability		= CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK |
 				CDC_SELECT_SPEED | CDC_SELECT_DISC |
 				CDC_MULTI_SESSION | CDC_MCN |
 				CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET |
-				CDC_IOCTLS | CDC_DRIVE_STATUS | CDC_CD_R |
+				CDC_DRIVE_STATUS | CDC_CD_R |
 				CDC_CD_RW | CDC_DVD | CDC_DVD_R| CDC_DVD_RAM |
 				CDC_GENERIC_PACKET | CDC_MO_DRIVE | CDC_MRW |
 				CDC_MRW_W | CDC_RAM,
@@ -3378,6 +3331,45 @@
 	return 0;
 }
 
+static int idecd_set_spindown(struct cdrom_device_info *cdi, unsigned long arg)
+{
+	struct packet_command cgc;
+	char buffer[16];
+	int stat;
+	char spindown;
+
+	if (copy_from_user(&spindown, (void __user *)arg, sizeof(char)))
+		return -EFAULT;
+
+	init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN);
+
+	stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0);
+	if (stat)
+		return stat;
+
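+	/*
+	 * buffer[11] is byte 3 of the CD-ROM parameters mode page (after
+	 * the 8-byte mode header); its low nibble holds the inactivity
+	 * (spindown) timer field, so only that nibble is replaced.
+	 */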
+	buffer[11] = (buffer[11] & 0xf0) | (spindown & 0x0f);
+	return cdrom_mode_select(cdi, &cgc);
+}
+
+static int idecd_get_spindown(struct cdrom_device_info *cdi, unsigned long arg)
+{
+	struct packet_command cgc;
+	char buffer[16];
+	int stat;
+	char spindown;
+
+	init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN);
+
+	stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0);
+	if (stat)
+		return stat;
+
+	spindown = buffer[11] & 0x0f;
+	if (copy_to_user((void __user *)arg, &spindown, sizeof(char)))
+		return -EFAULT;
+	return 0;
+}
+
 static int idecd_ioctl (struct inode *inode, struct file *file,
 			unsigned int cmd, unsigned long arg)
 {
@@ -3385,7 +3377,16 @@
 	struct cdrom_info *info = ide_cd_g(bdev->bd_disk);
 	int err;
 
-	err  = generic_ide_ioctl(info->drive, file, bdev, cmd, arg);
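+	/* ide-cd private ioctls, formerly handled via the cdrom
+	   layer's dev_ioctl hook */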
+	switch (cmd) {
+	case CDROMSETSPINDOWN:
+		return idecd_set_spindown(&info->devinfo, arg);
+	case CDROMGETSPINDOWN:
+		return idecd_get_spindown(&info->devinfo, arg);
+	default:
+		break;
+	}
+
+	err = generic_ide_ioctl(info->drive, file, bdev, cmd, arg);
 	if (err == -EINVAL)
 		err = cdrom_ioctl(file, &info->devinfo, inode, cmd, arg);
 
diff -urN oldtree/drivers/ide/ide-cd.c.orig newtree/drivers/ide/ide-cd.c.orig
--- oldtree/drivers/ide/ide-cd.c.orig	1970-01-01 00:00:00.000000000 +0000
+++ newtree/drivers/ide/ide-cd.c.orig	2006-01-28 19:08:42.000000000 +0000
@@ -0,0 +1,3514 @@
+/*
+ * linux/drivers/ide/ide-cd.c
+ *
+ * Copyright (C) 1994, 1995, 1996  scott snyder  <snyder@fnald0.fnal.gov>
+ * Copyright (C) 1996-1998  Erik Andersen <andersee@debian.org>
+ * Copyright (C) 1998-2000  Jens Axboe <axboe@suse.de>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License.  See linux/COPYING for more information.
+ *
+ * ATAPI CD-ROM driver.  To be used with ide.c.
+ * See Documentation/cdrom/ide-cd for usage information.
+ *
+ * Suggestions are welcome. Patches that work are more welcome though. ;-)
+ * For those wishing to work on this driver, please be sure you download
+ * and comply with the latest Mt. Fuji (SFF8090 version 4) and ATAPI 
+ * (SFF-8020i rev 2.6) standards. These documents can be obtained by 
+ * anonymous ftp from:
+ * ftp://fission.dt.wdc.com/pub/standards/SFF_atapi/spec/SFF8020-r2.6/PS/8020r26.ps
+ * ftp://ftp.avc-pioneer.com/Mtfuji4/Spec/Fuji4r10.pdf
+ *
+ * Drives that deviate from these standards will be accommodated as much
+ * as possible via compile time or command-line options.  Since I only have
+ * a few drives, you generally need to send me patches...
+ *
+ * ----------------------------------
+ * TO DO LIST:
+ * -Make it so that Pioneer CD DR-A24X and friends don't get screwed up on
+ *   boot
+ *
+ * ----------------------------------
+ * 1.00  Oct 31, 1994 -- Initial version.
+ * 1.01  Nov  2, 1994 -- Fixed problem with starting request in
+ *                       cdrom_check_status.
+ * 1.03  Nov 25, 1994 -- leaving unmask_intr[] as a user-setting (as for disks)
+ * (from mlord)       -- minor changes to cdrom_setup()
+ *                    -- renamed ide_dev_s to ide_drive_t, enable irq on command
+ * 2.00  Nov 27, 1994 -- Generalize packet command interface;
+ *                       add audio ioctls.
+ * 2.01  Dec  3, 1994 -- Rework packet command interface to handle devices
+ *                       which send an interrupt when ready for a command.
+ * 2.02  Dec 11, 1994 -- Cache the TOC in the driver.
+ *                       Don't use SCMD_PLAYAUDIO_TI; it's not included
+ *                       in the current version of ATAPI.
+ *                       Try to use LBA instead of track or MSF addressing
+ *                       when possible.
+ *                       Don't wait for READY_STAT.
+ * 2.03  Jan 10, 1995 -- Rewrite block read routines to handle block sizes
+ *                       other than 2k and to move multiple sectors in a
+ *                       single transaction.
+ * 2.04  Apr 21, 1995 -- Add work-around for Creative Labs CD220E drives.
+ *                       Thanks to Nick Saw <cwsaw@pts7.pts.mot.com> for
+ *                       help in figuring this out.  Ditto for Acer and
+ *                       Aztech drives, which seem to have the same problem.
+ * 2.04b May 30, 1995 -- Fix to match changes in ide.c version 3.16 -ml
+ * 2.05  Jun  8, 1995 -- Don't attempt to retry after an illegal request
+ *                        or data protect error.
+ *                       Use HWIF and DEV_HWIF macros as in ide.c.
+ *                       Always try to do a request_sense after
+ *                        a failed command.
+ *                       Include an option to give textual descriptions
+ *                        of ATAPI errors.
+ *                       Fix a bug in handling the sector cache which
+ *                        showed up if the drive returned data in 512 byte
+ *                        blocks (like Pioneer drives).  Thanks to
+ *                        Richard Hirst <srh@gpt.co.uk> for diagnosing this.
+ *                       Properly supply the page number field in the
+ *                        MODE_SELECT command.
+ *                       PLAYAUDIO12 is broken on the Aztech; work around it.
+ * 2.05x Aug 11, 1995 -- lots of data structure renaming/restructuring in ide.c
+ *                       (my apologies to Scott, but now ide-cd.c is independent)
+ * 3.00  Aug 22, 1995 -- Implement CDROMMULTISESSION ioctl.
+ *                       Implement CDROMREADAUDIO ioctl (UNTESTED).
+ *                       Use input_ide_data() and output_ide_data().
+ *                       Add door locking.
+ *                       Fix usage count leak in cdrom_open, which happened
+ *                        when a read-write mount was attempted.
+ *                       Try to load the disk on open.
+ *                       Implement CDROMEJECT_SW ioctl (off by default).
+ *                       Read total cdrom capacity during open.
+ *                       Rearrange logic in cdrom_decode_status.  Issue
+ *                        request sense commands for failed packet commands
+ *                        from here instead of from cdrom_queue_packet_command.
+ *                        Fix a race condition in retrieving error information.
+ *                       Suppress printing normal unit attention errors and
+ *                        some drive not ready errors.
+ *                       Implement CDROMVOLREAD ioctl.
+ *                       Implement CDROMREADMODE1/2 ioctls.
+ *                       Fix race condition in setting up interrupt handlers
+ *                        when the `serialize' option is used.
+ * 3.01  Sep  2, 1995 -- Fix ordering of reenabling interrupts in
+ *                        cdrom_queue_request.
+ *                       Another try at using ide_[input,output]_data.
+ * 3.02  Sep 16, 1995 -- Stick total disk capacity in partition table as well.
+ *                       Make VERBOSE_IDE_CD_ERRORS dump failed command again.
+ *                       Dump out more information for ILLEGAL REQUEST errs.
+ *                       Fix handling of errors occurring before the
+ *                        packet command is transferred.
+ *                       Fix transfers with odd bytelengths.
+ * 3.03  Oct 27, 1995 -- Some Creative drives have an id of just `CD'.
+ *                       `DCI-2S10' drives are broken too.
+ * 3.04  Nov 20, 1995 -- So are Vertos drives.
+ * 3.05  Dec  1, 1995 -- Changes to go with overhaul of ide.c and ide-tape.c
+ * 3.06  Dec 16, 1995 -- Add support needed for partitions.
+ *                       More workarounds for Vertos bugs (based on patches
+ *                        from Holger Dietze <dietze@aix520.informatik.uni-leipzig.de>).
+ *                       Try to eliminate byteorder assumptions.
+ *                       Use atapi_cdrom_subchnl struct definition.
+ *                       Add STANDARD_ATAPI compilation option.
+ * 3.07  Jan 29, 1996 -- More twiddling for broken drives: Sony 55D,
+ *                        Vertos 300.
+ *                       Add NO_DOOR_LOCKING configuration option.
+ *                       Handle drive_cmd requests w/NULL args (for hdparm -t).
+ *                       Work around sporadic Sony55e audio play problem.
+ * 3.07a Feb 11, 1996 -- check drive->id for NULL before dereferencing, to fix
+ *                        problem with "hde=cdrom" with no drive present.  -ml
+ * 3.08  Mar  6, 1996 -- More Vertos workarounds.
+ * 3.09  Apr  5, 1996 -- Add CDROMCLOSETRAY ioctl.
+ *                       Switch to using MSF addressing for audio commands.
+ *                       Reformat to match kernel tabbing style.
+ *                       Add CDROM_GET_UPC ioctl.
+ * 3.10  Apr 10, 1996 -- Fix compilation error with STANDARD_ATAPI.
+ * 3.11  Apr 29, 1996 -- Patch from Heiko Eissfeldt <heiko@colossus.escape.de>
+ *                       to remove redundant verify_area calls.
+ * 3.12  May  7, 1996 -- Rudimentary changer support.  Based on patches
+ *                        from Gerhard Zuber <zuber@berlin.snafu.de>.
+ *                       Let open succeed even if there's no loaded disc.
+ * 3.13  May 19, 1996 -- Fixes for changer code.
+ * 3.14  May 29, 1996 -- Add work-around for Vertos 600.
+ *                        (From Hennus Bergman <hennus@sky.ow.nl>.)
+ * 3.15  July 2, 1996 -- Added support for Sanyo 3 CD changers
+ *                        from Ben Galliart <bgallia@luc.edu> with 
+ *                        special help from Jeff Lightfoot 
+ *                        <jeffml@pobox.com>
+ * 3.15a July 9, 1996 -- Improved Sanyo 3 CD changer identification
+ * 3.16  Jul 28, 1996 -- Fix from Gadi to reduce kernel stack usage for ioctl.
+ * 3.17  Sep 17, 1996 -- Tweak audio reads for some drives.
+ *                       Start changing CDROMLOADFROMSLOT to CDROM_SELECT_DISC.
+ * 3.18  Oct 31, 1996 -- Added module and DMA support.
+ *                       
+ *                       
+ * 4.00  Nov 5, 1996   -- New ide-cd maintainer,
+ *                                 Erik B. Andersen <andersee@debian.org>
+ *                     -- Newer Creative drives don't always set the error
+ *                          register correctly.  Make sure we see media changes
+ *                          regardless.
+ *                     -- Integrate with generic cdrom driver.
+ *                     -- CDROMGETSPINDOWN and CDROMSETSPINDOWN ioctls, based on
+ *                          a patch from Ciro Cattuto <>.
+ *                     -- Call set_device_ro.
+ *                     -- Implement CDROMMECHANISMSTATUS and CDROMSLOTTABLE
+ *                          ioctls, based on patch by Erik Andersen
+ *                     -- Add some probes of drive capability during setup.
+ *
+ * 4.01  Nov 11, 1996  -- Split into ide-cd.c and ide-cd.h
+ *                     -- Removed CDROMMECHANISMSTATUS and CDROMSLOTTABLE 
+ *                          ioctls in favor of a generalized approach 
+ *                          using the generic cdrom driver.
+ *                     -- Fully integrated with the 2.1.X kernel.
+ *                     -- Other stuff that I forgot (lots of changes)
+ *
+ * 4.02  Dec 01, 1996  -- Applied patch from Gadi Oxman <gadio@netvision.net.il>
+ *                          to fix the drive door locking problems.
+ *
+ * 4.03  Dec 04, 1996  -- Added DSC overlap support.
+ * 4.04  Dec 29, 1996  -- Added CDROMREADRAW ioctl based on patch
+ *                          by Ales Makarov (xmakarov@sun.felk.cvut.cz)
+ *
+ * 4.05  Nov 20, 1997  -- Modified to print more drive info on init
+ *                        Minor other changes
+ *                        Fix errors on CDROMSTOP (If you have a "Dolphin",
+ *                          you must define IHAVEADOLPHIN)
+ *                        Added identifier so new Sanyo CD-changer works
+ *                        Better detection if door locking isn't supported
+ *
+ * 4.06  Dec 17, 1997  -- fixed endless "tray open" messages  -ml
+ * 4.07  Dec 17, 1997  -- fallback to set pc->stat on "tray open"
+ * 4.08  Dec 18, 1997  -- spew less noise when tray is empty
+ *                     -- fix speed display for ACER 24X, 18X
+ * 4.09  Jan 04, 1998  -- fix handling of the last block so we return
+ *                         an end of file instead of an I/O error (Gadi)
+ * 4.10  Jan 24, 1998  -- fixed a bug so now changers can change to a new
+ *                         slot when there is no disc in the current slot.
+ *                     -- Fixed a memory leak where info->changer_info was
+ *                         malloc'ed but never free'd when closing the device.
+ *                     -- Cleaned up the global namespace a bit by making more
+ *                         functions static that should already have been.
+ * 4.11  Mar 12, 1998  -- Added support for the CDROM_SELECT_SPEED ioctl
+ *                         based on a patch for 2.0.33 by Jelle Foks 
+ *                         <jelle@scintilla.utwente.nl>, a patch for 2.0.33
+ *                         by Toni Giorgino <toni@pcape2.pi.infn.it>, the SCSI
+ *                         version, and my own efforts.  -erik
+ *                     -- Fixed a stupid bug which egcs was kind enough to
+ *                         inform me of where "Illegal mode for this track"
+ *                         was never returned due to a comparison on data
+ *                         types of limited range.
+ * 4.12  Mar 29, 1998  -- Fixed bug in CDROM_SELECT_SPEED so write speed is 
+ *                         now set only for CD-R and CD-RW drives.  I had
+ *                         removed this support because it produced errors.
+ *                         It produced errors _only_ for non-writers. duh.
+ * 4.13  May 05, 1998  -- Suppress useless "in progress of becoming ready"
+ *                         messages, since this is not an error.
+ *                     -- Change error messages to be const
+ *                     -- Remove a "\t" which looks ugly in the syslogs
+ * 4.14  July 17, 1998 -- Change to pointing to .ps version of ATAPI spec
+ *                         since the .pdf version doesn't seem to work...
+ *                     -- Updated the TODO list to something more current.
+ *
+ * 4.15  Aug 25, 1998  -- Updated ide-cd.h to respect machine endianness,
+ *                         patch thanks to "Eddie C. Dost" <ecd@skynet.be>
+ *
+ * 4.50  Oct 19, 1998  -- New maintainers!
+ *                         Jens Axboe <axboe@image.dk>
+ *                         Chris Zwilling <chris@cloudnet.com>
+ *
+ * 4.51  Dec 23, 1998  -- Jens Axboe <axboe@image.dk>
+ *                      - ide_cdrom_reset enabled since the ide subsystem
+ *                         handles resets fine now. <axboe@image.dk>
+ *                      - Transfer size fix for Samsung CD-ROMs, thanks to
+ *                        "Ville Hallik" <ville.hallik@mail.ee>.
+ *                      - other minor stuff.
+ *
+ * 4.52  Jan 19, 1999  -- Jens Axboe <axboe@image.dk>
+ *                      - Detect DVD-ROM/RAM drives
+ *
+ * 4.53  Feb 22, 1999   - Include other model Samsung and one Goldstar
+ *                         drive in transfer size limit.
+ *                      - Fix the I/O error when doing eject without a medium
+ *                         loaded on some drives.
+ *                      - CDROMREADMODE2 is now implemented through
+ *                         CDROMREADRAW, since many drives don't support
+ *                         MODE2 (even though ATAPI 2.6 says they must).
+ *                      - Added ignore parameter to ide-cd (as a module), eg
+ *                         	insmod ide-cd ignore='hda hdb'
+ *                         Useful when using ide-cd in conjunction with
+ *                         ide-scsi. TODO: non-modular way of doing the
+ *                         same.
+ *
+ * 4.54  Aug 5, 1999	- Support for MMC2 class commands through the generic
+ *			  packet interface to cdrom.c.
+ *			- Unified audio ioctl support, most of it.
+ *			- cleaned up various deprecated verify_area().
+ *			- Added ide_cdrom_packet() as the interface for
+ *			  the Uniform generic_packet().
+ *			- bunch of other stuff, will fill in logs later.
+ *			- report 1 slot for non-changers, like the other
+ *			  cd-rom drivers. don't report select disc for
+ *			  non-changers as well.
+ *			- mask out audio playing, if the device can't do it.
+ *
+ * 4.55  Sep 1, 1999	- Eliminated the rest of the audio ioctls, except
+ *			  for CDROMREADTOC[ENTRY|HEADER]. Some of the drivers
+ *			  use this independently of the actual audio handling.
+ *			  They will disappear later when I get the time to
+ *			  do it cleanly.
+ *			- Minimize the TOC reading - only do it when we
+ *			  know a media change has occurred.
+ *			- Moved all the CDROMREADx ioctls to the Uniform layer.
+ *			- Heiko Eissfeldt <heiko@colossus.escape.de> supplied
+ *			  some fixes for CDI.
+ *			- CD-ROM leaving door locked fix from Andries
+ *			  Brouwer <Andries.Brouwer@cwi.nl>
+ *			- Erik Andersen <andersen@xmission.com> unified
+ *			  commands across the various drivers and how
+ *			  sense errors are handled.
+ *
+ * 4.56  Sep 12, 1999	- Removed changer support - it is now in the
+ *			  Uniform layer.
+ *			- Added partition based multisession handling.
+ *			- Mode sense and mode select moved to the
+ *			  Uniform layer.
+ *			- Fixed a problem with WPI CDS-32X drive - it
+ *			  failed the capabilities 
+ *
+ * 4.57  Apr 7, 2000	- Fixed sense reporting.
+ *			- Fixed possible oops in ide_cdrom_get_last_session()
+ *			- Fix locking mania and make ide_cdrom_reset relock
+ *			  the tray if it was locked prior to the reset.
+ *			- Stop spewing errors to log when magicdev polls with
+ *			  TEST_UNIT_READY on some drives.
+ *			- Various fixes from Tobias Ringstrom:
+ *			  - cdrom_read_capacity returns one frame too little.
+ *			  - Fix real capacity reporting.
+ *
+ * 4.58  May 1, 2000	- Clean up ACER50 stuff.
+ *			- Fix small problem with ide_cdrom_capacity
+ *
+ * 4.59  Aug 11, 2000	- Fix changer problem in cdrom_read_toc, we weren't
+ *			  correctly sensing a disc change.
+ *			- Rearranged some code
+ *			- Use extended sense on drives that support it for
+ *			  correctly reporting tray status -- from
+ *			  Michael D Johnson <johnsom@orst.edu>
+ * 4.60  Dec 17, 2003	- Add mt rainier support
+ *			- Bump timeout for packet commands, matches sr
+ *			- Odd stuff
+ * 4.61  Jan 22, 2004	- support hardware sector sizes other than 2kB,
+ *			  Pascal Schmidt <der.eremit@email.de>
+ *
+ *************************************************************************/
+ 
+#define IDECD_VERSION "4.61"
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/cdrom.h>
+#include <linux/ide.h>
+#include <linux/completion.h>
+
+#include <scsi/scsi.h>	/* For SCSI -> ATAPI command conversion */
+
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+
+#include "ide-cd.h"
+
+static DECLARE_MUTEX(idecd_ref_sem);
+
+#define to_ide_cd(obj) container_of(obj, struct cdrom_info, kref) 
+
+#define ide_cd_g(disk) \
+	container_of((disk)->private_data, struct cdrom_info, driver)
+
+static struct cdrom_info *ide_cd_get(struct gendisk *disk)
+{
+	struct cdrom_info *cd = NULL;
+
+	down(&idecd_ref_sem);
+	cd = ide_cd_g(disk);
+	if (cd)
+		kref_get(&cd->kref);
+	up(&idecd_ref_sem);
+	return cd;
+}
+
+static void ide_cd_release(struct kref *);
+
+static void ide_cd_put(struct cdrom_info *cd)
+{
+	down(&idecd_ref_sem);
+	kref_put(&cd->kref, ide_cd_release);
+	up(&idecd_ref_sem);
+}
+
+/****************************************************************************
+ * Generic packet command support and error handling routines.
+ */
+
+/* Mark that we've seen a media change, and invalidate our internal
+   buffers. */
+static void cdrom_saw_media_change (ide_drive_t *drive)
+{
+	struct cdrom_info *info = drive->driver_data;
+	
+	CDROM_STATE_FLAGS (drive)->media_changed = 1;
+	CDROM_STATE_FLAGS (drive)->toc_valid = 0;
+	info->nsectors_buffered = 0;
+}
+
+static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
+			   struct request_sense *sense)
+{
+	int log = 0;
+
+	if (!sense || !rq || (rq->flags & REQ_QUIET))
+		return 0;
+
+	switch (sense->sense_key) {
+		case NO_SENSE: case RECOVERED_ERROR:
+			break;
+		case NOT_READY:
+			/*
+			 * don't care about tray state messages for
+			 * e.g. capacity commands or in-progress or
+			 * becoming ready
+			 */
+			if (sense->asc == 0x3a || sense->asc == 0x04)
+				break;
+			log = 1;
+			break;
+		case ILLEGAL_REQUEST:
+			/*
+			 * don't log START_STOP unit with LoEj set, since
+			 * we cannot reliably check if drive can auto-close
+			 */
+			if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
+				log = 0;
+			break;
+		case UNIT_ATTENTION:
+			/*
+			 * Make good and sure we've seen this potential media
+			 * change. Some drives (i.e. Creative) fail to present
+			 * the correct sense key in the error register.
+			 */
+			cdrom_saw_media_change(drive);
+			break;
+		default:
+			log = 1;
+			break;
+	}
+	return log;
+}
+
+static
+void cdrom_analyze_sense_data(ide_drive_t *drive,
+			      struct request *failed_command,
+			      struct request_sense *sense)
+{
+	if (!cdrom_log_sense(drive, failed_command, sense))
+		return;
+
+	/*
+	 * If a read toc is executed for a CD-R or CD-RW medium where
+	 * the first toc has not been recorded yet, it will fail with
+	 * 05/24/00 (which is a confusing error)
+	 */
+	if (failed_command && failed_command->cmd[0] == GPCMD_READ_TOC_PMA_ATIP)
+		if (sense->sense_key == 0x05 && sense->asc == 0x24)
+			return;
+
+#if VERBOSE_IDE_CD_ERRORS
+	{
+		int i;
+		const char *s = "bad sense key!";
+		char buf[80];
+
+		printk ("ATAPI device %s:\n", drive->name);
+		if (sense->error_code==0x70)
+			printk("  Error: ");
+		else if (sense->error_code==0x71)
+			printk("  Deferred Error: ");
+		else if (sense->error_code == 0x7f)
+			printk("  Vendor-specific Error: ");
+		else
+			printk("  Unknown Error Type: ");
+
+		if (sense->sense_key < ARY_LEN(sense_key_texts))
+			s = sense_key_texts[sense->sense_key];
+
+		printk("%s -- (Sense key=0x%02x)\n", s, sense->sense_key);
+
+		if (sense->asc == 0x40) {
+			sprintf(buf, "Diagnostic failure on component 0x%02x",
+				 sense->ascq);
+			s = buf;
+		} else {
+			int lo = 0, mid, hi = ARY_LEN(sense_data_texts);
+			unsigned long key = (sense->sense_key << 16);
+			key |= (sense->asc << 8);
+			if (!(sense->ascq >= 0x80 && sense->ascq <= 0xdd))
+				key |= sense->ascq;
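+			/* e.g. sense 05/24/00 packs to key 0x052400 */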
+			s = NULL;
+
+			while (hi > lo) {
+				mid = (lo + hi) / 2;
+				if (sense_data_texts[mid].asc_ascq == key ||
+				    sense_data_texts[mid].asc_ascq == (0xff0000|key)) {
+					s = sense_data_texts[mid].text;
+					break;
+				}
+				else if (sense_data_texts[mid].asc_ascq > key)
+					hi = mid;
+				else
+					lo = mid+1;
+			}
+		}
+
+		if (s == NULL) {
+			if (sense->asc > 0x80)
+				s = "(vendor-specific error)";
+			else
+				s = "(reserved error code)";
+		}
+
+		printk(KERN_ERR "  %s -- (asc=0x%02x, ascq=0x%02x)\n",
+			s, sense->asc, sense->ascq);
+
+		if (failed_command != NULL) {
+
+			int lo=0, mid, hi= ARY_LEN (packet_command_texts);
+			s = NULL;
+
+			while (hi > lo) {
+				mid = (lo + hi) / 2;
+				if (packet_command_texts[mid].packet_command ==
+				    failed_command->cmd[0]) {
+					s = packet_command_texts[mid].text;
+					break;
+				}
+				if (packet_command_texts[mid].packet_command >
+				    failed_command->cmd[0])
+					hi = mid;
+				else
+					lo = mid+1;
+			}
+
+			printk (KERN_ERR "  The failed \"%s\" packet command was: \n  \"", s);
+			for (i=0; i<sizeof (failed_command->cmd); i++)
+				printk ("%02x ", failed_command->cmd[i]);
+			printk ("\"\n");
+		}
+
+		/* The SKSV bit specifies validity of the sense_key_specific
+		 * in the next two commands. It is bit 7 of the first byte.
+		 * In the case of NOT_READY, if SKSV is set the drive can
+		 * give us nice ETA readings.
+		 */
+		if (sense->sense_key == NOT_READY && (sense->sks[0] & 0x80)) {
+			int progress = (sense->sks[1] << 8 | sense->sks[2]) * 100;
+			printk(KERN_ERR "  Command is %02d%% complete\n", progress / 0xffff);
+
+		}
+
+		if (sense->sense_key == ILLEGAL_REQUEST &&
+		    (sense->sks[0] & 0x80) != 0) {
+			printk(KERN_ERR "  Error in %s byte %d",
+				(sense->sks[0] & 0x40) != 0 ?
+				"command packet" : "command data",
+				(sense->sks[1] << 8) + sense->sks[2]);
+
+			if ((sense->sks[0] & 0x40) != 0)
+				printk (" bit %d", sense->sks[0] & 0x07);
+
+			printk ("\n");
+		}
+	}
+
+#else /* not VERBOSE_IDE_CD_ERRORS */
+
+	/* Suppress printing unit attention and `in progress of becoming ready'
+	   errors when we're not being verbose. */
+
+	if (sense->sense_key == UNIT_ATTENTION ||
+	    (sense->sense_key == NOT_READY && (sense->asc == 4 ||
+						sense->asc == 0x3a)))
+		return;
+
+	printk(KERN_ERR "%s: error code: 0x%02x  sense_key: 0x%02x  asc: 0x%02x  ascq: 0x%02x\n",
+		drive->name,
+		sense->error_code, sense->sense_key,
+		sense->asc, sense->ascq);
+#endif /* not VERBOSE_IDE_CD_ERRORS */
+}
+
+/*
+ * Initialize a ide-cd packet command request
+ */
+static void cdrom_prepare_request(ide_drive_t *drive, struct request *rq)
+{
+	struct cdrom_info *cd = drive->driver_data;
+
+	ide_init_drive_cmd(rq);
+	rq->flags = REQ_PC;
+	rq->rq_disk = cd->disk;
+}
+
+static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
+				      struct request *failed_command)
+{
+	struct cdrom_info *info		= drive->driver_data;
+	struct request *rq		= &info->request_sense_request;
+
+	if (sense == NULL)
+		sense = &info->sense_data;
+
+	/* stuff the sense request in front of our current request */
+	cdrom_prepare_request(drive, rq);
+
+	rq->data = sense;
+	rq->cmd[0] = GPCMD_REQUEST_SENSE;
+	rq->cmd[4] = rq->data_len = 18;
+
+	rq->flags = REQ_SENSE;
+
+	/* NOTE! Save the failed command in "rq->buffer" */
+	rq->buffer = (void *) failed_command;
+
+	(void) ide_do_drive_cmd(drive, rq, ide_preempt);
+}
+
+static void cdrom_end_request (ide_drive_t *drive, int uptodate)
+{
+	struct request *rq = HWGROUP(drive)->rq;
+	int nsectors = rq->hard_cur_sectors;
+
+	if ((rq->flags & REQ_SENSE) && uptodate) {
+		/*
+		 * For REQ_SENSE, "rq->buffer" points to the original failed
+		 * request
+		 */
+		struct request *failed = (struct request *) rq->buffer;
+		struct cdrom_info *info = drive->driver_data;
+		void *sense = &info->sense_data;
+		unsigned long flags;
+
+		if (failed) {
+			if (failed->sense) {
+				sense = failed->sense;
+				failed->sense_len = rq->sense_len;
+			}
+
+			/*
+			 * now end failed request
+			 */
+			spin_lock_irqsave(&ide_lock, flags);
+			end_that_request_chunk(failed, 0, failed->data_len);
+			end_that_request_last(failed);
+			spin_unlock_irqrestore(&ide_lock, flags);
+		}
+
+		cdrom_analyze_sense_data(drive, failed, sense);
+	}
+
+	if (!rq->current_nr_sectors && blk_fs_request(rq))
+		uptodate = 1;
+	/* make sure it's fully ended */
+	if (blk_pc_request(rq))
+		nsectors = (rq->data_len + 511) >> 9;
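+	/* rounds the byte count up to 512-byte sectors, e.g. 18 bytes -> 1 */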
+	if (!nsectors)
+		nsectors = 1;
+
+	ide_end_request(drive, uptodate, nsectors);
+}
+
+/* Returns 0 if the request should be continued.
+   Returns 1 if the request was ended. */
+static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
+{
+	struct request *rq = HWGROUP(drive)->rq;
+	int stat, err, sense_key;
+	
+	/* Check for errors. */
+	stat = HWIF(drive)->INB(IDE_STATUS_REG);
+	if (stat_ret)
+		*stat_ret = stat;
+
+	if (OK_STAT(stat, good_stat, BAD_R_STAT))
+		return 0;
+
+	/* Get the IDE error register. */
+	err = HWIF(drive)->INB(IDE_ERROR_REG);
+	sense_key = err >> 4;
+
+	if (rq == NULL) {
+		printk("%s: missing rq in cdrom_decode_status\n", drive->name);
+		return 1;
+	}
+
+	if (rq->flags & REQ_SENSE) {
+		/* We got an error trying to get sense info
+		   from the drive (probably while trying
+		   to recover from a former error).  Just give up. */
+
+		rq->flags |= REQ_FAILED;
+		cdrom_end_request(drive, 0);
+		ide_error(drive, "request sense failure", stat);
+		return 1;
+
+	} else if (rq->flags & (REQ_PC | REQ_BLOCK_PC)) {
+		/* All other functions, except for READ. */
+		unsigned long flags;
+
+		/*
+		 * if we have an error, pass back CHECK_CONDITION as the
+		 * scsi status byte
+		 */
+		if ((rq->flags & REQ_BLOCK_PC) && !rq->errors)
+			rq->errors = SAM_STAT_CHECK_CONDITION;
+
+		/* Check for tray open. */
+		if (sense_key == NOT_READY) {
+			cdrom_saw_media_change (drive);
+		} else if (sense_key == UNIT_ATTENTION) {
+			/* Check for media change. */
+			cdrom_saw_media_change (drive);
+			/*printk("%s: media changed\n",drive->name);*/
+			return 0;
+		} else if (!(rq->flags & REQ_QUIET)) {
+			/* Otherwise, print an error. */
+			ide_dump_status(drive, "packet command error", stat);
+		}
+		
+		rq->flags |= REQ_FAILED;
+
+		/*
+		 * instead of playing games with moving completions around,
+		 * remove failed request completely and end it when the
+		 * request sense has completed
+		 */
+		if (stat & ERR_STAT) {
+			spin_lock_irqsave(&ide_lock, flags);
+			blkdev_dequeue_request(rq);
+			HWGROUP(drive)->rq = NULL;
+			spin_unlock_irqrestore(&ide_lock, flags);
+
+			cdrom_queue_request_sense(drive, rq->sense, rq);
+		} else
+			cdrom_end_request(drive, 0);
+
+	} else if (blk_fs_request(rq)) {
+		int do_end_request = 0;
+
+		/* Handle errors from READ and WRITE requests. */
+
+		if (blk_noretry_request(rq))
+			do_end_request = 1;
+
+		if (sense_key == NOT_READY) {
+			/* Tray open. */
+			if (rq_data_dir(rq) == READ) {
+				cdrom_saw_media_change (drive);
+
+				/* Fail the request. */
+				printk ("%s: tray open\n", drive->name);
+				do_end_request = 1;
+			} else {
+				struct cdrom_info *info = drive->driver_data;
+
+				/* allow the drive 5 seconds to recover, some
+				 * devices will return this error while flushing
+				 * data from cache */
+				if (!rq->errors)
+					info->write_timeout = jiffies + ATAPI_WAIT_WRITE_BUSY;
+				rq->errors = 1;
+				if (time_after(jiffies, info->write_timeout))
+					do_end_request = 1;
+				else {
+					unsigned long flags;
+
+					/*
+					 * take a breather relying on the
+					 * unplug timer to kick us again
+					 */
+					spin_lock_irqsave(&ide_lock, flags);
+					blk_plug_device(drive->queue);
+					spin_unlock_irqrestore(&ide_lock,flags);
+					return 1;
+				}
+			}
+		} else if (sense_key == UNIT_ATTENTION) {
+			/* Media change. */
+			cdrom_saw_media_change (drive);
+
+			/* Arrange to retry the request.
+			   But be sure to give up if we've retried
+			   too many times. */
+			if (++rq->errors > ERROR_MAX)
+				do_end_request = 1;
+		} else if (sense_key == ILLEGAL_REQUEST ||
+			   sense_key == DATA_PROTECT) {
+			/* No point in retrying after an illegal
+			   request or data protect error.*/
+			ide_dump_status (drive, "command error", stat);
+			do_end_request = 1;
+		} else if (sense_key == MEDIUM_ERROR) {
+			/* No point in re-trying a zillion times on a bad 
+			 * sector...  If we got here the error is not correctable */
+			ide_dump_status (drive, "media error (bad sector)", stat);
+			do_end_request = 1;
+		} else if (sense_key == BLANK_CHECK) {
+			/* Disk appears blank ?? */
+			ide_dump_status (drive, "media error (blank)", stat);
+			do_end_request = 1;
+		} else if ((err & ~ABRT_ERR) != 0) {
+			/* Go to the default handler
+			   for other errors. */
+			ide_error(drive, "cdrom_decode_status", stat);
+			return 1;
+		} else if ((++rq->errors > ERROR_MAX)) {
+			/* We've racked up too many retries.  Abort. */
+			do_end_request = 1;
+		}
+
+		if (do_end_request)
+			cdrom_end_request(drive, 0);
+
+		/* If we got a CHECK_CONDITION status,
+		   queue a request sense command. */
+		if ((stat & ERR_STAT) != 0)
+			cdrom_queue_request_sense(drive, NULL, NULL);
+	} else {
+		blk_dump_rq_flags(rq, "ide-cd: bad rq");
+		cdrom_end_request(drive, 0);
+	}
+
+	/* Retry, or handle the next request. */
+	return 1;
+}
+
+static int cdrom_timer_expiry(ide_drive_t *drive)
+{
+	struct request *rq = HWGROUP(drive)->rq;
+	unsigned long wait = 0;
+
+	/*
+	 * Some commands are *slow* and normally take a long time to
+	 * complete. Usually we can use the ATAPI "disconnect" to bypass
+	 * this, but not all commands/drives support that. Let
+	 * ide_timer_expiry keep polling us for these.
+	 */
+	switch (rq->cmd[0]) {
+		case GPCMD_BLANK:
+		case GPCMD_FORMAT_UNIT:
+		case GPCMD_RESERVE_RZONE_TRACK:
+		case GPCMD_CLOSE_TRACK:
+		case GPCMD_FLUSH_CACHE:
+			wait = ATAPI_WAIT_PC;
+			break;
+		default:
+			if (!(rq->flags & REQ_QUIET))
+				printk(KERN_INFO "ide-cd: cmd 0x%x timed out\n", rq->cmd[0]);
+			wait = 0;
+			break;
+	}
+	return wait;
+}
+
+/* Set up the device registers for transferring a packet command on DEV,
+   expecting to later transfer XFERLEN bytes.  HANDLER is the routine
+   which actually transfers the command to the drive.  If this is a
+   drq_interrupt device, this routine will arrange for HANDLER to be
+   called when the interrupt from the drive arrives.  Otherwise, HANDLER
+   will be called immediately after the drive is prepared for the transfer. */
+
+static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
+						  int xferlen,
+						  ide_handler_t *handler)
+{
+	ide_startstop_t startstop;
+	struct cdrom_info *info = drive->driver_data;
+	ide_hwif_t *hwif = drive->hwif;
+
+	/* Wait for the controller to be idle. */
+	if (ide_wait_stat(&startstop, drive, 0, BUSY_STAT, WAIT_READY))
+		return startstop;
+
+	if (info->dma)
+		info->dma = !hwif->dma_setup(drive);
+
+	/* Set up the controller registers. */
+	/* FIXME: for Virtual DMA we must check harder */
+	HWIF(drive)->OUTB(info->dma, IDE_FEATURE_REG);
+	HWIF(drive)->OUTB(0, IDE_IREASON_REG);
+	HWIF(drive)->OUTB(0, IDE_SECTOR_REG);
+
+	HWIF(drive)->OUTB(xferlen & 0xff, IDE_BCOUNTL_REG);
+	HWIF(drive)->OUTB(xferlen >> 8  , IDE_BCOUNTH_REG);
+	if (IDE_CONTROL_REG)
+		HWIF(drive)->OUTB(drive->ctl, IDE_CONTROL_REG);
+ 
+	if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) {
+		/* packet command */
+		ide_execute_command(drive, WIN_PACKETCMD, handler, ATAPI_WAIT_PC, cdrom_timer_expiry);
+		return ide_started;
+	} else {
+		unsigned long flags;
+
+		/* packet command */
+		spin_lock_irqsave(&ide_lock, flags);
+		hwif->OUTBSYNC(drive, WIN_PACKETCMD, IDE_COMMAND_REG);
+		ndelay(400);
+		spin_unlock_irqrestore(&ide_lock, flags);
+
+		return (*handler) (drive);
+	}
+}
+
+/* Send a packet command to DRIVE described by CMD_BUF and CMD_LEN.
+   The device registers must have already been prepared
+   by cdrom_start_packet_command.
+   HANDLER is the interrupt handler to call when the command completes
+   or there's data ready. */
+/*
+ * changed 5 parameters to 3 for dvd-ram
+ * struct packet_command *pc; now packet_command_t *pc;
+ */
+#define ATAPI_MIN_CDB_BYTES 12
+static ide_startstop_t cdrom_transfer_packet_command (ide_drive_t *drive,
+					  struct request *rq,
+					  ide_handler_t *handler)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	int cmd_len;
+	struct cdrom_info *info = drive->driver_data;
+	ide_startstop_t startstop;
+
+	if (CDROM_CONFIG_FLAGS(drive)->drq_interrupt) {
+		/* Here we should have been called after receiving an interrupt
+		   from the device.  DRQ should now be set. */
+
+		/* Check for errors. */
+		if (cdrom_decode_status(drive, DRQ_STAT, NULL))
+			return ide_stopped;
+	} else {
+		/* Otherwise, we must wait for DRQ to get set. */
+		if (ide_wait_stat(&startstop, drive, DRQ_STAT,
+				BUSY_STAT, WAIT_READY))
+			return startstop;
+	}
+
+	/* Arm the interrupt handler. */
+	ide_set_handler(drive, handler, rq->timeout, cdrom_timer_expiry);
+
+	/* ATAPI commands get padded out to 12 bytes minimum */
+	cmd_len = COMMAND_SIZE(rq->cmd[0]);
+	if (cmd_len < ATAPI_MIN_CDB_BYTES)
+		cmd_len = ATAPI_MIN_CDB_BYTES;
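+	/* e.g. a 10-byte READ(10) CDB goes out as 12 bytes on the wire */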
+
+	/* Send the command to the device. */
+	HWIF(drive)->atapi_output_bytes(drive, rq->cmd, cmd_len);
+
+	/* Start the DMA if need be */
+	if (info->dma)
+		hwif->dma_start(drive);
+
+	return ide_started;
+}
+
+/****************************************************************************
+ * Block read functions.
+ */
+
+/*
+ * Buffer up to SECTORS_TO_TRANSFER sectors from the drive in our sector
+ * buffer.  Once the first sector is added, any subsequent sectors are
+ * assumed to be continuous (until the buffer is cleared).  For the first
+ * sector added, SECTOR is its sector number.  (SECTOR is then ignored until
+ * the buffer is cleared.)
+ */
+static void cdrom_buffer_sectors (ide_drive_t *drive, unsigned long sector,
+                                  int sectors_to_transfer)
+{
+	struct cdrom_info *info = drive->driver_data;
+
+	/* Number of sectors to read into the buffer. */
+	int sectors_to_buffer = min_t(int, sectors_to_transfer,
+				     (SECTOR_BUFFER_SIZE >> SECTOR_BITS) -
+				       info->nsectors_buffered);
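+	/* e.g. if the buffer holds 16 sectors and 10 are already buffered,
+	   at most 6 more are kept; the remainder is thrown away below */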
+
+	char *dest;
+
+	/* If we couldn't get a buffer, don't try to buffer anything... */
+	if (info->buffer == NULL)
+		sectors_to_buffer = 0;
+
+	/* If this is the first sector in the buffer, remember its number. */
+	if (info->nsectors_buffered == 0)
+		info->sector_buffered = sector;
+
+	/* Read the data into the buffer. */
+	dest = info->buffer + info->nsectors_buffered * SECTOR_SIZE;
+	while (sectors_to_buffer > 0) {
+		HWIF(drive)->atapi_input_bytes(drive, dest, SECTOR_SIZE);
+		--sectors_to_buffer;
+		--sectors_to_transfer;
+		++info->nsectors_buffered;
+		dest += SECTOR_SIZE;
+	}
+
+	/* Throw away any remaining data. */
+	while (sectors_to_transfer > 0) {
+		static char dum[SECTOR_SIZE];
+		HWIF(drive)->atapi_input_bytes(drive, dum, sizeof (dum));
+		--sectors_to_transfer;
+	}
+}
+
+/*
+ * Check the contents of the interrupt reason register from the cdrom
+ * and attempt to recover if there are problems.  Returns 0 if everything's
+ * ok; nonzero if the request has been terminated.
+ */
+static inline
+int cdrom_read_check_ireason (ide_drive_t *drive, int len, int ireason)
+{
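+	/* ireason is the CoD/IO phase bits: 2 means data for the host,
+	   the expected case for a read; other values are handled below */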
+	if (ireason == 2)
+		return 0;
+	else if (ireason == 0) {
+		/* Whoops... The drive is expecting to receive data from us! */
+		printk(KERN_ERR "%s: read_intr: Drive wants to transfer data the "
+						"wrong way!\n", drive->name);
+
+		/* Throw some data at the drive so it doesn't hang
+		   and quit this request. */
+		while (len > 0) {
+			int dum = 0;
+			HWIF(drive)->atapi_output_bytes(drive, &dum, sizeof (dum));
+			len -= sizeof (dum);
+		}
+	} else if (ireason == 1) {
+		/* Some drives (ASUS) seem to tell us that status
+		 * info is available; just get it and ignore it.
+		 */
+		(void) HWIF(drive)->INB(IDE_STATUS_REG);
+		return 0;
+	} else {
+		/* Drive wants a command packet, or invalid ireason... */
+		printk(KERN_ERR "%s: read_intr: bad interrupt reason %x\n", drive->name,
+								ireason);
+	}
+
+	cdrom_end_request(drive, 0);
+	return -1;
+}
+
+/*
+ * Interrupt routine.  Called when a read request has completed.
+ */
+static ide_startstop_t cdrom_read_intr (ide_drive_t *drive)
+{
+	int stat;
+	int ireason, len, sectors_to_transfer, nskip;
+	struct cdrom_info *info = drive->driver_data;
+	u8 lowcyl = 0, highcyl = 0;
+	int dma = info->dma, dma_error = 0;
+
+	struct request *rq = HWGROUP(drive)->rq;
+
+	/*
+	 * handle dma case
+	 */
+	if (dma) {
+		info->dma = 0;
+		if ((dma_error = HWIF(drive)->ide_dma_end(drive)))
+			__ide_dma_off(drive);
+	}
+
+	if (cdrom_decode_status(drive, 0, &stat))
+		return ide_stopped;
+
+	if (dma) {
+		if (!dma_error) {
+			ide_end_request(drive, 1, rq->nr_sectors);
+			return ide_stopped;
+		} else
+			return ide_error(drive, "dma error", stat);
+	}
+
+	/* Read the interrupt reason and the transfer length. */
+	ireason = HWIF(drive)->INB(IDE_IREASON_REG) & 0x3;
+	lowcyl  = HWIF(drive)->INB(IDE_BCOUNTL_REG);
+	highcyl = HWIF(drive)->INB(IDE_BCOUNTH_REG);
+
+	len = lowcyl + (256 * highcyl);
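+	/* e.g. lowcyl 0x00, highcyl 0x08 -> len 2048, one CD-ROM data sector */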
+
+	/* If DRQ is clear, the command has completed. */
+	if ((stat & DRQ_STAT) == 0) {
+		/* If we're not done filling the current buffer, complain.
+		   Otherwise, complete the command normally. */
+		if (rq->current_nr_sectors > 0) {
+			printk (KERN_ERR "%s: cdrom_read_intr: data underrun (%d blocks)\n",
+				drive->name, rq->current_nr_sectors);
+			rq->flags |= REQ_FAILED;
+			cdrom_end_request(drive, 0);
+		} else
+			cdrom_end_request(drive, 1);
+		return ide_stopped;
+	}
+
+	/* Check that the drive is expecting to do the same thing we are. */
+	if (cdrom_read_check_ireason (drive, len, ireason))
+		return ide_stopped;
+
+	/* Assume that the drive will always provide data in multiples
+	   of at least SECTOR_SIZE, as it gets hairy to keep track
+	   of the transfers otherwise. */
+	if ((len % SECTOR_SIZE) != 0) {
+		printk (KERN_ERR "%s: cdrom_read_intr: Bad transfer size %d\n",
+			drive->name, len);
+		if (CDROM_CONFIG_FLAGS(drive)->limit_nframes)
+			printk (KERN_ERR "  This drive is not supported by this version of the driver\n");
+		else {
+			printk (KERN_ERR "  Trying to limit transfer sizes\n");
+			CDROM_CONFIG_FLAGS(drive)->limit_nframes = 1;
+		}
+		cdrom_end_request(drive, 0);
+		return ide_stopped;
+	}
+
+	/* The number of sectors we need to read from the drive. */
+	sectors_to_transfer = len / SECTOR_SIZE;
+
+	/* First, figure out if we need to bit-bucket
+	   any of the leading sectors. */
+	nskip = min_t(int, rq->current_nr_sectors - bio_cur_sectors(rq->bio), sectors_to_transfer);
+
+	while (nskip > 0) {
+		/* We need to throw away a sector. */
+		static char dum[SECTOR_SIZE];
+		HWIF(drive)->atapi_input_bytes(drive, dum, sizeof (dum));
+
+		--rq->current_nr_sectors;
+		--nskip;
+		--sectors_to_transfer;
+	}
+
+	/* Now loop while we still have data to read from the drive. */
+	while (sectors_to_transfer > 0) {
+		int this_transfer;
+
+		/* If we've filled the present buffer but there's another
+		   chained buffer after it, move on. */
+		if (rq->current_nr_sectors == 0 && rq->nr_sectors)
+			cdrom_end_request(drive, 1);
+
+		/* If the buffers are full, cache the rest of the data in our
+		   internal buffer. */
+		if (rq->current_nr_sectors == 0) {
+			cdrom_buffer_sectors(drive, rq->sector, sectors_to_transfer);
+			sectors_to_transfer = 0;
+		} else {
+			/* Transfer data to the buffers.
+			   Figure out how many sectors we can transfer
+			   to the current buffer. */
+			this_transfer = min_t(int, sectors_to_transfer,
+					     rq->current_nr_sectors);
+
+			/* Read this_transfer sectors
+			   into the current buffer. */
+			while (this_transfer > 0) {
+				HWIF(drive)->atapi_input_bytes(drive, rq->buffer, SECTOR_SIZE);
+				rq->buffer += SECTOR_SIZE;
+				--rq->nr_sectors;
+				--rq->current_nr_sectors;
+				++rq->sector;
+				--this_transfer;
+				--sectors_to_transfer;
+			}
+		}
+	}
+
+	/* Done moving data!  Wait for another interrupt. */
+	ide_set_handler(drive, &cdrom_read_intr, ATAPI_WAIT_PC, NULL);
+	return ide_started;
+}
+
+/*
+ * Try to satisfy some of the current read request from our cached data.
+ * Returns nonzero if the request has been completed, zero otherwise.
+ */
+static int cdrom_read_from_buffer (ide_drive_t *drive)
+{
+	struct cdrom_info *info = drive->driver_data;
+	struct request *rq = HWGROUP(drive)->rq;
+	unsigned short sectors_per_frame;
+
+	sectors_per_frame = queue_hardsect_size(drive->queue) >> SECTOR_BITS;
+
+	/* Can't do anything if there's no buffer. */
+	if (info->buffer == NULL) return 0;
+
+	/* Loop while this request needs data and the next block is present
+	   in our cache. */
+	while (rq->nr_sectors > 0 &&
+	       rq->sector >= info->sector_buffered &&
+	       rq->sector < info->sector_buffered + info->nsectors_buffered) {
+		if (rq->current_nr_sectors == 0)
+			cdrom_end_request(drive, 1);
+
+		memcpy (rq->buffer,
+			info->buffer +
+			(rq->sector - info->sector_buffered) * SECTOR_SIZE,
+			SECTOR_SIZE);
+		rq->buffer += SECTOR_SIZE;
+		--rq->current_nr_sectors;
+		--rq->nr_sectors;
+		++rq->sector;
+	}
+
+	/* If we've satisfied the current request,
+	   terminate it successfully. */
+	if (rq->nr_sectors == 0) {
+		cdrom_end_request(drive, 1);
+		return -1;
+	}
+
+	/* Move on to the next buffer if needed. */
+	if (rq->current_nr_sectors == 0)
+		cdrom_end_request(drive, 1);
+
+	/* If this condition does not hold, then the kludge I use to
+	   represent the number of sectors to skip at the start of a transfer
+	   will fail.  I think that this will never happen, but let's be
+	   paranoid and check. */
+	if (rq->current_nr_sectors < bio_cur_sectors(rq->bio) &&
+	    (rq->sector & (sectors_per_frame - 1))) {
+		printk(KERN_ERR "%s: cdrom_read_from_buffer: buffer botch (%ld)\n",
+			drive->name, (long)rq->sector);
+		cdrom_end_request(drive, 0);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Routine to send a read packet command to the drive.
+ * This is usually called directly from cdrom_start_read.
+ * However, for drq_interrupt devices, it is called from an interrupt
+ * when the drive is ready to accept the command.
+ */
+static ide_startstop_t cdrom_start_read_continuation (ide_drive_t *drive)
+{
+	struct request *rq = HWGROUP(drive)->rq;
+	unsigned short sectors_per_frame;
+	int nskip;
+
+	sectors_per_frame = queue_hardsect_size(drive->queue) >> SECTOR_BITS;
+
+	/* If the requested sector doesn't start on a cdrom block boundary,
+	   we must adjust the start of the transfer so that it does,
+	   and remember to skip the first few sectors.
+	   If the CURRENT_NR_SECTORS field is larger than the size
+	   of the buffer, it will mean that we're to skip a number
+	   of sectors equal to the amount by which CURRENT_NR_SECTORS
+	   is larger than the buffer size. */
+	nskip = rq->sector & (sectors_per_frame - 1);
+	if (nskip > 0) {
+		/* Sanity check... */
+		if (rq->current_nr_sectors != bio_cur_sectors(rq->bio) &&
+			(rq->sector & (sectors_per_frame - 1))) {
+			printk(KERN_ERR "%s: cdrom_start_read_continuation: buffer botch (%u)\n",
+				drive->name, rq->current_nr_sectors);
+			cdrom_end_request(drive, 0);
+			return ide_stopped;
+		}
+		rq->current_nr_sectors += nskip;
+	}
+
+	/* Set up the command */
+	rq->timeout = ATAPI_WAIT_PC;
+
+	/* Send the command to the drive and return. */
+	return cdrom_transfer_packet_command(drive, rq, &cdrom_read_intr);
+}
+
+
+#define IDECD_SEEK_THRESHOLD	(1000)			/* 1000 blocks */
+#define IDECD_SEEK_TIMER	(5 * WAIT_MIN_SLEEP)	/* 100 ms */
+#define IDECD_SEEK_TIMEOUT	(2 * WAIT_CMD)		/* 20 sec */
+
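+/* Interrupt routine for the completion of a seek.  Flag the drive as
+   seeking, and disable DSC overlap if successive seeks keep taking
+   longer than IDECD_SEEK_TIMER. */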
+static ide_startstop_t cdrom_seek_intr (ide_drive_t *drive)
+{
+	struct cdrom_info *info = drive->driver_data;
+	int stat;
+	static int retry = 10;
+
+	if (cdrom_decode_status(drive, 0, &stat))
+		return ide_stopped;
+	CDROM_CONFIG_FLAGS(drive)->seeking = 1;
+
+	if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) {
+		if (--retry == 0) {
+			/*
+			 * this condition is far too common to bother
+			 * users about it
+			 */
+			/* printk("%s: disabled DSC seek overlap\n", drive->name); */
+			drive->dsc_overlap = 0;
+		}
+	}
+	return ide_stopped;
+}
+
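+/* Continuation for a seek: convert the request's sector into a hardware
+   frame address, build the GPCMD_SEEK packet and send it to the drive. */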
+static ide_startstop_t cdrom_start_seek_continuation (ide_drive_t *drive)
+{
+	struct request *rq = HWGROUP(drive)->rq;
+	sector_t frame = rq->sector;
+
+	sector_div(frame, queue_hardsect_size(drive->queue) >> SECTOR_BITS);
+
+	memset(rq->cmd, 0, sizeof(rq->cmd));
+	rq->cmd[0] = GPCMD_SEEK;
+	put_unaligned(cpu_to_be32(frame), (unsigned int *) &rq->cmd[2]);
+
+	rq->timeout = ATAPI_WAIT_PC;
+	return cdrom_transfer_packet_command(drive, rq, &cdrom_seek_intr);
+}
+
+static ide_startstop_t cdrom_start_seek (ide_drive_t *drive, unsigned int block)
+{
+	struct cdrom_info *info = drive->driver_data;
+
+	info->dma = 0;
+	info->start_seek = jiffies;
+	return cdrom_start_packet_command(drive, 0, cdrom_start_seek_continuation);
+}
+
+/* Fix up a possibly partially-processed request so that we can
+   start it over entirely, or even put it back on the request queue. */
+static void restore_request (struct request *rq)
+{
+	if (rq->buffer != bio_data(rq->bio)) {
+		sector_t n = (rq->buffer - (char *) bio_data(rq->bio)) / SECTOR_SIZE;
+
+		rq->buffer = bio_data(rq->bio);
+		rq->nr_sectors += n;
+		rq->sector -= n;
+	}
+	rq->hard_cur_sectors = rq->current_nr_sectors = bio_cur_sectors(rq->bio);
+	rq->hard_nr_sectors = rq->nr_sectors;
+	rq->hard_sector = rq->sector;
+	rq->q->prep_rq_fn(rq->q, rq);
+}
+
+/*
+ * Start a read request from the CD-ROM.
+ */
+static ide_startstop_t cdrom_start_read (ide_drive_t *drive, unsigned int block)
+{
+	struct cdrom_info *info = drive->driver_data;
+	struct request *rq = HWGROUP(drive)->rq;
+	unsigned short sectors_per_frame;
+
+	sectors_per_frame = queue_hardsect_size(drive->queue) >> SECTOR_BITS;
+
+	/* We may be retrying this request after an error.  Fix up
+	   any weirdness which might be present in the request packet. */
+	restore_request(rq);
+
+	/* Satisfy whatever we can of this request from our cached sector. */
+	if (cdrom_read_from_buffer(drive))
+		return ide_stopped;
+
+	blk_attempt_remerge(drive->queue, rq);
+
+	/* Clear the local sector buffer. */
+	info->nsectors_buffered = 0;
+
+	/* use dma, if possible. */
+	info->dma = drive->using_dma;
+	if ((rq->sector & (sectors_per_frame - 1)) ||
+	    (rq->nr_sectors & (sectors_per_frame - 1)))
+		info->dma = 0;
+
+	/* Start sending the read request to the drive. */
+	return cdrom_start_packet_command(drive, 32768, cdrom_start_read_continuation);
+}
+
+/****************************************************************************
+ * Execute all other packet commands.
+ */
+
+/* Interrupt routine for packet command completion. */
+static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive)
+{
+	int ireason, len, thislen;
+	struct request *rq = HWGROUP(drive)->rq;
+	u8 lowcyl = 0, highcyl = 0;
+	int stat;
+
+	/* Check for errors. */
+	if (cdrom_decode_status(drive, 0, &stat))
+		return ide_stopped;
+
+	/* Read the interrupt reason and the transfer length. */
+	ireason = HWIF(drive)->INB(IDE_IREASON_REG);
+	lowcyl  = HWIF(drive)->INB(IDE_BCOUNTL_REG);
+	highcyl = HWIF(drive)->INB(IDE_BCOUNTH_REG);
+
+	len = lowcyl + (256 * highcyl);
+
+	/* If DRQ is clear, the command has completed.
+	   Complain if we still have data left to transfer. */
+	if ((stat & DRQ_STAT) == 0) {
+		/* Some of the trailing request sense fields are optional, and
+		   some drives don't send them.  Sigh. */
+		if (rq->cmd[0] == GPCMD_REQUEST_SENSE &&
+		    rq->data_len > 0 &&
+		    rq->data_len <= 5) {
+			while (rq->data_len > 0) {
+				*(unsigned char *)rq->data++ = 0;
+				--rq->data_len;
+			}
+		}
+
+		if (rq->data_len == 0)
+			cdrom_end_request(drive, 1);
+		else {
+			/* This printk stays commented out because it always
+			   triggers right after a reset occurs, and it is
+			   annoying to always print expected stuff. */
+			/*
+			printk ("%s: cdrom_pc_intr: data underrun %d\n",
+				drive->name, pc->buflen);
+			*/
+			rq->flags |= REQ_FAILED;
+			cdrom_end_request(drive, 0);
+		}
+		return ide_stopped;
+	}
+
+	/* Figure out how much data to transfer. */
+	thislen = rq->data_len;
+	if (thislen > len)
+		thislen = len;
+
+	/* The drive wants to be written to. */
+	if ((ireason & 3) == 0) {
+		if (!rq->data) {
+			blk_dump_rq_flags(rq, "cdrom_pc_intr, write");
+			goto confused;
+		}
+		/* Transfer the data. */
+		HWIF(drive)->atapi_output_bytes(drive, rq->data, thislen);
+
+		/* If we haven't moved enough data to satisfy the drive,
+		   add some padding. */
+		while (len > thislen) {
+			int dum = 0;
+			HWIF(drive)->atapi_output_bytes(drive, &dum, sizeof(dum));
+			len -= sizeof(dum);
+		}
+
+		/* Keep count of how much data we've moved. */
+		rq->data += thislen;
+		rq->data_len -= thislen;
+	}
+
+	/* Same drill for reading. */
+	else if ((ireason & 3) == 2) {
+		if (!rq->data) {
+			blk_dump_rq_flags(rq, "cdrom_pc_intr, read");
+			goto confused;
+		}
+		/* Transfer the data. */
+		HWIF(drive)->atapi_input_bytes(drive, rq->data, thislen);
+
+		/* If we haven't moved enough data to satisfy the drive,
+		   add some padding. */
+		while (len > thislen) {
+			int dum = 0;
+			HWIF(drive)->atapi_input_bytes(drive, &dum, sizeof(dum));
+			len -= sizeof(dum);
+		}
+
+		/* Keep count of how much data we've moved. */
+		rq->data += thislen;
+		rq->data_len -= thislen;
+
+		if (rq->flags & REQ_SENSE)
+			rq->sense_len += thislen;
+	} else {
+confused:
+		printk (KERN_ERR "%s: cdrom_pc_intr: The drive "
+			"appears confused (ireason = 0x%02x)\n",
+			drive->name, ireason);
+		rq->flags |= REQ_FAILED;
+	}
+
+	/* Now we wait for another interrupt. */
+	ide_set_handler(drive, &cdrom_pc_intr, ATAPI_WAIT_PC, cdrom_timer_expiry);
+	return ide_started;
+}
+
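+/* Continuation for packet commands: apply the default timeout if none is
+   set, then send the packet with cdrom_pc_intr handling completion. */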
+static ide_startstop_t cdrom_do_pc_continuation (ide_drive_t *drive)
+{
+	struct request *rq = HWGROUP(drive)->rq;
+
+	if (!rq->timeout)
+		rq->timeout = ATAPI_WAIT_PC;
+
+	/* Send the command to the drive and return. */
+	return cdrom_transfer_packet_command(drive, rq, &cdrom_pc_intr);
+}
+
+
+static ide_startstop_t cdrom_do_packet_command (ide_drive_t *drive)
+{
+	int len;
+	struct request *rq = HWGROUP(drive)->rq;
+	struct cdrom_info *info = drive->driver_data;
+
+	info->dma = 0;
+	rq->flags &= ~REQ_FAILED;
+	len = rq->data_len;
+
+	/* Start sending the command to the drive. */
+	return cdrom_start_packet_command(drive, len, cdrom_do_pc_continuation);
+}
+
+
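+/* Queue a packet command and wait for it to complete.  Failed commands
+   are retried a limited number of times when the sense data indicates a
+   media change (unit attention) or a drive that is still loading a disc
+   (not ready). */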
+static
+int cdrom_queue_packet_command(ide_drive_t *drive, struct request *rq)
+{
+	struct request_sense sense;
+	int retries = 10;
+	unsigned int flags = rq->flags;
+
+	if (rq->sense == NULL)
+		rq->sense = &sense;
+
+	/* Start of retry loop. */
+	do {
+		int error;
+		unsigned long time = jiffies;
+		rq->flags = flags;
+
+		error = ide_do_drive_cmd(drive, rq, ide_wait);
+		time = jiffies - time;
+
+		/* FIXME: we should probably abort/retry or something 
+		 * in case of failure */
+		if (rq->flags & REQ_FAILED) {
+			/* The request failed.  Retry if it was due to a unit
+			   attention status
+			   (usually means media was changed). */
+			struct request_sense *reqbuf = rq->sense;
+
+			if (reqbuf->sense_key == UNIT_ATTENTION)
+				cdrom_saw_media_change(drive);
+			else if (reqbuf->sense_key == NOT_READY &&
+				 reqbuf->asc == 4 && reqbuf->ascq != 4) {
+				/* The drive is in the process of loading
+				   a disk.  Retry, but wait a little to give
+				   the drive time to complete the load. */
+				ssleep(2);
+			} else {
+				/* Otherwise, don't retry. */
+				retries = 0;
+			}
+			--retries;
+		}
+
+		/* End of retry loop. */
+	} while ((rq->flags & REQ_FAILED) && retries >= 0);
+
+	/* Return an error if the command failed. */
+	return (rq->flags & REQ_FAILED) ? -EIO : 0;
+}
+
+/*
+ * Write handling
+ */
+static inline int cdrom_write_check_ireason(ide_drive_t *drive, int len, int ireason)
+{
+	/* Two notes about IDE interrupt reason here - 0 means that
+	 * the drive wants to receive data from us, 2 means that
+	 * the drive is expecting to transfer data to us.
+	 */
+	if (ireason == 0)
+		return 0;
+	else if (ireason == 2) {
+		/* Whoops... The drive wants to send data. */
+		printk(KERN_ERR "%s: write_intr: wrong transfer direction!\n",
+							drive->name);
+
+		while (len > 0) {
+			int dum = 0;
+			HWIF(drive)->atapi_input_bytes(drive, &dum, sizeof(dum));
+			len -= sizeof(dum);
+		}
+	} else {
+		/* Drive wants a command packet, or invalid ireason... */
+		printk(KERN_ERR "%s: write_intr: bad interrupt reason %x\n",
+							drive->name, ireason);
+	}
+
+	cdrom_end_request(drive, 0);
+	return 1;
+}
+
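+/* For block-pc requests that completed successfully, patch the INQUIRY
+   response so the ansi-revision and response data format fields
+   advertise ATAPI. */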
+static void post_transform_command(struct request *req)
+{
+	u8 *c = req->cmd;
+	char *ibuf;
+
+	if (!blk_pc_request(req))
+		return;
+
+	if (req->bio)
+		ibuf = bio_data(req->bio);
+	else
+		ibuf = req->data;
+
+	if (!ibuf)
+		return;
+
+	/*
+	 * set ansi-revision and response data as atapi
+	 */
+	if (c[0] == GPCMD_INQUIRY) {
+		ibuf[2] |= 2;
+		ibuf[3] = (ibuf[3] & 0xf0) | 2;
+	}
+}
+
+typedef void (xfer_func_t)(ide_drive_t *, void *, u32);
+
+/*
+ * best way to deal with dma that is not sector aligned right now... note
+ * that in this path we are not using ->data or ->buffer at all. this isr
+ * can replace cdrom_pc_intr, cdrom_read_intr, and cdrom_write_intr in the
+ * future.
+ */
+static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
+{
+	struct cdrom_info *info = drive->driver_data;
+	struct request *rq = HWGROUP(drive)->rq;
+	int dma_error, dma, stat, ireason, len, thislen;
+	u8 lowcyl, highcyl;
+	xfer_func_t *xferfunc;
+	unsigned long flags;
+
+	/* Check for errors. */
+	dma_error = 0;
+	dma = info->dma;
+	if (dma) {
+		info->dma = 0;
+		dma_error = HWIF(drive)->ide_dma_end(drive);
+	}
+
+	if (cdrom_decode_status(drive, 0, &stat))
+		return ide_stopped;
+
+	/*
+	 * using dma, transfer is complete now
+	 */
+	if (dma) {
+		if (dma_error) {
+			printk(KERN_ERR "ide-cd: dma error\n");
+			__ide_dma_off(drive);
+			return ide_error(drive, "dma error", stat);
+		}
+
+		end_that_request_chunk(rq, 1, rq->data_len);
+		rq->data_len = 0;
+		goto end_request;
+	}
+
+	/*
+	 * ok, we fall back to pio :/
+	 */
+	ireason = HWIF(drive)->INB(IDE_IREASON_REG) & 0x3;
+	lowcyl  = HWIF(drive)->INB(IDE_BCOUNTL_REG);
+	highcyl = HWIF(drive)->INB(IDE_BCOUNTH_REG);
+
+	len = lowcyl + (256 * highcyl);
+	thislen = rq->data_len;
+	if (thislen > len)
+		thislen = len;
+
+	/*
+	 * If DRQ is clear, the command has completed.
+	 */
+	if ((stat & DRQ_STAT) == 0)
+		goto end_request;
+
+	/*
+	 * check which way to transfer data
+	 */
+	if (rq_data_dir(rq) == WRITE) {
+		/*
+		 * write to drive
+		 */
+		if (cdrom_write_check_ireason(drive, len, ireason))
+			return ide_stopped;
+
+		xferfunc = HWIF(drive)->atapi_output_bytes;
+	} else  {
+		/*
+		 * read from drive
+		 */
+		if (cdrom_read_check_ireason(drive, len, ireason))
+			return ide_stopped;
+
+		xferfunc = HWIF(drive)->atapi_input_bytes;
+	}
+
+	/*
+	 * transfer data
+	 */
+	while (thislen > 0) {
+		int blen = rq->data_len;
+		char *ptr = rq->data;
+
+		/*
+		 * bio backed?
+		 */
+		if (rq->bio) {
+			ptr = bio_data(rq->bio);
+			blen = bio_iovec(rq->bio)->bv_len;
+		}
+
+		if (!ptr) {
+			printk(KERN_ERR "%s: confused, missing data\n", drive->name);
+			break;
+		}
+
+		if (blen > thislen)
+			blen = thislen;
+
+		xferfunc(drive, ptr, blen);
+
+		thislen -= blen;
+		len -= blen;
+		rq->data_len -= blen;
+
+		if (rq->bio)
+			end_that_request_chunk(rq, 1, blen);
+		else
+			rq->data += blen;
+	}
+
+	/*
+	 * pad, if necessary
+	 */
+	while (len > 0) {
+		int pad = 0;
+
+		xferfunc(drive, &pad, sizeof(pad));
+		len -= sizeof(pad);
+	}
+
+	if (HWGROUP(drive)->handler != NULL)
+		BUG();
+
+	ide_set_handler(drive, cdrom_newpc_intr, rq->timeout, NULL);
+	return ide_started;
+
+end_request:
+	if (!rq->data_len)
+		post_transform_command(rq);
+
+	spin_lock_irqsave(&ide_lock, flags);
+	blkdev_dequeue_request(rq);
+	end_that_request_last(rq);
+	HWGROUP(drive)->rq = NULL;
+	spin_unlock_irqrestore(&ide_lock, flags);
+	return ide_stopped;
+}
+
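+/* Interrupt routine for writes.  DMA transfers complete in one shot; for
+   PIO this mirrors cdrom_read_intr, moving data from the request buffers
+   out to the drive. */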
+static ide_startstop_t cdrom_write_intr(ide_drive_t *drive)
+{
+	int stat, ireason, len, sectors_to_transfer, uptodate;
+	struct cdrom_info *info = drive->driver_data;
+	int dma_error = 0, dma = info->dma;
+	u8 lowcyl = 0, highcyl = 0;
+
+	struct request *rq = HWGROUP(drive)->rq;
+
+	/* Check for errors. */
+	if (dma) {
+		info->dma = 0;
+		if ((dma_error = HWIF(drive)->ide_dma_end(drive))) {
+			printk(KERN_ERR "ide-cd: write dma error\n");
+			__ide_dma_off(drive);
+		}
+	}
+
+	if (cdrom_decode_status(drive, 0, &stat))
+		return ide_stopped;
+
+	/*
+	 * using dma, transfer is complete now
+	 */
+	if (dma) {
+		if (dma_error)
+			return ide_error(drive, "dma error", stat);
+
+		ide_end_request(drive, 1, rq->nr_sectors);
+		return ide_stopped;
+	}
+
+	/* Read the interrupt reason and the transfer length. */
+	ireason = HWIF(drive)->INB(IDE_IREASON_REG);
+	lowcyl  = HWIF(drive)->INB(IDE_BCOUNTL_REG);
+	highcyl = HWIF(drive)->INB(IDE_BCOUNTH_REG);
+
+	len = lowcyl + (256 * highcyl);
+
+	/* If DRQ is clear, the command has completed. */
+	if ((stat & DRQ_STAT) == 0) {
+		/* If we're not done writing, complain.
+		 * Otherwise, complete the command normally.
+		 */
+		uptodate = 1;
+		if (rq->current_nr_sectors > 0) {
+			printk(KERN_ERR "%s: write_intr: data underrun (%d blocks)\n",
+			drive->name, rq->current_nr_sectors);
+			uptodate = 0;
+		}
+		cdrom_end_request(drive, uptodate);
+		return ide_stopped;
+	}
+
+	/* Check that the drive is expecting to do the same thing we are. */
+	if (cdrom_write_check_ireason(drive, len, ireason))
+		return ide_stopped;
+
+	sectors_to_transfer = len / SECTOR_SIZE;
+
+	/*
+	 * now loop and write out the data
+	 */
+	while (sectors_to_transfer > 0) {
+		int this_transfer;
+
+		if (!rq->current_nr_sectors) {
+			printk(KERN_ERR "ide-cd: write_intr: oops\n");
+			break;
+		}
+
+		/*
+		 * Figure out how many sectors we can transfer
+		 */
+		this_transfer = min_t(int, sectors_to_transfer, rq->current_nr_sectors);
+
+		while (this_transfer > 0) {
+			HWIF(drive)->atapi_output_bytes(drive, rq->buffer, SECTOR_SIZE);
+			rq->buffer += SECTOR_SIZE;
+			--rq->nr_sectors;
+			--rq->current_nr_sectors;
+			++rq->sector;
+			--this_transfer;
+			--sectors_to_transfer;
+		}
+
+		/*
+		 * current buffer complete, move on
+		 */
+		if (rq->current_nr_sectors == 0 && rq->nr_sectors)
+			cdrom_end_request(drive, 1);
+	}
+
+	/* re-arm handler */
+	ide_set_handler(drive, &cdrom_write_intr, ATAPI_WAIT_PC, NULL);
+	return ide_started;
+}
+
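+/* Continuation for writes: set the timeout and send the packet, with
+   cdrom_write_intr driving the actual data transfer. */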
+static ide_startstop_t cdrom_start_write_cont(ide_drive_t *drive)
+{
+	struct request *rq = HWGROUP(drive)->rq;
+
+#if 0	/* the immediate bit */
+	rq->cmd[1] = 1 << 3;
+#endif
+	rq->timeout = ATAPI_WAIT_PC;
+
+	return cdrom_transfer_packet_command(drive, rq, cdrom_write_intr);
+}
+
+static ide_startstop_t cdrom_start_write(ide_drive_t *drive, struct request *rq)
+{
+	struct cdrom_info *info = drive->driver_data;
+	struct gendisk *g = info->disk;
+	unsigned short sectors_per_frame = queue_hardsect_size(drive->queue) >> SECTOR_BITS;
+
+	/*
+	 * writes *must* be hardware frame aligned
+	 */
+	if ((rq->nr_sectors & (sectors_per_frame - 1)) ||
+	    (rq->sector & (sectors_per_frame - 1))) {
+		cdrom_end_request(drive, 0);
+		return ide_stopped;
+	}
+
+	/*
+	 * disk has become write protected
+	 */
+	if (g->policy) {
+		cdrom_end_request(drive, 0);
+		return ide_stopped;
+	}
+
+	/*
+	 * for dvd-ram and such media, it's a really big deal to get
+	 * big writes all the time. so scour the queue and attempt to
+	 * remerge requests; often the plugging will not have had time
+	 * to do this properly
+	 */
+	blk_attempt_remerge(drive->queue, rq);
+
+	info->nsectors_buffered = 0;
+
+	/* use dma, if possible. we don't need to check more, since we
+	 * know that the transfer is always (at least!) frame aligned */
+	info->dma = drive->using_dma ? 1 : 0;
+
+	info->devinfo.media_written = 1;
+
+	/* Start sending the write request to the drive. */
+	return cdrom_start_packet_command(drive, 32768, cdrom_start_write_cont);
+}
+
+static ide_startstop_t cdrom_do_newpc_cont(ide_drive_t *drive)
+{
+	struct request *rq = HWGROUP(drive)->rq;
+
+	if (!rq->timeout)
+		rq->timeout = ATAPI_WAIT_PC;
+
+	return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr);
+}
+
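+/* Start a generic block-pc (sg) request.  DMA is used only when both the
+   buffer address and the transfer length satisfy the alignment checks
+   below. */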
+static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
+{
+	struct cdrom_info *info = drive->driver_data;
+
+	rq->flags |= REQ_QUIET;
+
+	info->dma = 0;
+
+	/*
+	 * sg request
+	 */
+	if (rq->bio) {
+		int mask = drive->queue->dma_alignment;
+		unsigned long addr = (unsigned long) page_address(bio_page(rq->bio));
+
+		info->dma = drive->using_dma;
+
+		/*
+		 * check if dma is safe
+		 *
+		 * NOTE! The "len" and "addr" checks should possibly have
+		 * separate masks.
+		 */
+		if ((rq->data_len & 15) || (addr & mask))
+			info->dma = 0;
+	}
+
+	/* Start sending the command to the drive. */
+	return cdrom_start_packet_command(drive, rq->data_len, cdrom_do_newpc_cont);
+}
+
+/****************************************************************************
+ * cdrom driver request routine.
+ */
+static ide_startstop_t
+ide_do_rw_cdrom (ide_drive_t *drive, struct request *rq, sector_t block)
+{
+	ide_startstop_t action;
+	struct cdrom_info *info = drive->driver_data;
+
+	if (blk_fs_request(rq)) {
+		if (CDROM_CONFIG_FLAGS(drive)->seeking) {
+			unsigned long elapsed = jiffies - info->start_seek;
+			int stat = HWIF(drive)->INB(IDE_STATUS_REG);
+
+			if ((stat & SEEK_STAT) != SEEK_STAT) {
+				if (elapsed < IDECD_SEEK_TIMEOUT) {
+					ide_stall_queue(drive, IDECD_SEEK_TIMER);
+					return ide_stopped;
+				}
+				printk (KERN_ERR "%s: DSC timeout\n", drive->name);
+			}
+			CDROM_CONFIG_FLAGS(drive)->seeking = 0;
+		}
+		if ((rq_data_dir(rq) == READ) &&
+		    IDE_LARGE_SEEK(info->last_block, block, IDECD_SEEK_THRESHOLD) &&
+		    drive->dsc_overlap) {
+			action = cdrom_start_seek(drive, block);
+		} else {
+			if (rq_data_dir(rq) == READ)
+				action = cdrom_start_read(drive, block);
+			else
+				action = cdrom_start_write(drive, rq);
+		}
+		info->last_block = block;
+		return action;
+	} else if (rq->flags & (REQ_PC | REQ_SENSE)) {
+		return cdrom_do_packet_command(drive);
+	} else if (rq->flags & REQ_BLOCK_PC) {
+		return cdrom_do_block_pc(drive, rq);
+	} else if (rq->flags & REQ_SPECIAL) {
+		/*
+		 * right now this can only be a reset...
+		 */
+		cdrom_end_request(drive, 1);
+		return ide_stopped;
+	}
+
+	blk_dump_rq_flags(rq, "ide-cd bad flags");
+	cdrom_end_request(drive, 0);
+	return ide_stopped;
+}
+
+
+
+/****************************************************************************
+ * Ioctl handling.
+ *
+ * Routines which queue packet commands take as a final argument a pointer
+ * to a request_sense struct.  If execution of the command results
+ * in an error with a CHECK CONDITION status, this structure will be filled
+ * with the results of the subsequent request sense command.  The pointer
+ * can also be NULL, in which case no sense information is returned.
+ */
+
+#if ! STANDARD_ATAPI
+static inline
+int bin2bcd (int x)
+{
+	return (x%10) | ((x/10) << 4);
+}
+
+
+static inline
+int bcd2bin (int x)
+{
+	return (x >> 4) * 10 + (x & 0x0f);
+}
+
+static
+void msf_from_bcd (struct atapi_msf *msf)
+{
+	msf->minute = bcd2bin (msf->minute);
+	msf->second = bcd2bin (msf->second);
+	msf->frame  = bcd2bin (msf->frame);
+}
+
+#endif /* not STANDARD_ATAPI */
+
+
+static inline
+void lba_to_msf (int lba, byte *m, byte *s, byte *f)
+{
+	lba += CD_MSF_OFFSET;
+	lba &= 0xffffff;  /* negative lbas use only 24 bits */
+	*m = lba / (CD_SECS * CD_FRAMES);
+	lba %= (CD_SECS * CD_FRAMES);
+	*s = lba / CD_FRAMES;
+	*f = lba % CD_FRAMES;
+}
+
+
+static inline
+int msf_to_lba (byte m, byte s, byte f)
+{
+	return (((m * CD_SECS) + s) * CD_FRAMES + f) - CD_MSF_OFFSET;
+}
+
+static int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
+{
+	struct request req;
+	struct cdrom_info *info = drive->driver_data;
+	struct cdrom_device_info *cdi = &info->devinfo;
+
+	cdrom_prepare_request(drive, &req);
+
+	req.sense = sense;
+	req.cmd[0] = GPCMD_TEST_UNIT_READY;
+	req.flags |= REQ_QUIET;
+
+#if ! STANDARD_ATAPI
+        /* the Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to 
+           switch CDs instead of supporting the LOAD_UNLOAD opcode   */
+
+	req.cmd[7] = cdi->sanyo_slot % 3;
+#endif /* not STANDARD_ATAPI */
+
+	return cdrom_queue_packet_command(drive, &req);
+}
+
+
+/* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */
+static int
+cdrom_lockdoor(ide_drive_t *drive, int lockflag, struct request_sense *sense)
+{
+	struct request_sense my_sense;
+	struct request req;
+	int stat;
+
+	if (sense == NULL)
+		sense = &my_sense;
+
+	/* If the drive cannot lock the door, just pretend. */
+	if (CDROM_CONFIG_FLAGS(drive)->no_doorlock) {
+		stat = 0;
+	} else {
+		cdrom_prepare_request(drive, &req);
+		req.sense = sense;
+		req.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
+		req.cmd[4] = lockflag ? 1 : 0;
+		stat = cdrom_queue_packet_command(drive, &req);
+	}
+
+	/* If we got an illegal field error, the drive
+	   probably cannot lock the door. */
+	if (stat != 0 &&
+	    sense->sense_key == ILLEGAL_REQUEST &&
+	    (sense->asc == 0x24 || sense->asc == 0x20)) {
+		printk (KERN_ERR "%s: door locking not supported\n",
+			drive->name);
+		CDROM_CONFIG_FLAGS(drive)->no_doorlock = 1;
+		stat = 0;
+	}
+
+	/* no medium, that's alright. */
+	if (stat != 0 && sense->sense_key == NOT_READY && sense->asc == 0x3a)
+		stat = 0;
+
+	if (stat == 0)
+		CDROM_STATE_FLAGS(drive)->door_locked = lockflag;
+
+	return stat;
+}
+
+
+/* Eject the disk if EJECTFLAG is 0.
+   If EJECTFLAG is 1, try to reload the disk. */
+static int cdrom_eject(ide_drive_t *drive, int ejectflag,
+		       struct request_sense *sense)
+{
+	struct request req;
+	char loej = 0x02;
+
+	if (CDROM_CONFIG_FLAGS(drive)->no_eject && !ejectflag)
+		return -EDRIVE_CANT_DO_THIS;
+
+	/* reload fails on some drives if the tray is locked */
+	if (CDROM_STATE_FLAGS(drive)->door_locked && ejectflag)
+		return 0;
+
+	cdrom_prepare_request(drive, &req);
+
+	/* on a reload, only ask the drive to close the tray if it can do that */
+	if (ejectflag && !CDROM_CONFIG_FLAGS(drive)->close_tray)
+		loej = 0;
+
+	req.sense = sense;
+	req.cmd[0] = GPCMD_START_STOP_UNIT;
+	req.cmd[4] = loej | (ejectflag != 0);
+	return cdrom_queue_packet_command(drive, &req);
+}
+
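+/* Query the drive for the medium's capacity.  Returns the number of
+   frames on the medium and the frame size expressed in 512-byte
+   sectors. */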
+static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity,
+			       unsigned long *sectors_per_frame,
+			       struct request_sense *sense)
+{
+	struct {
+		__u32 lba;
+		__u32 blocklen;
+	} capbuf;
+
+	int stat;
+	struct request req;
+
+	cdrom_prepare_request(drive, &req);
+
+	req.sense = sense;
+	req.cmd[0] = GPCMD_READ_CDVD_CAPACITY;
+	req.data = (char *)&capbuf;
+	req.data_len = sizeof(capbuf);
+
+	stat = cdrom_queue_packet_command(drive, &req);
+	if (stat == 0) {
+		*capacity = 1 + be32_to_cpu(capbuf.lba);
+		*sectors_per_frame =
+			be32_to_cpu(capbuf.blocklen) >> SECTOR_BITS;
+	}
+
+	return stat;
+}
+
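+/* Issue a READ TOC/PMA/ATIP command for the given track number and
+   format, optionally requesting MSF addressing. */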
+static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag,
+				int format, char *buf, int buflen,
+				struct request_sense *sense)
+{
+	struct request req;
+
+	cdrom_prepare_request(drive, &req);
+
+	req.sense = sense;
+	req.data =  buf;
+	req.data_len = buflen;
+	req.flags |= REQ_QUIET;
+	req.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+	req.cmd[6] = trackno;
+	req.cmd[7] = (buflen >> 8);
+	req.cmd[8] = (buflen & 0xff);
+	req.cmd[9] = (format << 6);
+
+	if (msf_flag)
+		req.cmd[1] = 2;
+
+	return cdrom_queue_packet_command(drive, &req);
+}
+
+
+/* Try to read the entire TOC for the disk into our internal buffer. */
+static int cdrom_read_toc(ide_drive_t *drive, struct request_sense *sense)
+{
+	int stat, ntracks, i;
+	struct cdrom_info *info = drive->driver_data;
+	struct cdrom_device_info *cdi = &info->devinfo;
+	struct atapi_toc *toc = info->toc;
+	struct {
+		struct atapi_toc_header hdr;
+		struct atapi_toc_entry  ent;
+	} ms_tmp;
+	long last_written;
+	unsigned long sectors_per_frame = SECTORS_PER_FRAME;
+
+	if (toc == NULL) {
+		/* Try to allocate space. */
+		toc = kmalloc(sizeof(struct atapi_toc), GFP_KERNEL);
+		if (toc == NULL) {
+			printk (KERN_ERR "%s: No cdrom TOC buffer!\n", drive->name);
+			return -ENOMEM;
+		}
+		info->toc = toc;
+	}
+
+	/* Check to see if the existing data is still valid.
+	   If it is, just return. */
+	(void) cdrom_check_status(drive, sense);
+
+	if (CDROM_STATE_FLAGS(drive)->toc_valid)
+		return 0;
+
+	/* Try to get the total cdrom capacity and sector size. */
+	stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame,
+				   sense);
+	if (stat)
+		toc->capacity = 0x1fffff;
+
+	set_capacity(info->disk, toc->capacity * sectors_per_frame);
+	blk_queue_hardsect_size(drive->queue,
+				sectors_per_frame << SECTOR_BITS);
+
+	/* First read just the header, so we know how long the TOC is. */
+	stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
+				    sizeof(struct atapi_toc_header), sense);
+	if (stat)
+		return stat;
+
+#if ! STANDARD_ATAPI
+	if (CDROM_CONFIG_FLAGS(drive)->toctracks_as_bcd) {
+		toc->hdr.first_track = bcd2bin(toc->hdr.first_track);
+		toc->hdr.last_track  = bcd2bin(toc->hdr.last_track);
+	}
+#endif  /* not STANDARD_ATAPI */
+
+	ntracks = toc->hdr.last_track - toc->hdr.first_track + 1;
+	if (ntracks <= 0)
+		return -EIO;
+	if (ntracks > MAX_TRACKS)
+		ntracks = MAX_TRACKS;
+
+	/* Now read the whole schmeer. */
+	stat = cdrom_read_tocentry(drive, toc->hdr.first_track, 1, 0,
+				  (char *)&toc->hdr,
+				   sizeof(struct atapi_toc_header) +
+				   (ntracks + 1) *
+				   sizeof(struct atapi_toc_entry), sense);
+
+	if (stat && toc->hdr.first_track > 1) {
+		/* CDs with only CDI tracks don't have any TOC entries,
+		   but despite this the returned values are
+		   first_track == last_track == number of CDI tracks + 1,
+		   so this case is indistinguishable from the same
+		   layout plus an additional audio track.
+		   If we get an error for the regular case, we assume
+		   a CDI without additional audio tracks. In this case
+		   the readable TOC is empty (CDI tracks are not included)
+		   and only holds the Leadout entry. Heiko Eißfeldt */
+		ntracks = 0;
+		stat = cdrom_read_tocentry(drive, CDROM_LEADOUT, 1, 0,
+					   (char *)&toc->hdr,
+					   sizeof(struct atapi_toc_header) +
+					   (ntracks + 1) *
+					   sizeof(struct atapi_toc_entry),
+					   sense);
+		if (stat) {
+			return stat;
+		}
+#if ! STANDARD_ATAPI
+		if (CDROM_CONFIG_FLAGS(drive)->toctracks_as_bcd) {
+			toc->hdr.first_track = bin2bcd(CDROM_LEADOUT);
+			toc->hdr.last_track = bin2bcd(CDROM_LEADOUT);
+		} else
+#endif  /* not STANDARD_ATAPI */
+		{
+			toc->hdr.first_track = CDROM_LEADOUT;
+			toc->hdr.last_track = CDROM_LEADOUT;
+		}
+	}
+
+	if (stat)
+		return stat;
+
+	toc->hdr.toc_length = ntohs (toc->hdr.toc_length);
+
+#if ! STANDARD_ATAPI
+	if (CDROM_CONFIG_FLAGS(drive)->toctracks_as_bcd) {
+		toc->hdr.first_track = bcd2bin(toc->hdr.first_track);
+		toc->hdr.last_track  = bcd2bin(toc->hdr.last_track);
+	}
+#endif  /* not STANDARD_ATAPI */
+
+	for (i=0; i<=ntracks; i++) {
+#if ! STANDARD_ATAPI
+		if (CDROM_CONFIG_FLAGS(drive)->tocaddr_as_bcd) {
+			if (CDROM_CONFIG_FLAGS(drive)->toctracks_as_bcd)
+				toc->ent[i].track = bcd2bin(toc->ent[i].track);
+			msf_from_bcd(&toc->ent[i].addr.msf);
+		}
+#endif  /* not STANDARD_ATAPI */
+		toc->ent[i].addr.lba = msf_to_lba (toc->ent[i].addr.msf.minute,
+						   toc->ent[i].addr.msf.second,
+						   toc->ent[i].addr.msf.frame);
+	}
+
+	/* Read the multisession information. */
+	if (toc->hdr.first_track != CDROM_LEADOUT) {
+		stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp,
+					   sizeof(ms_tmp), sense);
+		if (stat)
+			return stat;
+
+		toc->last_session_lba = be32_to_cpu(ms_tmp.ent.addr.lba);
+	} else {
+		ms_tmp.hdr.first_track = ms_tmp.hdr.last_track = CDROM_LEADOUT;
+		toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */
+	}
+
+#if ! STANDARD_ATAPI
+	if (CDROM_CONFIG_FLAGS(drive)->tocaddr_as_bcd) {
+		/* Re-read multisession information using MSF format */
+		stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
+					   sizeof(ms_tmp), sense);
+		if (stat)
+			return stat;
+
+		msf_from_bcd (&ms_tmp.ent.addr.msf);
+		toc->last_session_lba = msf_to_lba(ms_tmp.ent.addr.msf.minute,
+					  	   ms_tmp.ent.addr.msf.second,
+						   ms_tmp.ent.addr.msf.frame);
+	}
+#endif  /* not STANDARD_ATAPI */
+
+	toc->xa_flag = (ms_tmp.hdr.first_track != ms_tmp.hdr.last_track);
+
+	/* Now try to get the total cdrom capacity. */
+	stat = cdrom_get_last_written(cdi, &last_written);
+	if (!stat && (last_written > toc->capacity)) {
+		toc->capacity = last_written;
+		set_capacity(info->disk, toc->capacity * sectors_per_frame);
+	}
+
+	/* Remember that we've read this stuff. */
+	CDROM_STATE_FLAGS(drive)->toc_valid = 1;
+
+	return 0;
+}
+
+
+static int cdrom_read_subchannel(ide_drive_t *drive, int format, char *buf,
+				 int buflen, struct request_sense *sense)
+{
+	struct request req;
+
+	cdrom_prepare_request(drive, &req);
+
+	req.sense = sense;
+	req.data = buf;
+	req.data_len = buflen;
+	req.cmd[0] = GPCMD_READ_SUBCHANNEL;
+	req.cmd[1] = 2;     /* MSF addressing */
+	req.cmd[2] = 0x40;  /* request subQ data */
+	req.cmd[3] = format;
+	req.cmd[7] = (buflen >> 8);
+	req.cmd[8] = (buflen & 0xff);
+	return cdrom_queue_packet_command(drive, &req);
+}
+
+/* ATAPI cdrom drives are free to select the speed you request or any slower
+   rate :-( Requesting too fast a speed will _not_ produce an error. */
+static int cdrom_select_speed(ide_drive_t *drive, int speed,
+			      struct request_sense *sense)
+{
+	struct request req;
+	cdrom_prepare_request(drive, &req);
+
+	req.sense = sense;
+	if (speed == 0)
+		speed = 0xffff; /* set to max */
+	else
+		speed *= 177;   /* Nx to kbytes/s */
+
+	req.cmd[0] = GPCMD_SET_SPEED;
+	/* Read Drive speed in kbytes/second MSB */
+	req.cmd[2] = (speed >> 8) & 0xff;
+	/* Read Drive speed in kbytes/second LSB */
+	req.cmd[3] = speed & 0xff;
+	if (CDROM_CONFIG_FLAGS(drive)->cd_r ||
+	    CDROM_CONFIG_FLAGS(drive)->cd_rw ||
+	    CDROM_CONFIG_FLAGS(drive)->dvd_r) {
+		/* Write Drive speed in kbytes/second MSB */
+		req.cmd[4] = (speed >> 8) & 0xff;
+		/* Write Drive speed in kbytes/second LSB */
+		req.cmd[5] = speed & 0xff;
+	}
+
+	return cdrom_queue_packet_command(drive, &req);
+}
+
+static int cdrom_play_audio(ide_drive_t *drive, int lba_start, int lba_end)
+{
+	struct request_sense sense;
+	struct request req;
+
+	cdrom_prepare_request(drive, &req);
+
+	req.sense = &sense;
+	req.cmd[0] = GPCMD_PLAY_AUDIO_MSF;
+	lba_to_msf(lba_start, &req.cmd[3], &req.cmd[4], &req.cmd[5]);
+	lba_to_msf(lba_end-1, &req.cmd[6], &req.cmd[7], &req.cmd[8]);
+
+	return cdrom_queue_packet_command(drive, &req);
+}
+
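+/* Look up a track in the cached TOC.  Fails if the cached TOC is no
+   longer valid or the track number is out of range. */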
+static int cdrom_get_toc_entry(ide_drive_t *drive, int track,
+				struct atapi_toc_entry **ent)
+{
+	struct cdrom_info *info = drive->driver_data;
+	struct atapi_toc *toc = info->toc;
+	int ntracks;
+
+	/*
+	 * don't serve cached data, if the toc isn't valid
+	 */
+	if (!CDROM_STATE_FLAGS(drive)->toc_valid)
+		return -EINVAL;
+
+	/* Check validity of requested track number. */
+	ntracks = toc->hdr.last_track - toc->hdr.first_track + 1;
+	if (toc->hdr.first_track == CDROM_LEADOUT)
+		ntracks = 0;
+	if (track == CDROM_LEADOUT)
+		*ent = &toc->ent[ntracks];
+	else if (track < toc->hdr.first_track ||
+		 track > toc->hdr.last_track)
+		return -EINVAL;
+	else
+		*ent = &toc->ent[track - toc->hdr.first_track];
+
+	return 0;
+}
+
+/* the generic packet interface to cdrom.c */
+static int ide_cdrom_packet(struct cdrom_device_info *cdi,
+			    struct packet_command *cgc)
+{
+	struct request req;
+	ide_drive_t *drive = cdi->handle;
+
+	if (cgc->timeout <= 0)
+		cgc->timeout = ATAPI_WAIT_PC;
+
+	/* here we queue the commands from the uniform CD-ROM
+	   layer. the packet must be complete, as we do not
+	   touch it at all. */
+	cdrom_prepare_request(drive, &req);
+	memcpy(req.cmd, cgc->cmd, CDROM_PACKET_SIZE);
+	if (cgc->sense)
+		memset(cgc->sense, 0, sizeof(struct request_sense));
+	req.data = cgc->buffer;
+	req.data_len = cgc->buflen;
+	req.timeout = cgc->timeout;
+
+	if (cgc->quiet)
+		req.flags |= REQ_QUIET;
+
+	req.sense = cgc->sense;
+	cgc->stat = cdrom_queue_packet_command(drive, &req);
+	if (!cgc->stat)
+		cgc->buflen -= req.data_len;
+	return cgc->stat;
+}
+
+static
+int ide_cdrom_dev_ioctl (struct cdrom_device_info *cdi,
+			 unsigned int cmd, unsigned long arg)
+{
+	struct packet_command cgc;
+	char buffer[16];
+	int stat;
+
+	init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_UNKNOWN);
+
+	/* These will be moved into the Uniform layer shortly... */
+	switch (cmd) {
+	case CDROMSETSPINDOWN: {
+		char spindown;
+
+		if (copy_from_user(&spindown, (void __user *) arg, sizeof(char)))
+			return -EFAULT;
+
+		if ((stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0)))
+			return stat;
+
+		buffer[11] = (buffer[11] & 0xf0) | (spindown & 0x0f);
+
+		return cdrom_mode_select(cdi, &cgc);
+	}
+
+	case CDROMGETSPINDOWN: {
+		char spindown;
+
+		if ((stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CDROM_PAGE, 0)))
+			return stat;
+
+		spindown = buffer[11] & 0x0f;
+
+		if (copy_to_user((void __user *) arg, &spindown, sizeof (char)))
+			return -EFAULT;
+
+		return 0;
+	}
+
+	default:
+		return -EINVAL;
+	}
+
+}
+
+static
+int ide_cdrom_audio_ioctl (struct cdrom_device_info *cdi,
+			   unsigned int cmd, void *arg)
+{
+	ide_drive_t *drive = cdi->handle;
+	struct cdrom_info *info = drive->driver_data;
+	int stat;
+
+	switch (cmd) {
+	/*
+	 * emulate PLAY_AUDIO_TI command with PLAY_AUDIO_10, since
+	 * atapi doesn't support it
+	 */
+	case CDROMPLAYTRKIND: {
+		unsigned long lba_start, lba_end;
+		struct cdrom_ti *ti = arg;
+		struct atapi_toc_entry *first_toc, *last_toc;
+
+		stat = cdrom_get_toc_entry(drive, ti->cdti_trk0, &first_toc);
+		if (stat)
+			return stat;
+
+		stat = cdrom_get_toc_entry(drive, ti->cdti_trk1, &last_toc);
+		if (stat)
+			return stat;
+
+		if (ti->cdti_trk1 != CDROM_LEADOUT)
+			++last_toc;
+		lba_start = first_toc->addr.lba;
+		lba_end   = last_toc->addr.lba;
+
+		if (lba_end <= lba_start)
+			return -EINVAL;
+
+		return cdrom_play_audio(drive, lba_start, lba_end);
+	}
+
+	case CDROMREADTOCHDR: {
+		struct cdrom_tochdr *tochdr = arg;
+		struct atapi_toc *toc;
+
+		/* Make sure our saved TOC is valid. */
+		stat = cdrom_read_toc(drive, NULL);
+		if (stat)
+			return stat;
+
+		toc = info->toc;
+		tochdr->cdth_trk0 = toc->hdr.first_track;
+		tochdr->cdth_trk1 = toc->hdr.last_track;
+
+		return 0;
+	}
+
+	case CDROMREADTOCENTRY: {
+		struct cdrom_tocentry *tocentry = arg;
+		struct atapi_toc_entry *toce;
+
+		stat = cdrom_get_toc_entry(drive, tocentry->cdte_track, &toce);
+		if (stat)
+			return stat;
+
+		tocentry->cdte_ctrl = toce->control;
+		tocentry->cdte_adr  = toce->adr;
+		if (tocentry->cdte_format == CDROM_MSF) {
+			lba_to_msf (toce->addr.lba,
+				   &tocentry->cdte_addr.msf.minute,
+				   &tocentry->cdte_addr.msf.second,
+				   &tocentry->cdte_addr.msf.frame);
+		} else
+			tocentry->cdte_addr.lba = toce->addr.lba;
+
+		return 0;
+	}
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static
+int ide_cdrom_reset (struct cdrom_device_info *cdi)
+{
+	ide_drive_t *drive = cdi->handle;
+	struct request_sense sense;
+	struct request req;
+	int ret;
+
+	cdrom_prepare_request(drive, &req);
+	req.flags = REQ_SPECIAL | REQ_QUIET;
+	ret = ide_do_drive_cmd(drive, &req, ide_wait);
+
+	/*
+	 * A reset will unlock the door. If it was previously locked,
+	 * lock it again.
+	 */
+	if (CDROM_STATE_FLAGS(drive)->door_locked)
+		(void) cdrom_lockdoor(drive, 1, &sense);
+
+	return ret;
+}
+
+
+static
+int ide_cdrom_tray_move (struct cdrom_device_info *cdi, int position)
+{
+	ide_drive_t *drive = cdi->handle;
+	struct request_sense sense;
+
+	if (position) {
+		int stat = cdrom_lockdoor(drive, 0, &sense);
+		if (stat)
+			return stat;
+	}
+
+	return cdrom_eject(drive, !position, &sense);
+}
+
+static
+int ide_cdrom_lock_door (struct cdrom_device_info *cdi, int lock)
+{
+	ide_drive_t *drive = cdi->handle;
+	return cdrom_lockdoor(drive, lock, NULL);
+}
+
+static
+int ide_cdrom_get_capabilities(ide_drive_t *drive, struct atapi_capabilities_page *cap)
+{
+	struct cdrom_info *info = drive->driver_data;
+	struct cdrom_device_info *cdi = &info->devinfo;
+	struct packet_command cgc;
+	int stat, attempts = 3, size = sizeof(*cap);
+
+	/*
+	 * ACER50 (and others?) require the full spec length mode sense
+	 * page capabilities size, but older drives break.
+	 */
+	if (!(!strcmp(drive->id->model, "ATAPI CD ROM DRIVE 50X MAX") ||
+	    !strcmp(drive->id->model, "WPI CDS-32X")))
+		size -= sizeof(cap->pad);
+
+	init_cdrom_command(&cgc, cap, size, CGC_DATA_UNKNOWN);
+	do { /* we seem to get stat=0x01,err=0x00 the first time (??) */
+		stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
+		if (!stat)
+			break;
+	} while (--attempts);
+	return stat;
+}
+
+static
+void ide_cdrom_update_speed (ide_drive_t *drive, struct atapi_capabilities_page *cap)
+{
+	/* The ACER/AOpen 24X cdrom has the speed fields byte-swapped */
+	if (!drive->id->model[0] &&
+	    !strncmp(drive->id->fw_rev, "241N", 4)) {
+		CDROM_STATE_FLAGS(drive)->current_speed  =
+			(((unsigned int)cap->curspeed) + (176/2)) / 176;
+		CDROM_CONFIG_FLAGS(drive)->max_speed =
+			(((unsigned int)cap->maxspeed) + (176/2)) / 176;
+	} else {
+		CDROM_STATE_FLAGS(drive)->current_speed  =
+			(ntohs(cap->curspeed) + (176/2)) / 176;
+		CDROM_CONFIG_FLAGS(drive)->max_speed =
+			(ntohs(cap->maxspeed) + (176/2)) / 176;
+	}
+}
+
+static
+int ide_cdrom_select_speed (struct cdrom_device_info *cdi, int speed)
+{
+	ide_drive_t *drive = cdi->handle;
+	struct request_sense sense;
+	struct atapi_capabilities_page cap;
+	int stat;
+
+	if ((stat = cdrom_select_speed(drive, speed, &sense)) < 0)
+		return stat;
+
+	if (!ide_cdrom_get_capabilities(drive, &cap)) {
+		ide_cdrom_update_speed(drive, &cap);
+		cdi->speed = CDROM_STATE_FLAGS(drive)->current_speed;
+	}
+	return 0;
+}
+
+/*
+ * add logic to try GET_EVENT command first to check for media and tray
+ * status. this should be supported by newer cd-r/w and all DVD etc
+ * drives
+ */
+static
+int ide_cdrom_drive_status (struct cdrom_device_info *cdi, int slot_nr)
+{
+	ide_drive_t *drive = cdi->handle;
+	struct media_event_desc med;
+	struct request_sense sense;
+	int stat;
+
+	if (slot_nr != CDSL_CURRENT)
+		return -EINVAL;
+
+	stat = cdrom_check_status(drive, &sense);
+	if (!stat || sense.sense_key == UNIT_ATTENTION)
+		return CDS_DISC_OK;
+
+	if (!cdrom_get_media_event(cdi, &med)) {
+		if (med.media_present)
+			return CDS_DISC_OK;
+		else if (med.door_open)
+			return CDS_TRAY_OPEN;
+		else
+			return CDS_NO_DISC;
+	}
+
+	if (sense.sense_key == NOT_READY && sense.asc == 0x04 && sense.ascq == 0x04)
+		return CDS_DISC_OK;
+
+	/*
+	 * If not using Mt Fuji extended media tray reports,
+	 * just return TRAY_OPEN since ATAPI doesn't provide
+	 * any other way to detect this...
+	 */
+	if (sense.sense_key == NOT_READY) {
+		if (sense.asc == 0x3a) {
+			if (sense.ascq == 1)
+				return CDS_NO_DISC;
+			else if (sense.ascq == 0 || sense.ascq == 2)
+				return CDS_TRAY_OPEN;
+		}
+	}
+
+	return CDS_DRIVE_NOT_READY;
+}
+
+static
+int ide_cdrom_get_last_session (struct cdrom_device_info *cdi,
+				struct cdrom_multisession *ms_info)
+{
+	struct atapi_toc *toc;
+	ide_drive_t *drive = cdi->handle;
+	struct cdrom_info *info = drive->driver_data;
+	struct request_sense sense;
+	int ret;
+
+	if (!CDROM_STATE_FLAGS(drive)->toc_valid || info->toc == NULL)
+		if ((ret = cdrom_read_toc(drive, &sense)))
+			return ret;
+
+	toc = info->toc;
+	ms_info->addr.lba = toc->last_session_lba;
+	ms_info->xa_flag = toc->xa_flag;
+
+	return 0;
+}
+
+static
+int ide_cdrom_get_mcn (struct cdrom_device_info *cdi,
+		       struct cdrom_mcn *mcn_info)
+{
+	int stat;
+	char mcnbuf[24];
+	ide_drive_t *drive = cdi->handle;
+
+	/* get MCN */
+	if ((stat = cdrom_read_subchannel(drive, 2, mcnbuf, sizeof (mcnbuf), NULL)))
+		return stat;
+
+	memcpy (mcn_info->medium_catalog_number, mcnbuf+9,
+		sizeof (mcn_info->medium_catalog_number)-1);
+	mcn_info->medium_catalog_number[sizeof (mcn_info->medium_catalog_number)-1]
+		= '\0';
+
+	return 0;
+}
+
+
+
+/****************************************************************************
+ * Other driver requests (open, close, check media change).
+ */
+
+static
+int ide_cdrom_check_media_change_real (struct cdrom_device_info *cdi,
+				       int slot_nr)
+{
+	ide_drive_t *drive = cdi->handle;
+	int retval;
+
+	if (slot_nr == CDSL_CURRENT) {
+		(void) cdrom_check_status(drive, NULL);
+		retval = CDROM_STATE_FLAGS(drive)->media_changed;
+		CDROM_STATE_FLAGS(drive)->media_changed = 0;
+		return retval;
+	} else {
+		return -EINVAL;
+	}
+}
+
+
+static
+int ide_cdrom_open_real (struct cdrom_device_info *cdi, int purpose)
+{
+	return 0;
+}
+
+/*
+ * Close down the device.  Invalidate all cached blocks.
+ */
+
+static
+void ide_cdrom_release_real (struct cdrom_device_info *cdi)
+{
+	ide_drive_t *drive = cdi->handle;
+
+	if (!cdi->use_count)
+		CDROM_STATE_FLAGS(drive)->toc_valid = 0;
+}
+
+
+
+/****************************************************************************
+ * Device initialization.
+ */
+static struct cdrom_device_ops ide_cdrom_dops = {
+	.open			= ide_cdrom_open_real,
+	.release		= ide_cdrom_release_real,
+	.drive_status		= ide_cdrom_drive_status,
+	.media_changed		= ide_cdrom_check_media_change_real,
+	.tray_move		= ide_cdrom_tray_move,
+	.lock_door		= ide_cdrom_lock_door,
+	.select_speed		= ide_cdrom_select_speed,
+	.get_last_session	= ide_cdrom_get_last_session,
+	.get_mcn		= ide_cdrom_get_mcn,
+	.reset			= ide_cdrom_reset,
+	.audio_ioctl		= ide_cdrom_audio_ioctl,
+	.dev_ioctl		= ide_cdrom_dev_ioctl,
+	.capability		= CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK |
+				CDC_SELECT_SPEED | CDC_SELECT_DISC |
+				CDC_MULTI_SESSION | CDC_MCN |
+				CDC_MEDIA_CHANGED | CDC_PLAY_AUDIO | CDC_RESET |
+				CDC_IOCTLS | CDC_DRIVE_STATUS | CDC_CD_R |
+				CDC_CD_RW | CDC_DVD | CDC_DVD_R| CDC_DVD_RAM |
+				CDC_GENERIC_PACKET | CDC_MO_DRIVE | CDC_MRW |
+				CDC_MRW_W | CDC_RAM,
+	.generic_packet		= ide_cdrom_packet,
+};
+
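+/* Register the drive with the uniform CD-ROM layer, masking out the
+   capabilities which probing showed the drive does not have. */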
+static int ide_cdrom_register (ide_drive_t *drive, int nslots)
+{
+	struct cdrom_info *info = drive->driver_data;
+	struct cdrom_device_info *devinfo = &info->devinfo;
+
+	devinfo->ops = &ide_cdrom_dops;
+	devinfo->mask = 0;
+	devinfo->speed = CDROM_STATE_FLAGS(drive)->current_speed;
+	devinfo->capacity = nslots;
+	devinfo->handle = drive;
+	strcpy(devinfo->name, drive->name);
+
+	/* set capability mask to match the probe. */
+	if (!CDROM_CONFIG_FLAGS(drive)->cd_r)
+		devinfo->mask |= CDC_CD_R;
+	if (!CDROM_CONFIG_FLAGS(drive)->cd_rw)
+		devinfo->mask |= CDC_CD_RW;
+	if (!CDROM_CONFIG_FLAGS(drive)->dvd)
+		devinfo->mask |= CDC_DVD;
+	if (!CDROM_CONFIG_FLAGS(drive)->dvd_r)
+		devinfo->mask |= CDC_DVD_R;
+	if (!CDROM_CONFIG_FLAGS(drive)->dvd_ram)
+		devinfo->mask |= CDC_DVD_RAM;
+	if (!CDROM_CONFIG_FLAGS(drive)->is_changer)
+		devinfo->mask |= CDC_SELECT_DISC;
+	if (!CDROM_CONFIG_FLAGS(drive)->audio_play)
+		devinfo->mask |= CDC_PLAY_AUDIO;
+	if (!CDROM_CONFIG_FLAGS(drive)->close_tray)
+		devinfo->mask |= CDC_CLOSE_TRAY;
+	if (!CDROM_CONFIG_FLAGS(drive)->mo_drive)
+		devinfo->mask |= CDC_MO_DRIVE;
+
+	devinfo->disk = info->disk;
+	return register_cdrom(devinfo);
+}
+
+static
+int ide_cdrom_probe_capabilities (ide_drive_t *drive)
+{
+	struct cdrom_info *info = drive->driver_data;
+	struct cdrom_device_info *cdi = &info->devinfo;
+	struct atapi_capabilities_page cap;
+	int nslots = 1;
+
+	if (drive->media == ide_optical) {
+		CDROM_CONFIG_FLAGS(drive)->mo_drive = 1;
+		CDROM_CONFIG_FLAGS(drive)->ram = 1;
+		printk(KERN_ERR "%s: ATAPI magneto-optical drive\n", drive->name);
+		return nslots;
+	}
+
+	if (CDROM_CONFIG_FLAGS(drive)->nec260 ||
+	    !strcmp(drive->id->model,"STINGRAY 8422 IDE 8X CD-ROM 7-27-95")) {
+		CDROM_CONFIG_FLAGS(drive)->no_eject = 0;
+		CDROM_CONFIG_FLAGS(drive)->audio_play = 1;
+		return nslots;
+	}
+
+	/*
+	 * we have to cheat a little here. the packet will eventually
+	 * be queued with ide_cdrom_packet(), which extracts the
+	 * drive from cdi->handle. Since this device hasn't been
+	 * registered with the Uniform layer yet, it can't do this.
+	 * Same goes for cdi->ops.
+	 */
+	cdi->handle = drive;
+	cdi->ops = &ide_cdrom_dops;
+
+	if (ide_cdrom_get_capabilities(drive, &cap))
+		return 0;
+
+	if (cap.lock == 0)
+		CDROM_CONFIG_FLAGS(drive)->no_doorlock = 1;
+	if (cap.eject)
+		CDROM_CONFIG_FLAGS(drive)->no_eject = 0;
+	if (cap.cd_r_write)
+		CDROM_CONFIG_FLAGS(drive)->cd_r = 1;
+	if (cap.cd_rw_write) {
+		CDROM_CONFIG_FLAGS(drive)->cd_rw = 1;
+		CDROM_CONFIG_FLAGS(drive)->ram = 1;
+	}
+	if (cap.test_write)
+		CDROM_CONFIG_FLAGS(drive)->test_write = 1;
+	if (cap.dvd_ram_read || cap.dvd_r_read || cap.dvd_rom)
+		CDROM_CONFIG_FLAGS(drive)->dvd = 1;
+	if (cap.dvd_ram_write) {
+		CDROM_CONFIG_FLAGS(drive)->dvd_ram = 1;
+		CDROM_CONFIG_FLAGS(drive)->ram = 1;
+	}
+	if (cap.dvd_r_write)
+		CDROM_CONFIG_FLAGS(drive)->dvd_r = 1;
+	if (cap.audio_play)
+		CDROM_CONFIG_FLAGS(drive)->audio_play = 1;
+	if (cap.mechtype == mechtype_caddy || cap.mechtype == mechtype_popup)
+		CDROM_CONFIG_FLAGS(drive)->close_tray = 0;
+
+	/* Some drives used by Apple don't advertise audio play
+	 * but they do support reading the TOC & audio data
+	 */
+	if (strcmp(drive->id->model, "MATSHITADVD-ROM SR-8187") == 0 ||
+	    strcmp(drive->id->model, "MATSHITADVD-ROM SR-8186") == 0 ||
+	    strcmp(drive->id->model, "MATSHITADVD-ROM SR-8176") == 0 ||
+	    strcmp(drive->id->model, "MATSHITADVD-ROM SR-8174") == 0)
+		CDROM_CONFIG_FLAGS(drive)->audio_play = 1;
+
+#if ! STANDARD_ATAPI
+	if (cdi->sanyo_slot > 0) {
+		CDROM_CONFIG_FLAGS(drive)->is_changer = 1;
+		nslots = 3;
+	}
+
+	else
+#endif /* not STANDARD_ATAPI */
+	if (cap.mechtype == mechtype_individual_changer ||
+	    cap.mechtype == mechtype_cartridge_changer) {
+		if ((nslots = cdrom_number_of_slots(cdi)) > 1) {
+			CDROM_CONFIG_FLAGS(drive)->is_changer = 1;
+			CDROM_CONFIG_FLAGS(drive)->supp_disc_present = 1;
+		}
+	}
+
+	ide_cdrom_update_speed(drive, &cap);
+	/* don't print the speed if the drive reported 0 */
+	printk(KERN_INFO "%s: ATAPI", drive->name);
+	if (CDROM_CONFIG_FLAGS(drive)->max_speed)
+		printk(" %dX", CDROM_CONFIG_FLAGS(drive)->max_speed);
+	printk(" %s", CDROM_CONFIG_FLAGS(drive)->dvd ? "DVD-ROM" : "CD-ROM");
+
+	if (CDROM_CONFIG_FLAGS(drive)->dvd_r | CDROM_CONFIG_FLAGS(drive)->dvd_ram)
+		printk(" DVD%s%s",
+		       (CDROM_CONFIG_FLAGS(drive)->dvd_r) ? "-R" : "",
+		       (CDROM_CONFIG_FLAGS(drive)->dvd_ram) ? "-RAM" : "");
+
+	if (CDROM_CONFIG_FLAGS(drive)->cd_r | CDROM_CONFIG_FLAGS(drive)->cd_rw)
+		printk(" CD%s%s",
+		       (CDROM_CONFIG_FLAGS(drive)->cd_r) ? "-R" : "",
+		       (CDROM_CONFIG_FLAGS(drive)->cd_rw) ? "/RW" : "");
+
+	if (CDROM_CONFIG_FLAGS(drive)->is_changer)
+		printk(" changer w/%d slots", nslots);
+	else
+		printk(" drive");
+
+	printk(", %dkB Cache", be16_to_cpu(cap.buffer_size));
+
+	if (drive->using_dma)
+		ide_dma_verbose(drive);
+
+	printk("\n");
+
+	return nslots;
+}
+
+static void ide_cdrom_add_settings(ide_drive_t *drive)
+{
+	ide_add_setting(drive,	"dsc_overlap",		SETTING_RW, -1, -1, TYPE_BYTE, 0, 1, 1,	1, &drive->dsc_overlap, NULL);
+}
+
+/*
+ * standard prep_rq_fn that builds 10 byte cmds
+ */
+static int ide_cdrom_prep_fs(request_queue_t *q, struct request *rq)
+{
+	int hard_sect = queue_hardsect_size(q);
+	long block = (long)rq->hard_sector / (hard_sect >> 9);
+	unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9);
+
+	memset(rq->cmd, 0, sizeof(rq->cmd));
+
+	if (rq_data_dir(rq) == READ)
+		rq->cmd[0] = GPCMD_READ_10;
+	else
+		rq->cmd[0] = GPCMD_WRITE_10;
+
+	/*
+	 * fill in lba
+	 */
+	rq->cmd[2] = (block >> 24) & 0xff;
+	rq->cmd[3] = (block >> 16) & 0xff;
+	rq->cmd[4] = (block >>  8) & 0xff;
+	rq->cmd[5] = block & 0xff;
+
+	/*
+	 * and transfer length
+	 */
+	rq->cmd[7] = (blocks >> 8) & 0xff;
+	rq->cmd[8] = blocks & 0xff;
+	rq->cmd_len = 10;
+	return BLKPREP_OK;
+}
+
+/*
+ * Most of the SCSI commands are supported directly by ATAPI devices.
+ * This transform handles the few exceptions.
+ */
+static int ide_cdrom_prep_pc(struct request *rq)
+{
+	u8 *c = rq->cmd;
+
+	/*
+	 * Transform 6-byte read/write commands to the 10-byte version
+	 */
+	if (c[0] == READ_6 || c[0] == WRITE_6) {
+		c[8] = c[4];
+		c[5] = c[3];
+		c[4] = c[2];
+		c[3] = c[1] & 0x1f;
+		c[2] = 0;
+		c[1] &= 0xe0;
+		c[0] += (READ_10 - READ_6);
+		rq->cmd_len = 10;
+		return BLKPREP_OK;
+	}
+
+	/*
+	 * it's silly to pretend we understand 6-byte sense commands, just
+	 * reject with ILLEGAL_REQUEST and the caller should take the
+	 * appropriate action
+	 */
+	if (c[0] == MODE_SENSE || c[0] == MODE_SELECT) {
+		rq->errors = ILLEGAL_REQUEST;
+		return BLKPREP_KILL;
+	}
+
+	return BLKPREP_OK;
+}
+
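+/* Dispatch request preparation: fs requests get a 10-byte read/write
+   command built, block-pc requests go through the ATAPI transform. */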
+static int ide_cdrom_prep_fn(request_queue_t *q, struct request *rq)
+{
+	if (rq->flags & REQ_CMD)
+		return ide_cdrom_prep_fs(q, rq);
+	else if (rq->flags & REQ_BLOCK_PC)
+		return ide_cdrom_prep_pc(rq);
+
+	return 0;
+}
+
+static
+int ide_cdrom_setup (ide_drive_t *drive)
+{
+	struct cdrom_info *info = drive->driver_data;
+	struct cdrom_device_info *cdi = &info->devinfo;
+	int nslots;
+
+	blk_queue_prep_rq(drive->queue, ide_cdrom_prep_fn);
+	blk_queue_dma_alignment(drive->queue, 31);
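+	/* aim for a one millisecond unplug delay; (1 * HZ) / 1000
+	   truncates to zero for HZ < 1000, so fall back to one jiffy */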
+	drive->queue->unplug_delay = (1 * HZ) / 1000;
+	if (!drive->queue->unplug_delay)
+		drive->queue->unplug_delay = 1;
+
+	drive->special.all	= 0;
+
+	CDROM_STATE_FLAGS(drive)->media_changed = 1;
+	CDROM_STATE_FLAGS(drive)->toc_valid     = 0;
+	CDROM_STATE_FLAGS(drive)->door_locked   = 0;
+
+#if NO_DOOR_LOCKING
+	CDROM_CONFIG_FLAGS(drive)->no_doorlock = 1;
+#else
+	CDROM_CONFIG_FLAGS(drive)->no_doorlock = 0;
+#endif
+
+	CDROM_CONFIG_FLAGS(drive)->drq_interrupt = ((drive->id->config & 0x0060) == 0x20);
+	CDROM_CONFIG_FLAGS(drive)->is_changer = 0;
+	CDROM_CONFIG_FLAGS(drive)->cd_r = 0;
+	CDROM_CONFIG_FLAGS(drive)->cd_rw = 0;
+	CDROM_CONFIG_FLAGS(drive)->test_write = 0;
+	CDROM_CONFIG_FLAGS(drive)->dvd = 0;
+	CDROM_CONFIG_FLAGS(drive)->dvd_r = 0;
+	CDROM_CONFIG_FLAGS(drive)->dvd_ram = 0;
+	CDROM_CONFIG_FLAGS(drive)->no_eject = 1;
+	CDROM_CONFIG_FLAGS(drive)->supp_disc_present = 0;
+	CDROM_CONFIG_FLAGS(drive)->audio_play = 0;
+	CDROM_CONFIG_FLAGS(drive)->close_tray = 1;
+
+	/* limit transfer size per interrupt. */
+	CDROM_CONFIG_FLAGS(drive)->limit_nframes = 0;
+	/* a testament to the nice quality of Samsung drives... */
+	if (!strcmp(drive->id->model, "SAMSUNG CD-ROM SCR-2430"))
+		CDROM_CONFIG_FLAGS(drive)->limit_nframes = 1;
+	else if (!strcmp(drive->id->model, "SAMSUNG CD-ROM SCR-2432"))
+		CDROM_CONFIG_FLAGS(drive)->limit_nframes = 1;
+	/* the 3231 model does not support the SET_CD_SPEED command */
+	else if (!strcmp(drive->id->model, "SAMSUNG CD-ROM SCR-3231"))
+		cdi->mask |= CDC_SELECT_SPEED;
+
+#if ! STANDARD_ATAPI
+	/* by default Sanyo 3 CD changer support is turned off and
+           ATAPI Rev 2.2+ standard support for CD changers is used */
+	cdi->sanyo_slot = 0;
+
+	CDROM_CONFIG_FLAGS(drive)->nec260 = 0;
+	CDROM_CONFIG_FLAGS(drive)->toctracks_as_bcd = 0;
+	CDROM_CONFIG_FLAGS(drive)->tocaddr_as_bcd = 0;
+	CDROM_CONFIG_FLAGS(drive)->playmsf_as_bcd = 0;
+	CDROM_CONFIG_FLAGS(drive)->subchan_as_bcd = 0;
+
+	if (strcmp (drive->id->model, "V003S0DS") == 0 &&
+	    drive->id->fw_rev[4] == '1' &&
+	    drive->id->fw_rev[6] <= '2') {
+		/* Vertos 300.
+		   Some versions of this drive like to talk BCD. */
+		CDROM_CONFIG_FLAGS(drive)->toctracks_as_bcd = 1;
+		CDROM_CONFIG_FLAGS(drive)->tocaddr_as_bcd = 1;
+		CDROM_CONFIG_FLAGS(drive)->playmsf_as_bcd = 1;
+		CDROM_CONFIG_FLAGS(drive)->subchan_as_bcd = 1;
+	}
+
+	else if (strcmp (drive->id->model, "V006E0DS") == 0 &&
+	    drive->id->fw_rev[4] == '1' &&
+	    drive->id->fw_rev[6] <= '2') {
+		/* Vertos 600 ESD. */
+		CDROM_CONFIG_FLAGS(drive)->toctracks_as_bcd = 1;
+	}
+	else if (strcmp(drive->id->model, "NEC CD-ROM DRIVE:260") == 0 &&
+		 strncmp(drive->id->fw_rev, "1.01", 4) == 0) { /* FIXME */
+		/* Old NEC260 (not R).
+		   This drive was released before the 1.2 version
+		   of the spec. */
+		CDROM_CONFIG_FLAGS(drive)->tocaddr_as_bcd = 1;
+		CDROM_CONFIG_FLAGS(drive)->playmsf_as_bcd = 1;
+		CDROM_CONFIG_FLAGS(drive)->subchan_as_bcd = 1;
+		CDROM_CONFIG_FLAGS(drive)->nec260         = 1;
+	}
+	else if (strcmp(drive->id->model, "WEARNES CDD-120") == 0 &&
+		 strncmp(drive->id->fw_rev, "A1.1", 4) == 0) { /* FIXME */
+		/* Wearnes */
+		CDROM_CONFIG_FLAGS(drive)->playmsf_as_bcd = 1;
+		CDROM_CONFIG_FLAGS(drive)->subchan_as_bcd = 1;
+	}
+        /* Sanyo 3 CD changer uses a non-standard command
+           for CD changing */
+        else if ((strcmp(drive->id->model, "CD-ROM CDR-C3 G") == 0) ||
+                 (strcmp(drive->id->model, "CD-ROM CDR-C3G") == 0) ||
+                 (strcmp(drive->id->model, "CD-ROM CDR_C36") == 0)) {
+                 /* uses CD in slot 0 when value is set to 3 */
+                 cdi->sanyo_slot = 3;
+        }
+#endif /* not STANDARD_ATAPI */
+
+	info->toc		= NULL;
+	info->buffer		= NULL;
+	info->sector_buffered	= 0;
+	info->nsectors_buffered	= 0;
+	info->changer_info      = NULL;
+	info->last_block	= 0;
+	info->start_seek	= 0;
+
+	nslots = ide_cdrom_probe_capabilities (drive);
+
+	/*
+	 * set correct block size
+	 */
+	blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE);
+
+	if (drive->autotune == IDE_TUNE_DEFAULT ||
+	    drive->autotune == IDE_TUNE_AUTO)
+		drive->dsc_overlap = (drive->next != drive);
+#if 0
+	drive->dsc_overlap = (HWIF(drive)->no_dsc) ? 0 : 1;
+	if (HWIF(drive)->no_dsc) {
+		printk(KERN_INFO "ide-cd: %s: disabling DSC overlap\n",
+			drive->name);
+		drive->dsc_overlap = 0;
+	}
+#endif
+
+	if (ide_cdrom_register(drive, nslots)) {
+		printk (KERN_ERR "%s: ide_cdrom_setup failed to register device with the cdrom driver.\n", drive->name);
+		info->devinfo.handle = NULL;
+		return 1;
+	}
+	ide_cdrom_add_settings(drive);
+	return 0;
+}
+
+#ifdef CONFIG_PROC_FS
+static
+sector_t ide_cdrom_capacity (ide_drive_t *drive)
+{
+	unsigned long capacity, sectors_per_frame;
+
+	if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame, NULL))
+		return 0;
+
+	return capacity * sectors_per_frame;
+}
+#endif
+
+static int ide_cd_remove(struct device *dev)
+{
+	ide_drive_t *drive = to_ide_device(dev);
+	struct cdrom_info *info = drive->driver_data;
+
+	ide_unregister_subdriver(drive, info->driver);
+
+	del_gendisk(info->disk);
+
+	ide_cd_put(info);
+
+	return 0;
+}
+
+static void ide_cd_release(struct kref *kref)
+{
+	struct cdrom_info *info = to_ide_cd(kref);
+	struct cdrom_device_info *devinfo = &info->devinfo;
+	ide_drive_t *drive = info->drive;
+	struct gendisk *g = info->disk;
+
+	kfree(info->buffer);
+	kfree(info->toc);
+	kfree(info->changer_info);
+	if (devinfo->handle == drive && unregister_cdrom(devinfo))
+		printk(KERN_ERR "%s: %s failed to unregister device from the cdrom "
+				"driver.\n", __FUNCTION__, drive->name);
+	drive->dsc_overlap = 0;
+	drive->driver_data = NULL;
+	blk_queue_prep_rq(drive->queue, NULL);
+	g->private_data = NULL;
+	put_disk(g);
+	kfree(info);
+}
+
+static int ide_cd_probe(struct device *);
+
+#ifdef CONFIG_PROC_FS
+static int proc_idecd_read_capacity
+	(char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+	ide_drive_t *drive = data;
+	int len;
+
+	len = sprintf(page,"%llu\n", (long long)ide_cdrom_capacity(drive));
+	PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
+}
+
+static ide_proc_entry_t idecd_proc[] = {
+	{ "capacity", S_IFREG|S_IRUGO, proc_idecd_read_capacity, NULL },
+	{ NULL, 0, NULL, NULL }
+};
+#else
+# define idecd_proc	NULL
+#endif
+
+static ide_driver_t ide_cdrom_driver = {
+	.gen_driver = {
+		.owner		= THIS_MODULE,
+		.name		= "ide-cdrom",
+		.bus		= &ide_bus_type,
+		.probe		= ide_cd_probe,
+		.remove		= ide_cd_remove,
+	},
+	.version		= IDECD_VERSION,
+	.media			= ide_cdrom,
+	.supports_dsc_overlap	= 1,
+	.do_request		= ide_do_rw_cdrom,
+	.end_request		= ide_end_request,
+	.error			= __ide_error,
+	.abort			= __ide_abort,
+	.proc			= idecd_proc,
+};
+
+static int idecd_open(struct inode * inode, struct file * file)
+{
+	struct gendisk *disk = inode->i_bdev->bd_disk;
+	struct cdrom_info *info;
+	ide_drive_t *drive;
+	int rc = -ENOMEM;
+
+	if (!(info = ide_cd_get(disk)))
+		return -ENXIO;
+
+	drive = info->drive;
+
+	drive->usage++;
+
+	if (!info->buffer)
+		info->buffer = kmalloc(SECTOR_BUFFER_SIZE,
+					GFP_KERNEL|__GFP_REPEAT);
+	if (!info->buffer || (rc = cdrom_open(&info->devinfo, inode, file)))
+		drive->usage--;
+
+	if (rc < 0)
+		ide_cd_put(info);
+
+	return rc;
+}
+
+static int idecd_release(struct inode * inode, struct file * file)
+{
+	struct gendisk *disk = inode->i_bdev->bd_disk;
+	struct cdrom_info *info = ide_cd_g(disk);
+	ide_drive_t *drive = info->drive;
+
+	cdrom_release (&info->devinfo, file);
+	drive->usage--;
+
+	ide_cd_put(info);
+
+	return 0;
+}
+
+static int idecd_ioctl (struct inode *inode, struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	struct block_device *bdev = inode->i_bdev;
+	struct cdrom_info *info = ide_cd_g(bdev->bd_disk);
+	int err;
+
+	err  = generic_ide_ioctl(info->drive, file, bdev, cmd, arg);
+	if (err == -EINVAL)
+		err = cdrom_ioctl(file, &info->devinfo, inode, cmd, arg);
+
+	return err;
+}
+
+static int idecd_media_changed(struct gendisk *disk)
+{
+	struct cdrom_info *info = ide_cd_g(disk);
+	return cdrom_media_changed(&info->devinfo);
+}
+
+static int idecd_revalidate_disk(struct gendisk *disk)
+{
+	struct cdrom_info *info = ide_cd_g(disk);
+	struct request_sense sense;
+	cdrom_read_toc(info->drive, &sense);
+	return  0;
+}
+
+static struct block_device_operations idecd_ops = {
+	.owner		= THIS_MODULE,
+	.open		= idecd_open,
+	.release	= idecd_release,
+	.ioctl		= idecd_ioctl,
+	.media_changed	= idecd_media_changed,
+	.revalidate_disk= idecd_revalidate_disk
+};
+
+/* options */
+static char *ignore = NULL;
+
+module_param(ignore, charp, 0400);
+MODULE_DESCRIPTION("ATAPI CD-ROM Driver");
+
+static int ide_cd_probe(struct device *dev)
+{
+	ide_drive_t *drive = to_ide_device(dev);
+	struct cdrom_info *info;
+	struct gendisk *g;
+	struct request_sense sense;
+
+	if (!strstr("ide-cdrom", drive->driver_req))
+		goto failed;
+	if (!drive->present)
+		goto failed;
+	if (drive->media != ide_cdrom && drive->media != ide_optical)
+		goto failed;
+	/* skip drives that we were told to ignore */
+	if (ignore != NULL) {
+		if (strstr(ignore, drive->name)) {
+			printk(KERN_INFO "ide-cd: ignoring drive %s\n", drive->name);
+			goto failed;
+		}
+	}
+	if (drive->scsi) {
+		printk(KERN_INFO "ide-cd: passing drive %s to ide-scsi emulation.\n", drive->name);
+		goto failed;
+	}
+	info = kzalloc(sizeof(struct cdrom_info), GFP_KERNEL);
+	if (info == NULL) {
+		printk(KERN_ERR "%s: Can't allocate a cdrom structure\n", drive->name);
+		goto failed;
+	}
+
+	g = alloc_disk(1 << PARTN_BITS);
+	if (!g)
+		goto out_free_cd;
+
+	ide_init_disk(g, drive);
+
+	ide_register_subdriver(drive, &ide_cdrom_driver);
+
+	kref_init(&info->kref);
+
+	info->drive = drive;
+	info->driver = &ide_cdrom_driver;
+	info->disk = g;
+
+	g->private_data = &info->driver;
+
+	drive->driver_data = info;
+
+	g->minors = 1;
+	snprintf(g->devfs_name, sizeof(g->devfs_name),
+			"%s/cd", drive->devfs_name);
+	g->driverfs_dev = &drive->gendev;
+	g->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE;
+	if (ide_cdrom_setup(drive)) {
+		struct cdrom_device_info *devinfo = &info->devinfo;
+		ide_unregister_subdriver(drive, &ide_cdrom_driver);
+		kfree(info->buffer);
+		kfree(info->toc);
+		kfree(info->changer_info);
+		if (devinfo->handle == drive && unregister_cdrom(devinfo))
+			printk (KERN_ERR "%s: ide_cdrom_cleanup failed to unregister device from the cdrom driver.\n", drive->name);
+		kfree(info);
+		drive->driver_data = NULL;
+		goto failed;
+	}
+
+	cdrom_read_toc(drive, &sense);
+	g->fops = &idecd_ops;
+	g->flags |= GENHD_FL_REMOVABLE;
+	add_disk(g);
+	return 0;
+
+out_free_cd:
+	kfree(info);
+failed:
+	return -ENODEV;
+}
+
+static void __exit ide_cdrom_exit(void)
+{
+	driver_unregister(&ide_cdrom_driver.gen_driver);
+}
+
+static int __init ide_cdrom_init(void)
+{
+	return driver_register(&ide_cdrom_driver.gen_driver);
+}
+
+module_init(ide_cdrom_init);
+module_exit(ide_cdrom_exit);
+MODULE_LICENSE("GPL");
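
The open/release paths above pin the cdrom_info through ide_cd_get()/ide_cd_put(),
whose definitions fall outside this excerpt. A minimal sketch of what such
kref-based helpers typically look like, assuming a driver-private semaphore
(idecd_ref_sem is an assumed name here) serializes lookup against ide_cd_release():

	static struct cdrom_info *ide_cd_get(struct gendisk *disk)
	{
		struct cdrom_info *cd;

		down(&idecd_ref_sem);
		cd = ide_cd_g(disk);		/* container_of() on disk->private_data */
		if (cd)
			kref_get(&cd->kref);	/* pin until the matching put */
		up(&idecd_ref_sem);
		return cd;
	}

	static void ide_cd_put(struct cdrom_info *cd)
	{
		down(&idecd_ref_sem);
		kref_put(&cd->kref, ide_cd_release);	/* frees on last reference */
		up(&idecd_ref_sem);
	}
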
diff -urN oldtree/drivers/ide/pci/sis5513.c newtree/drivers/ide/pci/sis5513.c
--- oldtree/drivers/ide/pci/sis5513.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/ide/pci/sis5513.c	2006-01-29 11:18:10.813644872 +0000
@@ -112,6 +112,7 @@
 
 	{ "SiS5596",	PCI_DEVICE_ID_SI_5596,	ATA_16   },
 	{ "SiS5571",	PCI_DEVICE_ID_SI_5571,	ATA_16   },
+	{ "SiS5517",	PCI_DEVICE_ID_SI_5517,	ATA_16   },
 	{ "SiS551x",	PCI_DEVICE_ID_SI_5511,	ATA_16   },
 };
 
@@ -524,6 +525,7 @@
 			case 3:		test1 = 0x30|0x03; break;
 			case 2:		test1 = 0x40|0x04; break;
 			case 1:		test1 = 0x60|0x07; break;
+			case 0:		test1 = 0x00; break;
 			default:	break;
 		}
 		pci_write_config_byte(dev, drive_pci, test1);
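
Note: both sis5513.c hunks assume PCI_DEVICE_ID_SI_5517 is defined. If this
tree's include/linux/pci_ids.h does not already carry it, the series needs
something like the following (value inferred from the SiS numbering pattern;
verify against the datasheet before relying on it):

	#define PCI_DEVICE_ID_SI_5517	0x5517	/* assumed value */
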
diff -urN oldtree/drivers/input/mouse/Makefile newtree/drivers/input/mouse/Makefile
--- oldtree/drivers/input/mouse/Makefile	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/input/mouse/Makefile	2006-01-29 11:18:10.814644720 +0000
@@ -15,4 +15,5 @@
 obj-$(CONFIG_MOUSE_HIL)		+= hil_ptr.o
 obj-$(CONFIG_MOUSE_VSXXXAA)	+= vsxxxaa.o
 
-psmouse-objs  := psmouse-base.o alps.o logips2pp.o synaptics.o lifebook.o trackpoint.o
+psmouse-objs  := psmouse-base.o alps.o logips2pp.o synaptics.o lifebook.o \
+		trackpoint.o touchkit_ps2.o
diff -urN oldtree/drivers/input/mouse/alps.c newtree/drivers/input/mouse/alps.c
--- oldtree/drivers/input/mouse/alps.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/input/mouse/alps.c	2006-01-29 11:18:10.814644720 +0000
@@ -347,6 +347,39 @@
 	return 0;
 }
 
+/*
+ * alps_poll() - poll the touchpad for current motion packet.
+ * Used in resync.
+ */
+static int alps_poll(struct psmouse *psmouse)
+{
+	struct alps_data *priv = psmouse->private;
+	unsigned char buf[6];
+	int poll_failed;
+
+	if (priv->i->flags & ALPS_PASS)
+		alps_passthrough_mode(psmouse, 1);
+
+	poll_failed = ps2_command(&psmouse->ps2dev, buf,
+				  PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)) < 0;
+
+	/* make sure we leave passthrough mode even if the poll failed */
+	if (priv->i->flags & ALPS_PASS)
+		alps_passthrough_mode(psmouse, 0);
+
+	if (poll_failed || (buf[0] & priv->i->mask0) != priv->i->byte0)
+		return -1;
+
+	if ((psmouse->badbyte & 0xc8) == 0x08) {
+/*
+ * Poll the track stick ...
+ */
+		if (ps2_command(&psmouse->ps2dev, buf, PSMOUSE_CMD_POLL | (3 << 8)))
+			return -1;
+	}
+
+	memcpy(psmouse->packet, buf, sizeof(buf));
+	return 0;
+}
+
 static int alps_reconnect(struct psmouse *psmouse)
 {
 	struct alps_data *priv = psmouse->private;
@@ -450,10 +483,14 @@
 	input_register_device(priv->dev2);
 
 	psmouse->protocol_handler = alps_process_byte;
+	psmouse->poll = alps_poll;
 	psmouse->disconnect = alps_disconnect;
 	psmouse->reconnect = alps_reconnect;
 	psmouse->pktsize = 6;
 
+	/* We are having trouble resyncing ALPS touchpads so disable resync for now */
+	psmouse->resync_time = 0;
+
 	return 0;
 
 init_fail:
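
alps_poll() depends on the convention, made explicit by the PSMOUSE_CMD_POLL
redefinition in psmouse.h further down, that bits 11:8 of a ps2 command word
carry the number of response bytes. An illustrative call for the 6-byte ALPS
packet (not taken from the patch itself):

	unsigned char buf[6];

	/* command word 0x06eb: send opcode 0xeb, then read 6 bytes into buf;
	 * ps2_command() returns 0 on success, nonzero on failure */
	if (ps2_command(&psmouse->ps2dev, buf, PSMOUSE_CMD_POLL | (6 << 8)))
		return -1;
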
diff -urN oldtree/drivers/input/mouse/logips2pp.c newtree/drivers/input/mouse/logips2pp.c
--- oldtree/drivers/input/mouse/logips2pp.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/input/mouse/logips2pp.c	2006-01-29 11:18:10.815644568 +0000
@@ -117,7 +117,7 @@
 	if (psmouse_sliced_command(psmouse, command))
 		return -1;
 
-	if (ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_POLL))
+	if (ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_POLL | 0x0300))
 		return -1;
 
 	return 0;
diff -urN oldtree/drivers/input/mouse/psmouse-base.c newtree/drivers/input/mouse/psmouse-base.c
--- oldtree/drivers/input/mouse/psmouse-base.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/input/mouse/psmouse-base.c	2006-01-29 11:18:10.816644416 +0000
@@ -26,6 +26,7 @@
 #include "alps.h"
 #include "lifebook.h"
 #include "trackpoint.h"
+#include "touchkit_ps2.h"
 
 #define DRIVER_DESC	"PS/2 mouse driver"
 
@@ -58,6 +59,10 @@
 module_param_named(resetafter, psmouse_resetafter, uint, 0644);
 MODULE_PARM_DESC(resetafter, "Reset device after so many bad packets (0 = never).");
 
+static unsigned int psmouse_resync_time = 5;
+module_param_named(resync_time, psmouse_resync_time, uint, 0644);
+MODULE_PARM_DESC(resync_time, "How long can mouse stay idle before forcing resync (in seconds, 0 = never).");
+
 PSMOUSE_DEFINE_ATTR(protocol, S_IWUSR | S_IRUGO,
 			NULL,
 			psmouse_attr_show_protocol, psmouse_attr_set_protocol);
@@ -70,12 +75,16 @@
 PSMOUSE_DEFINE_ATTR(resetafter, S_IWUSR | S_IRUGO,
 			(void *) offsetof(struct psmouse, resetafter),
 			psmouse_show_int_attr, psmouse_set_int_attr);
+PSMOUSE_DEFINE_ATTR(resync_time, S_IWUSR | S_IRUGO,
+			(void *) offsetof(struct psmouse, resync_time),
+			psmouse_show_int_attr, psmouse_set_int_attr);
 
 static struct attribute *psmouse_attributes[] = {
 	&psmouse_attr_protocol.dattr.attr,
 	&psmouse_attr_rate.dattr.attr,
 	&psmouse_attr_resolution.dattr.attr,
 	&psmouse_attr_resetafter.dattr.attr,
+	&psmouse_attr_resync_time.dattr.attr,
 	NULL
 };
 
@@ -98,6 +107,8 @@
  */
 static DECLARE_MUTEX(psmouse_sem);
 
+static struct workqueue_struct *kpsmoused_wq;
+
 struct psmouse_protocol {
 	enum psmouse_type type;
 	char *name;
@@ -178,15 +189,79 @@
 }
 
 /*
- * psmouse_interrupt() handles incoming characters, either gathering them into
- * packets or passing them to the command routine as command output.
+ * __psmouse_set_state() sets new psmouse state and resets all flags.
+ */
+
+static inline void __psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state)
+{
+	psmouse->state = new_state;
+	psmouse->pktcnt = psmouse->out_of_sync = 0;
+	psmouse->ps2dev.flags = 0;
+	psmouse->last = jiffies;
+}
+
+
+/*
+ * psmouse_set_state() sets new psmouse state and resets all flags and
+ * counters while holding serio lock so fighting with interrupt handler
+ * is not a concern.
+ */
+
+static void psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state)
+{
+	serio_pause_rx(psmouse->ps2dev.serio);
+	__psmouse_set_state(psmouse, new_state);
+	serio_continue_rx(psmouse->ps2dev.serio);
+}
+
+/*
+ * psmouse_handle_byte() processes one byte of the input data stream
+ * by calling corresponding protocol handler.
+ */
+
+static int psmouse_handle_byte(struct psmouse *psmouse, struct pt_regs *regs)
+{
+	psmouse_ret_t rc = psmouse->protocol_handler(psmouse, regs);
+
+	switch (rc) {
+		case PSMOUSE_BAD_DATA:
+			if (psmouse->state == PSMOUSE_ACTIVATED) {
+				printk(KERN_WARNING "psmouse.c: %s at %s lost sync at byte %d\n",
+					psmouse->name, psmouse->phys, psmouse->pktcnt);
+				if (++psmouse->out_of_sync == psmouse->resetafter) {
+					__psmouse_set_state(psmouse, PSMOUSE_IGNORE);
+					printk(KERN_NOTICE "psmouse.c: issuing reconnect request\n");
+					serio_reconnect(psmouse->ps2dev.serio);
+					return -1;
+				}
+			}
+			psmouse->pktcnt = 0;
+			break;
+
+		case PSMOUSE_FULL_PACKET:
+			psmouse->pktcnt = 0;
+			if (psmouse->out_of_sync) {
+				psmouse->out_of_sync = 0;
+				printk(KERN_NOTICE "psmouse.c: %s at %s - driver resynched.\n",
+					psmouse->name, psmouse->phys);
+			}
+			break;
+
+		case PSMOUSE_GOOD_DATA:
+			break;
+	}
+	return 0;
+}
+
+/*
+ * psmouse_interrupt() handles incoming characters, either passing them
+ * for normal processing or gathering them as command response.
  */
 
 static irqreturn_t psmouse_interrupt(struct serio *serio,
 		unsigned char data, unsigned int flags, struct pt_regs *regs)
 {
 	struct psmouse *psmouse = serio_get_drvdata(serio);
-	psmouse_ret_t rc;
 
 	if (psmouse->state == PSMOUSE_IGNORE)
 		goto out;
@@ -208,67 +283,58 @@
 		if  (ps2_handle_response(&psmouse->ps2dev, data))
 			goto out;
 
-	if (psmouse->state == PSMOUSE_INITIALIZING)
+	if (psmouse->state <= PSMOUSE_RESYNCING)
 		goto out;
 
 	if (psmouse->state == PSMOUSE_ACTIVATED &&
 	    psmouse->pktcnt && time_after(jiffies, psmouse->last + HZ/2)) {
-		printk(KERN_WARNING "psmouse.c: %s at %s lost synchronization, throwing %d bytes away.\n",
+		printk(KERN_INFO "psmouse.c: %s at %s lost synchronization, throwing %d bytes away.\n",
 		       psmouse->name, psmouse->phys, psmouse->pktcnt);
-		psmouse->pktcnt = 0;
+		psmouse->badbyte = psmouse->packet[0];
+		__psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
+		queue_work(kpsmoused_wq, &psmouse->resync_work);
+		goto out;
 	}
 
-	psmouse->last = jiffies;
 	psmouse->packet[psmouse->pktcnt++] = data;
-
-	if (psmouse->packet[0] == PSMOUSE_RET_BAT) {
+/*
+ * Check if this is a new device announcement (0xAA 0x00)
+ */
+	if (unlikely(psmouse->packet[0] == PSMOUSE_RET_BAT && psmouse->pktcnt <= 2)) {
 		if (psmouse->pktcnt == 1)
 			goto out;
 
-		if (psmouse->pktcnt == 2) {
-			if (psmouse->packet[1] == PSMOUSE_RET_ID) {
-				psmouse->state = PSMOUSE_IGNORE;
-				serio_reconnect(serio);
-				goto out;
-			}
-			if (psmouse->type == PSMOUSE_SYNAPTICS) {
-				/* neither 0xAA nor 0x00 are valid first bytes
-				 * for a packet in absolute mode
-				 */
-				psmouse->pktcnt = 0;
-				goto out;
-			}
+		if (psmouse->packet[1] == PSMOUSE_RET_ID) {
+			__psmouse_set_state(psmouse, PSMOUSE_IGNORE);
+			serio_reconnect(serio);
+			goto out;
 		}
-	}
-
-	rc = psmouse->protocol_handler(psmouse, regs);
+/*
+ * Not a new device, try processing first byte normally
+ */
+		psmouse->pktcnt = 1;
+		if (psmouse_handle_byte(psmouse, regs))
+			goto out;
 
-	switch (rc) {
-		case PSMOUSE_BAD_DATA:
-			printk(KERN_WARNING "psmouse.c: %s at %s lost sync at byte %d\n",
-				psmouse->name, psmouse->phys, psmouse->pktcnt);
-			psmouse->pktcnt = 0;
+		psmouse->packet[psmouse->pktcnt++] = data;
+	}
 
-			if (++psmouse->out_of_sync == psmouse->resetafter) {
-				psmouse->state = PSMOUSE_IGNORE;
-				printk(KERN_NOTICE "psmouse.c: issuing reconnect request\n");
-				serio_reconnect(psmouse->ps2dev.serio);
-			}
-			break;
+/*
+ * See if we need to force resync because mouse was idle for too long
+ */
+	if (psmouse->state == PSMOUSE_ACTIVATED &&
+	    psmouse->pktcnt == 1 && psmouse->resync_time &&
+	    time_after(jiffies, psmouse->last + psmouse->resync_time * HZ)) {
+		psmouse->badbyte = psmouse->packet[0];
+		__psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
+		queue_work(kpsmoused_wq, &psmouse->resync_work);
+		goto out;
+	}
 
-		case PSMOUSE_FULL_PACKET:
-			psmouse->pktcnt = 0;
-			if (psmouse->out_of_sync) {
-				psmouse->out_of_sync = 0;
-				printk(KERN_NOTICE "psmouse.c: %s at %s - driver resynched.\n",
-					psmouse->name, psmouse->phys);
-			}
-			break;
+	psmouse->last = jiffies;
+	psmouse_handle_byte(psmouse, regs);
 
-		case PSMOUSE_GOOD_DATA:
-			break;
-	}
-out:
+ out:
 	return IRQ_HANDLED;
 }
 
@@ -545,6 +611,13 @@
 	if (max_proto > PSMOUSE_IMEX && trackpoint_detect(psmouse, set_properties) == 0)
 		return PSMOUSE_TRACKPOINT;
 
+	if (touchkit_ps2_detect(psmouse, set_properties) == 0) {
+		if (max_proto >= PSMOUSE_TOUCHKIT_PS2) {
+			if (!set_properties || touchkit_ps2_init(psmouse) == 0)
+				return PSMOUSE_TOUCHKIT_PS2;
+		}
+	}
+
 /*
  * Okay, all failed, we have a standard mouse here. The number of the buttons
  * is still a question, though. We assume 3.
@@ -632,6 +705,13 @@
 		.detect		= trackpoint_detect,
 	},
 	{
+		.type		= PSMOUSE_TOUCHKIT_PS2,
+		.name		= "touchkitPS/2",
+		.alias		= "touchkit",
+		.detect		= touchkit_ps2_detect,
+		.init		= touchkit_ps2_init,
+	},
+	{
 		.type		= PSMOUSE_AUTO,
 		.name		= "auto",
 		.alias		= "any",
@@ -755,21 +835,6 @@
 }
 
 /*
- * psmouse_set_state() sets new psmouse state and resets all flags and
- * counters while holding serio lock so fighting with interrupt handler
- * is not a concern.
- */
-
-static void psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state)
-{
-	serio_pause_rx(psmouse->ps2dev.serio);
-	psmouse->state = new_state;
-	psmouse->pktcnt = psmouse->out_of_sync = 0;
-	psmouse->ps2dev.flags = 0;
-	serio_continue_rx(psmouse->ps2dev.serio);
-}
-
-/*
  * psmouse_activate() enables the mouse so that we get motion reports from it.
  */
 
@@ -797,6 +862,100 @@
 	psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
 }
 
+/*
+ * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
+ */
+
+static int psmouse_poll(struct psmouse *psmouse)
+{
+	return ps2_command(&psmouse->ps2dev, psmouse->packet,
+			   PSMOUSE_CMD_POLL | (psmouse->pktsize << 8));
+}
+
+
+/*
+ * psmouse_resync() attempts to re-validate current protocol.
+ */
+
+static void psmouse_resync(void *p)
+{
+	struct psmouse *psmouse = p, *parent = NULL;
+	struct serio *serio = psmouse->ps2dev.serio;
+	psmouse_ret_t rc = PSMOUSE_GOOD_DATA;
+	int failed = 0, enabled = 0;
+	int i;
+
+	down(&psmouse_sem);
+
+	if (psmouse->state != PSMOUSE_RESYNCING)
+		goto out;
+
+	if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
+		parent = serio_get_drvdata(serio->parent);
+		psmouse_deactivate(parent);
+	}
+
+/*
+ * Some mice don't ACK commands sent while they are in the middle of
+ * transmitting a motion packet. To avoid delays we use ps2_sendbyte()
+ * instead of ps2_command(), which would wait up to 200ms for an ACK
+ * that may never come.
+ */
+	ps2_sendbyte(&psmouse->ps2dev, PSMOUSE_CMD_DISABLE, 20);
+
+/*
+ * Poll the mouse. If it was reset the packet will be shorter than
+ * psmouse->pktsize and ps2_command() will fail. We do not expect, and
+ * do not handle, the scenario where the mouse "upgrades" its protocol
+ * while disconnected, since that would require an additional delay.
+ * If we ever see a mouse that does this we'll adjust the code.
+ */
+	if (psmouse->poll(psmouse))
+		failed = 1;
+	else {
+		psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
+		for (i = 0; i < psmouse->pktsize; i++) {
+			psmouse->pktcnt++;
+			rc = psmouse->protocol_handler(psmouse, NULL);
+			if (rc != PSMOUSE_GOOD_DATA)
+				break;
+		}
+		if (rc != PSMOUSE_FULL_PACKET)
+			failed = 1;
+		psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
+	}
+
+/*
+ * Now try to re-enable the mouse. We do this even if polling failed, and
+ * retry up to 5 times, otherwise we may be left with a disabled mouse.
+ */
+	for (i = 0; i < 5; i++) {
+		if (!ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
+			enabled = 1;
+			break;
+		}
+		msleep(200);
+	}
+
+	if (!enabled) {
+		printk(KERN_WARNING "psmouse.c: failed to re-enable mouse on %s\n",
+			psmouse->ps2dev.serio->phys);
+		failed = 1;
+	}
+
+	if (failed) {
+		psmouse_set_state(psmouse, PSMOUSE_IGNORE);
+		printk(KERN_INFO "psmouse.c: resync failed, issuing reconnect request\n");
+		serio_reconnect(serio);
+	} else
+		psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);
+
+	if (parent)
+		psmouse_activate(parent);
+ out:
+	up(&psmouse_sem);
+}
 
 /*
  * psmouse_cleanup() resets the mouse into power-on state.
@@ -825,6 +984,11 @@
 
 	psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
 
+	/* make sure we don't have a resync in progress */
+	up(&psmouse_sem);
+	flush_workqueue(kpsmoused_wq);
+	down(&psmouse_sem);
+
 	if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
 		parent = serio_get_drvdata(serio->parent);
 		psmouse_deactivate(parent);
@@ -862,6 +1026,7 @@
 
 	psmouse->set_rate = psmouse_set_rate;
 	psmouse->set_resolution = psmouse_set_resolution;
+	psmouse->poll = psmouse_poll;
 	psmouse->protocol_handler = psmouse_process_byte;
 	psmouse->pktsize = 3;
 
@@ -917,6 +1082,7 @@
 		goto out;
 
 	ps2_init(&psmouse->ps2dev, serio);
+	INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse);
 	psmouse->dev = input_dev;
 	sprintf(psmouse->phys, "%s/input0", serio->phys);
 
@@ -937,6 +1103,7 @@
 	psmouse->rate = psmouse_rate;
 	psmouse->resolution = psmouse_resolution;
 	psmouse->resetafter = psmouse_resetafter;
+	psmouse->resync_time = parent ? 0 : psmouse_resync_time;
 	psmouse->smartscroll = psmouse_smartscroll;
 
 	psmouse_switch_protocol(psmouse, NULL);
@@ -1281,13 +1448,21 @@
 
 static int __init psmouse_init(void)
 {
+	kpsmoused_wq = create_singlethread_workqueue("kpsmoused");
+	if (!kpsmoused_wq) {
+		printk(KERN_ERR "psmouse: failed to create kpsmoused workqueue\n");
+		return -ENOMEM;
+	}
+
 	serio_register_driver(&psmouse_drv);
+
 	return 0;
 }
 
 static void __exit psmouse_exit(void)
 {
 	serio_unregister_driver(&psmouse_drv);
+	destroy_workqueue(kpsmoused_wq);
 }
 
 module_init(psmouse_init);
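
psmouse_resync() can block for more than a second (the disable byte plus up to
five 200ms enable retries), which is presumably why it gets a dedicated
singlethreaded workqueue rather than riding on the shared keventd. A minimal
sketch of the workqueue idiom as used in this tree, where INIT_WORK() takes the
handler's void * argument directly:

	static struct workqueue_struct *wq;

	static void my_handler(void *data)
	{
		struct psmouse *psmouse = data;
		/* runs in process context and may sleep */
	}

	/* setup: bind the handler and its argument, then queue */
	INIT_WORK(&psmouse->resync_work, my_handler, psmouse);
	queue_work(wq, &psmouse->resync_work);
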
diff -urN oldtree/drivers/input/mouse/psmouse.h newtree/drivers/input/mouse/psmouse.h
--- oldtree/drivers/input/mouse/psmouse.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/input/mouse/psmouse.h	2006-01-29 11:18:10.817644264 +0000
@@ -7,7 +7,7 @@
 #define PSMOUSE_CMD_GETINFO	0x03e9
 #define PSMOUSE_CMD_SETSTREAM	0x00ea
 #define PSMOUSE_CMD_SETPOLL	0x00f0
-#define PSMOUSE_CMD_POLL	0x03eb
+#define PSMOUSE_CMD_POLL	0x00eb	/* caller sets number of bytes to receive */
 #define PSMOUSE_CMD_GETID	0x02f2
 #define PSMOUSE_CMD_SETRATE	0x10f3
 #define PSMOUSE_CMD_ENABLE	0x00f4
@@ -23,6 +23,7 @@
 enum psmouse_state {
 	PSMOUSE_IGNORE,
 	PSMOUSE_INITIALIZING,
+	PSMOUSE_RESYNCING,
 	PSMOUSE_CMD_MODE,
 	PSMOUSE_ACTIVATED,
 };
@@ -38,9 +39,11 @@
 	void *private;
 	struct input_dev *dev;
 	struct ps2dev ps2dev;
+	struct work_struct resync_work;
 	char *vendor;
 	char *name;
 	unsigned char packet[8];
+	unsigned char badbyte;
 	unsigned char pktcnt;
 	unsigned char pktsize;
 	unsigned char type;
@@ -54,6 +57,7 @@
 	unsigned int rate;
 	unsigned int resolution;
 	unsigned int resetafter;
+	unsigned int resync_time;
 	unsigned int smartscroll;	/* Logitech only */
 
 	psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse, struct pt_regs *regs);
@@ -62,6 +66,7 @@
 
 	int (*reconnect)(struct psmouse *psmouse);
 	void (*disconnect)(struct psmouse *psmouse);
+	int (*poll)(struct psmouse *psmouse);
 
 	void (*pt_activate)(struct psmouse *psmouse);
 	void (*pt_deactivate)(struct psmouse *psmouse);
@@ -79,6 +84,7 @@
 	PSMOUSE_ALPS,
 	PSMOUSE_LIFEBOOK,
 	PSMOUSE_TRACKPOINT,
+	PSMOUSE_TOUCHKIT_PS2,
 	PSMOUSE_AUTO		/* This one should always be last */
 };
 
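
The PSMOUSE_CMD_* values encode transfer sizes in the upper byte, which is
what makes the POLL change workable; TOUCHKIT_SEND_PARMS() in touchkit_ps2.c
below spells out the same layout. Decoded (assuming the layout holds for
every command here):

	/* bits 15..12: parameter bytes to send after the opcode
	 * bits 11..8:  response bytes to read back
	 * bits  7..0:  the opcode itself
	 *
	 * e.g. PSMOUSE_CMD_SETRATE 0x10f3 -> send 1 byte, read 0
	 *      PSMOUSE_CMD_GETID   0x02f2 -> send 0 bytes, read 2
	 *      PSMOUSE_CMD_POLL    0x00eb -> caller ORs in (nbytes << 8) */
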
diff -urN oldtree/drivers/input/mouse/synaptics.c newtree/drivers/input/mouse/synaptics.c
--- oldtree/drivers/input/mouse/synaptics.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/input/mouse/synaptics.c	2006-01-29 11:18:10.817644264 +0000
@@ -652,6 +652,8 @@
 	psmouse->disconnect = synaptics_disconnect;
 	psmouse->reconnect = synaptics_reconnect;
 	psmouse->pktsize = 6;
+	/* Synaptics can usually stay in sync without extra help */
+	psmouse->resync_time = 0;
 
 	if (SYN_CAP_PASS_THROUGH(priv->capabilities))
 		synaptics_pt_create(psmouse);
diff -urN oldtree/drivers/input/mouse/touchkit_ps2.c newtree/drivers/input/mouse/touchkit_ps2.c
--- oldtree/drivers/input/mouse/touchkit_ps2.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/drivers/input/mouse/touchkit_ps2.c	2006-01-29 11:18:10.818644112 +0000
@@ -0,0 +1,195 @@
+/* ----------------------------------------------------------------------------
+ * touchkit_ps2.c  --  Driver for eGalax TouchKit PS/2 Touchscreens
+ *
+ * Copyright (C) 2005 by Stefan Lucke
+ * Copyright (C) 2004 by Daniel Ritz
+ * Copyright (C) by Todd E. Johnson (mtouchusb.c)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Based upon touchkitusb.c
+ *
+ * Vendor documentation is available in support section of:
+ * http://www.egalax.com.tw/
+ *
+ */
+
+/*
+ * Changelog:
+ *
+ * 2005-10-14:	whitespace & indentation cleanup
+ *		touchkit commands defined
+ * initial version 0.1
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <linux/input.h>
+#include <linux/serio.h>
+#include <linux/libps2.h>
+#include <linux/dmi.h>
+
+#include "psmouse.h"
+#include "touchkit_ps2.h"
+
+#define TOUCHKIT_MIN_XC			0x0
+#define TOUCHKIT_MAX_XC			0x07ff
+#define TOUCHKIT_XC_FUZZ		0x0
+#define TOUCHKIT_XC_FLAT		0x0
+#define TOUCHKIT_MIN_YC			0x0
+#define TOUCHKIT_MAX_YC			0x07ff
+#define TOUCHKIT_YC_FUZZ		0x0
+#define TOUCHKIT_YC_FLAT		0x0
+#define TOUCHKIT_REPORT_DATA_SIZE	8
+
+#define TOUCHKIT_CMD			0x0A
+#define TOUCHKIT_CMD_LENGTH		1
+
+#define TOUCHKIT_CMD_ACTIVE		'A'
+#define TOUCHKIT_CMD_FIRMWARE_VERSION	'D'
+#define TOUCHKIT_CMD_CONTROLLER_TYPE	'E'
+
+#define TOUCHKIT_SEND_PARMS(s,r,c)	((s) << 12 | (r) << 8 | (c))
+
+#define TOUCHKIT_DOWN			0x01
+#define TOUCHKIT_POINT_TOUCH		0x81
+#define TOUCHKIT_POINT_NOTOUCH		0x80
+
+#define TOUCHKIT_GET_TOUCHED(dat)	((((dat)[0]) & TOUCHKIT_DOWN) ? 1 : 0)
+#define TOUCHKIT_GET_X(dat)		(((dat)[1] << 7) | (dat)[2])
+#define TOUCHKIT_GET_Y(dat)		(((dat)[3] << 7) | (dat)[4])
+
+#define DRIVER_VERSION			"v0.2"
+#define DRIVER_AUTHOR			"Stefan Lucke <stefan@lucke.in-berlin.de>"
+#define DRIVER_DESC			"eGalax TouchKit PS/2 Touchscreen Driver"
+
+static int xyswap = 0;
+module_param(xyswap, bool, 0644);
+MODULE_PARM_DESC(xyswap, "If set X and Y axes are swapped.");
+
+static int  xinvert = 0;
+module_param(xinvert, bool, 0644);
+MODULE_PARM_DESC(xinvert, "Invert direction of x axis.");
+
+static int  yinvert = 0;
+module_param(yinvert, bool, 0644);
+MODULE_PARM_DESC(yinvert, "Invert direction of y axis.");
+
+static int  xfuzz = 0;
+module_param(xfuzz, uint, 0644);
+MODULE_PARM_DESC(xfuzz, "Fuzz value for x axis.");
+
+static int  yfuzz = 0;
+module_param(yfuzz, uint, 0644);
+MODULE_PARM_DESC(yfuzz, "Fuzz value for y axis.");
+
+static int  smartpad = 0;
+module_param(smartpad, bool, 0644);
+MODULE_PARM_DESC(smartpad, "Act as a smartpad device.");
+
+static int  mouse = 0;
+module_param(mouse, bool, 0644);
+MODULE_PARM_DESC(mouse, "Report BTN_MOUSE instead of BTN_TOUCH.");
+
+static psmouse_ret_t touchkit_ps2_process_byte(struct psmouse *psmouse, struct pt_regs *regs)
+{
+	unsigned char *packet = psmouse->packet;
+	struct input_dev *dev = psmouse->dev;
+	int x, y;
+
+	if (psmouse->pktcnt != 5)
+		return PSMOUSE_GOOD_DATA;
+
+	input_regs(dev, regs);
+
+	if (xyswap) {
+		y = TOUCHKIT_GET_X(packet);
+		x = TOUCHKIT_GET_Y(packet);
+	} else {
+		x = TOUCHKIT_GET_X(packet);
+		y = TOUCHKIT_GET_Y(packet);
+	}
+
+	y = (yinvert) ? TOUCHKIT_MAX_YC - y : y;
+	x = (xinvert) ? TOUCHKIT_MAX_XC - x : x;
+
+	input_report_key(dev,
+			 (mouse) ? BTN_MOUSE : BTN_TOUCH,
+			 TOUCHKIT_GET_TOUCHED(packet));
+
+	if (smartpad)
+		input_report_key(dev, BTN_TOOL_FINGER, 1);
+
+	input_report_abs(dev, ABS_X, x);
+	input_report_abs(dev, ABS_Y, y);
+
+	input_sync(dev);
+
+	return PSMOUSE_FULL_PACKET;
+}
+
+int touchkit_ps2_detect(struct psmouse *psmouse, int set_properties)
+{
+	unsigned char	param[3];
+	int		command;
+
+	param[0] = TOUCHKIT_CMD_LENGTH;
+	param[1] = TOUCHKIT_CMD_ACTIVE;
+	command = TOUCHKIT_SEND_PARMS(2, 3, TOUCHKIT_CMD);
+
+	if (ps2_command(&psmouse->ps2dev, param, command) == 0 &&
+	    param[0] == TOUCHKIT_CMD &&
+	    param[1] == 0x01 &&
+	    param[2] == TOUCHKIT_CMD_ACTIVE) {
+		printk(KERN_INFO "touchkit_ps2: device detected\n");
+		if (set_properties) {
+			psmouse->vendor = "eGalax";
+			psmouse->name = "Touchscreen";
+		}
+		return 0;
+	}
+	return -1;
+}
+
+int touchkit_ps2_init(struct psmouse *psmouse)
+{
+	psmouse->dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
+
+	set_bit(mouse ? BTN_MOUSE : BTN_TOUCH, psmouse->dev->keybit);
+	if (smartpad)
+		set_bit(BTN_TOOL_FINGER, psmouse->dev->keybit);
+
+	psmouse->dev->absbit[0] = BIT(ABS_X) | BIT(ABS_Y);
+
+	/* used to scale compensated data */
+	input_set_abs_params(psmouse->dev, ABS_X, TOUCHKIT_MIN_XC,
+			     TOUCHKIT_MAX_XC, xfuzz, TOUCHKIT_XC_FLAT);
+	input_set_abs_params(psmouse->dev, ABS_Y, TOUCHKIT_MIN_YC,
+			     TOUCHKIT_MAX_YC, yfuzz, TOUCHKIT_YC_FLAT);
+
+	psmouse->protocol_handler = touchkit_ps2_process_byte;
+	psmouse->pktsize = 5;
+
+	return 0;
+}
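
The accessor macros above unpack a 5-byte report in which each coordinate
travels as two 7-bit halves. A worked example with made-up bytes:

	/* packet = { 0x81, 0x03, 0x2a, 0x01, 0x15 }  (illustrative values)
	 *
	 * touched = 0x81 & TOUCHKIT_DOWN        -> 1  (finger down)
	 * x = (0x03 << 7) | 0x2a = 0x1aa        -> 426
	 * y = (0x01 << 7) | 0x15 = 0x095        -> 149
	 *
	 * both axes span 0..0x07ff, matching TOUCHKIT_MAX_XC/_YC above */
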
diff -urN oldtree/drivers/input/mouse/touchkit_ps2.h newtree/drivers/input/mouse/touchkit_ps2.h
--- oldtree/drivers/input/mouse/touchkit_ps2.h	1970-01-01 00:00:00.000000000 +0000
+++ newtree/drivers/input/mouse/touchkit_ps2.h	2006-01-29 11:18:10.818644112 +0000
@@ -0,0 +1,18 @@
+/* ----------------------------------------------------------------------------
+ * touchkit_ps2.h  --  Driver for eGalax TouchKit PS/2 Touchscreens
+ *
+ * Copyright (C) 2005 by Stefan Lucke
+ * Copyright (c) 2005 Vojtech Pavlik
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _TOUCHKIT_PS2_H
+#define _TOUCHKIT_PS2_H
+
+int touchkit_ps2_detect(struct psmouse *psmouse, int set_properties);
+int touchkit_ps2_init(struct psmouse *psmouse);
+
+#endif
diff -urN oldtree/drivers/mtd/chips/Kconfig newtree/drivers/mtd/chips/Kconfig
--- oldtree/drivers/mtd/chips/Kconfig	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/mtd/chips/Kconfig	2006-01-29 11:18:20.614154968 +0000
@@ -259,7 +259,7 @@
 	  with this driver will return -ENODEV upon access.
 
 config MTD_OBSOLETE_CHIPS
-	depends on MTD && BROKEN
+	depends on MTD
 	bool "Older (theoretically obsoleted now) drivers for non-CFI chips"
 	help
 	  This option does not enable any code directly, but will allow you to
@@ -272,7 +272,7 @@
 
 config MTD_AMDSTD
 	tristate "AMD compatible flash chip support (non-CFI)"
-	depends on MTD && MTD_OBSOLETE_CHIPS
+	depends on MTD && MTD_OBSOLETE_CHIPS && BROKEN
 	help
 	  This option enables support for flash chips using AMD-compatible
 	  commands, including some which are not CFI-compatible and hence
@@ -290,7 +290,7 @@
 
 config MTD_JEDEC
 	tristate "JEDEC device support"
-	depends on MTD && MTD_OBSOLETE_CHIPS
+	depends on MTD && MTD_OBSOLETE_CHIPS && BROKEN
 	help
 	  Enable older older JEDEC flash interface devices for self
 	  programming flash.  It is commonly used in older AMD chips.  It is
diff -urN oldtree/drivers/net/3c59x.c newtree/drivers/net/3c59x.c
--- oldtree/drivers/net/3c59x.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/3c59x.c	2006-01-29 11:18:20.540166216 +0000
@@ -789,7 +789,7 @@
 	int options;						/* User-settable misc. driver options. */
 	unsigned int media_override:4, 		/* Passed-in media type. */
 		default_media:4,				/* Read from the EEPROM/Wn3_Config. */
-		full_duplex:1, force_fd:1, autoselect:1,
+		full_duplex:1, autoselect:1,
 		bus_master:1,					/* Vortex can only do a fragment bus-m. */
 		full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang  */
 		flow_ctrl:1,					/* Use 802.3x flow control (PAUSE only) */
@@ -1326,7 +1326,7 @@
 			vp->enable_wol = 1;
 	}
 
-	vp->force_fd = vp->full_duplex;
+	vp->mii.force_media = vp->full_duplex;
 	vp->options = option;
 	/* Read the station address from the EEPROM. */
 	EL3WINDOW(0);
@@ -1616,6 +1616,48 @@
 }
 
 static void
+vortex_set_duplex(struct net_device *dev)
+{
+	int mii_reg1, mii_reg5;
+	struct vortex_private *vp = netdev_priv(dev);
+	void __iomem *ioaddr = vp->ioaddr;
+
+	EL3WINDOW(4);
+	mii_reg1 = mdio_read(dev, vp->phys[0], 1);
+	mii_reg5 = mdio_read(dev, vp->phys[0], 5);
+	vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
+	EL3WINDOW(3);
+	if (vortex_debug > 1)
+		printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x,"
+			   " info1 %04x, setting %s-duplex.\n",
+				dev->name, vp->phys[0],
+				mii_reg1, mii_reg5,
+				vp->info1, ((vp->info1 & 0x8000) || vp->full_duplex) ? "full" : "half");
+	/* Set the full-duplex bit. */
+	iowrite16(	((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
+		 	(vp->large_frames ? 0x40 : 0) |
+			((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
+			ioaddr + Wn3_MAC_Ctrl);
+	/* AKPM: bug: should reset Tx and Rx after setting Duplex.  Page 180 */
+}
+
+static void
+vortex_check_media(struct net_device *dev, unsigned int init)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+	unsigned int ok_to_print = 0;
+
+	if (vortex_debug > 3)
+		ok_to_print = 1;
+
+	if (mii_check_media(&vp->mii, ok_to_print, init)) {
+		vp->full_duplex = vp->mii.full_duplex;
+		vortex_set_duplex(dev);
+	} else if (init)
+		vortex_set_duplex(dev);
+}
+
+static void
 vortex_up(struct net_device *dev)
 {
 	struct vortex_private *vp = netdev_priv(dev);
@@ -1675,55 +1717,15 @@
 		printk(KERN_DEBUG "%s: Initial media type %s.\n",
 			   dev->name, media_tbl[dev->if_port].name);
 
-	vp->full_duplex = vp->force_fd;
+	vp->full_duplex = vp->mii.force_media;
 	config = BFINS(config, dev->if_port, 20, 4);
 	if (vortex_debug > 6)
 		printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config);
 	iowrite32(config, ioaddr + Wn3_Config);
 
-	if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
-		int mii_reg1, mii_reg5;
-		EL3WINDOW(4);
-		/* Read BMSR (reg1) only to clear old status. */
-		mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
-		mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
-		if (mii_reg5 == 0xffff  ||  mii_reg5 == 0x0000) {
-			netif_carrier_off(dev); /* No MII device or no link partner report */
-		} else {
-			mii_reg5 &= vp->advertising;
-			if ((mii_reg5 & 0x0100) != 0	/* 100baseTx-FD */
-				 || (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */
-			vp->full_duplex = 1;
-			netif_carrier_on(dev);
-		}
-		vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
-		if (vortex_debug > 1)
-			printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x,"
-				   " info1 %04x, setting %s-duplex.\n",
-					dev->name, vp->phys[0],
-					mii_reg1, mii_reg5,
-					vp->info1, ((vp->info1 & 0x8000) || vp->full_duplex) ? "full" : "half");
-		EL3WINDOW(3);
-	}
-
-	/* Set the full-duplex bit. */
-	iowrite16(	((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
-		 	(vp->large_frames ? 0x40 : 0) |
-			((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
-			ioaddr + Wn3_MAC_Ctrl);
-
-	if (vortex_debug > 1) {
-		printk(KERN_DEBUG "%s: vortex_up() InternalConfig %8.8x.\n",
-			dev->name, config);
-	}
-
-	issue_and_wait(dev, TxReset);
-	/*
-	 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
-	 */
-	issue_and_wait(dev, RxReset|0x04);
-
-	iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+	netif_carrier_off(dev);
+	if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY)
+		vortex_check_media(dev, 1);
 
 	if (vortex_debug > 1) {
 		EL3WINDOW(4);
@@ -1883,7 +1885,7 @@
 	void __iomem *ioaddr = vp->ioaddr;
 	int next_tick = 60*HZ;
 	int ok = 0;
-	int media_status, mii_status, old_window;
+	int media_status, old_window;
 
 	if (vortex_debug > 2) {
 		printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n",
@@ -1891,8 +1893,6 @@
 		printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
 	}
 
-	if (vp->medialock)
-		goto leave_media_alone;
 	disable_irq(dev->irq);
 	old_window = ioread16(ioaddr + EL3_CMD) >> 13;
 	EL3WINDOW(4);
@@ -1915,44 +1915,9 @@
 		break;
 	case XCVR_MII: case XCVR_NWAY:
 		{
-			spin_lock_bh(&vp->lock);
-			mii_status = mdio_read(dev, vp->phys[0], MII_BMSR);
-			if (!(mii_status & BMSR_LSTATUS)) {
-				/* Re-read to get actual link status */
-				mii_status = mdio_read(dev, vp->phys[0], MII_BMSR);
-			}
 			ok = 1;
-			if (vortex_debug > 2)
-				printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n",
-					dev->name, mii_status);
-			if (mii_status & BMSR_LSTATUS) {
-				int mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
-				if (! vp->force_fd  &&  mii_reg5 != 0xffff) {
-					int duplex;
-
-					mii_reg5 &= vp->advertising;
-					duplex = (mii_reg5&0x0100) || (mii_reg5 & 0x01C0) == 0x0040;
-					if (vp->full_duplex != duplex) {
-						vp->full_duplex = duplex;
-						printk(KERN_INFO "%s: Setting %s-duplex based on MII "
-							"#%d link partner capability of %4.4x.\n",
-							dev->name, vp->full_duplex ? "full" : "half",
-							vp->phys[0], mii_reg5);
-						/* Set the full-duplex bit. */
-						EL3WINDOW(3);
-						iowrite16(	(vp->full_duplex ? 0x20 : 0) |
-								(vp->large_frames ? 0x40 : 0) |
-								((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
-								ioaddr + Wn3_MAC_Ctrl);
-						if (vortex_debug > 1)
-							printk(KERN_DEBUG "Setting duplex in Wn3_MAC_Ctrl\n");
-						/* AKPM: bug: should reset Tx and Rx after setting Duplex.  Page 180 */
-					}
-				}
-				netif_carrier_on(dev);
-			} else {
-				netif_carrier_off(dev);
-			}
+			spin_lock_bh(&vp->lock);
+			vortex_check_media(dev, 0);
 			spin_unlock_bh(&vp->lock);
 		}
 		break;
@@ -1962,7 +1927,14 @@
 				 dev->name, media_tbl[dev->if_port].name, media_status);
 		ok = 1;
 	}
-	if ( ! ok) {
+
+	if (!netif_carrier_ok(dev))
+		next_tick = 5*HZ;
+
+	if (vp->medialock)
+		goto leave_media_alone;
+
+	if (!ok) {
 		unsigned int config;
 
 		do {
@@ -1995,10 +1967,10 @@
 			printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config);
 		/* AKPM: FIXME: Should reset Rx & Tx here.  P60 of 3c90xc.pdf */
 	}
-	EL3WINDOW(old_window);
-	enable_irq(dev->irq);
 
 leave_media_alone:
+	EL3WINDOW(old_window);
+	enable_irq(dev->irq);
 	if (vortex_debug > 2)
 	  printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n",
 			 dev->name, media_tbl[dev->if_port].name);
@@ -2767,6 +2739,8 @@
 	del_timer_sync(&vp->rx_oom_timer);
 	del_timer_sync(&vp->timer);
 
+	netif_carrier_off(dev);
+
 	/* Turn off statistics ASAP.  We update vp->stats below. */
 	iowrite16(StatsDisable, ioaddr + EL3_CMD);
 
@@ -2967,20 +2941,6 @@
 	return rc;
 }
 
-static u32 vortex_get_link(struct net_device *dev)
-{
-	struct vortex_private *vp = netdev_priv(dev);
-	void __iomem *ioaddr = vp->ioaddr;
-	unsigned long flags;
-	int rc;
-
-	spin_lock_irqsave(&vp->lock, flags);
-	EL3WINDOW(4);
-	rc = mii_link_ok(&vp->mii);
-	spin_unlock_irqrestore(&vp->lock, flags);
-	return rc;
-}
-
 static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct vortex_private *vp = netdev_priv(dev);
@@ -3080,7 +3040,7 @@
 	.get_stats_count        = vortex_get_stats_count,
 	.get_settings           = vortex_get_settings,
 	.set_settings           = vortex_set_settings,
-	.get_link               = vortex_get_link,
+	.get_link               = ethtool_op_get_link,
 	.nway_reset             = vortex_nway_reset,
 	.get_perm_addr			= ethtool_op_get_perm_addr,
 };
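
vortex_check_media() hands link tracking to the generic mii library, which
only works if vp->mii was populated at probe time; that initialization is
outside this diff (presumably in vortex_probe1()). A sketch of the fields
mii_check_media() depends on, under that assumption:

	vp->mii.dev          = dev;
	vp->mii.mdio_read    = mdio_read;	/* the driver's MII accessors */
	vp->mii.mdio_write   = mdio_write;
	vp->mii.phy_id       = vp->phys[0];
	vp->mii.phy_id_mask  = 0x1f;
	vp->mii.reg_num_mask = 0x1f;
	vp->mii.force_media  = vp->full_duplex;	/* as in the hunk above */
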
diff -urN oldtree/drivers/net/8139cp.c newtree/drivers/net/8139cp.c
--- oldtree/drivers/net/8139cp.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/8139cp.c	2006-01-29 11:18:20.541166064 +0000
@@ -1196,20 +1196,11 @@
 
 	cp_init_hw(cp);
 
-	rc = request_irq(dev->irq, cp_interrupt, SA_SHIRQ, dev->name, dev);
-	if (rc)
-		goto err_out_hw;
-
 	netif_carrier_off(dev);
 	mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE);
 	netif_start_queue(dev);
 
 	return 0;
-
-err_out_hw:
-	cp_stop_hw(cp);
-	cp_free_rings(cp);
-	return rc;
 }
 
 static int cp_close (struct net_device *dev)
@@ -1230,7 +1221,6 @@
 	spin_unlock_irqrestore(&cp->lock, flags);
 
 	synchronize_irq(dev->irq);
-	free_irq(dev->irq, dev);
 
 	cp_free_rings(cp);
 	return 0;
@@ -1814,6 +1804,10 @@
 	if (rc)
 		goto err_out_iomap;
 
+	rc = request_irq(dev->irq, cp_interrupt, SA_SHIRQ, dev->name, dev);
+	if (rc)
+		goto err_out_unreg;
+
 	printk (KERN_INFO "%s: RTL-8139C+ at 0x%lx, "
 		"%02x:%02x:%02x:%02x:%02x:%02x, "
 		"IRQ %d\n",
@@ -1833,6 +1827,8 @@
 
 	return 0;
 
+err_out_unreg:
+	unregister_netdev(dev);
 err_out_iomap:
 	iounmap(regs);
 err_out_res:
@@ -1853,6 +1849,7 @@
 
 	if (!dev)
 		BUG();
 	unregister_netdev(dev);
+	free_irq(dev->irq, dev);
 	iounmap(cp->regs);
 	if (cp->wol_enabled) pci_set_power_state (pdev, PCI_D0);
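
With request_irq() moved to probe time, the shared handler can now run while
the interface is down, so cp_interrupt() (not shown in this hunk) has to
tolerate that. A hedged sketch of such a guard, not the driver's actual code:

	static irqreturn_t cp_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
	{
		struct net_device *dev = dev_instance;

		/* shared line, requested at probe: may fire while the device is closed */
		if (!netif_running(dev))
			return IRQ_NONE;

		/* ... normal status read and processing ... */
		return IRQ_HANDLED;
	}
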
diff -urN oldtree/drivers/net/mii.c newtree/drivers/net/mii.c
--- oldtree/drivers/net/mii.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/mii.c	2006-01-29 11:18:20.620154056 +0000
@@ -267,8 +267,10 @@
 	int lpa2 = 0;
 
 	/* if forced media, go no further */
-	if (mii->force_media)
+	if (mii->force_media) {
+		netif_carrier_on(mii->dev);
 		return 0; /* duplex did not change */
+	}
 
 	/* check current and old link status */
 	old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0;
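
Without this netif_carrier_on(), a driver using forced media together with
ethtool_op_get_link() (as 3c59x does after the hunk above) would report link
down forever, since nothing else asserts carrier. The caller's side of the
contract, using names from the 3c59x hunk:

	/* mii_check_media() returns nonzero only when the autonegotiated duplex
	 * changed; with force_media set it now just asserts carrier and returns
	 * 0, leaving duplex programming to the driver's own init path */
	if (mii_check_media(&vp->mii, ok_to_print, init))
		vortex_set_duplex(dev);
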
diff -urN oldtree/drivers/net/sk98lin/h/skaddr.h newtree/drivers/net/sk98lin/h/skaddr.h
--- oldtree/drivers/net/sk98lin/h/skaddr.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/h/skaddr.h	2006-01-29 11:18:20.546165304 +0000
@@ -236,18 +236,6 @@
 	SK_U32	PortNumber,
 	int	Flags);
 
-extern	int	SkAddrXmacMcClear(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	SK_U32	PortNumber,
-	int	Flags);
-
-extern	int	SkAddrGmacMcClear(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	SK_U32	PortNumber,
-	int	Flags);
-
 extern	int	SkAddrMcAdd(
 	SK_AC		*pAC,
 	SK_IOC		IoC,
@@ -255,35 +243,11 @@
 	SK_MAC_ADDR	*pMc,
 	int		Flags);
 
-extern	int	SkAddrXmacMcAdd(
-	SK_AC		*pAC,
-	SK_IOC		IoC,
-	SK_U32		PortNumber,
-	SK_MAC_ADDR	*pMc,
-	int		Flags);
-
-extern	int	SkAddrGmacMcAdd(
-	SK_AC		*pAC,
-	SK_IOC		IoC,
-	SK_U32		PortNumber,
-	SK_MAC_ADDR	*pMc,
-	int		Flags);
-
 extern	int	SkAddrMcUpdate(
 	SK_AC	*pAC,
 	SK_IOC	IoC,
 	SK_U32	PortNumber);
 
-extern	int	SkAddrXmacMcUpdate(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	SK_U32	PortNumber);
-
-extern	int	SkAddrGmacMcUpdate(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	SK_U32	PortNumber);
-
 extern	int	SkAddrOverride(
 	SK_AC		*pAC,
 	SK_IOC		IoC,
@@ -297,18 +261,6 @@
 	SK_U32	PortNumber,
 	int	NewPromMode);
 
-extern	int	SkAddrXmacPromiscuousChange(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	SK_U32	PortNumber,
-	int	NewPromMode);
-
-extern	int	SkAddrGmacPromiscuousChange(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	SK_U32	PortNumber,
-	int	NewPromMode);	
-
 #ifndef SK_SLIM
 extern	int	SkAddrSwap(
 	SK_AC	*pAC,
diff -urN oldtree/drivers/net/sk98lin/h/skcsum.h newtree/drivers/net/sk98lin/h/skcsum.h
--- oldtree/drivers/net/sk98lin/h/skcsum.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/h/skcsum.h	2006-01-29 11:18:20.547165152 +0000
@@ -203,12 +203,6 @@
 	unsigned	Checksum2,
 	int			NetNumber);
 
-extern void SkCsGetSendInfo(
-	SK_AC				*pAc,
-	void				*pIpHeader,
-	SKCS_PACKET_INFO	*pPacketInfo,
-	int					NetNumber);
-
 extern void SkCsSetReceiveFlags(
 	SK_AC		*pAc,
 	unsigned	ReceiveFlags,
diff -urN oldtree/drivers/net/sk98lin/h/skgeinit.h newtree/drivers/net/sk98lin/h/skgeinit.h
--- oldtree/drivers/net/sk98lin/h/skgeinit.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/h/skgeinit.h	2006-01-29 11:18:20.547165152 +0000
@@ -464,12 +464,6 @@
 /*
  * public functions in skgeinit.c
  */
-extern void	SkGePollRxD(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	int		Port,
-	SK_BOOL	PollRxD);
-
 extern void	SkGePollTxD(
 	SK_AC	*pAC,
 	SK_IOC	IoC,
@@ -522,10 +516,6 @@
 	int		Led,
 	int		Mode);
 
-extern void	SkGeInitRamIface(
-	SK_AC	*pAC,
-	SK_IOC	IoC);
-
 extern int	SkGeInitAssignRamToQueues(
 	SK_AC	*pAC,
 	int		ActivePort,
@@ -549,11 +539,6 @@
 	SK_IOC	IoC,
 	int		Port);
 
-extern void	SkMacClearRst(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	int		Port);
-
 extern void	SkXmInitMac(
 	SK_AC	*pAC,
 	SK_IOC	IoC,
@@ -580,11 +565,6 @@
 	SK_IOC	IoC,
 	int		Port);
 
-extern void	SkMacFlushRxFifo(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	int		Port);
-
 extern void	SkMacIrq(
 	SK_AC	*pAC,
 	SK_IOC	IoC,
@@ -601,12 +581,6 @@
 	int		Port,
 	SK_U16	IStatus);
 
-extern void  SkMacSetRxTxEn(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	int		Port,
-	int		Para);
-
 extern int  SkMacRxTxEnable(
 	SK_AC	*pAC,
 	SK_IOC	IoC,
@@ -659,16 +633,6 @@
 	int		StartNum,
 	int		StopNum);
 
-extern void	SkXmInitDupMd(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	int		Port);
-
-extern void	SkXmInitPauseMd(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	int		Port);
-
 extern void	SkXmAutoNegLipaXmac(
 	SK_AC	*pAC,
 	SK_IOC	IoC,
@@ -729,17 +693,6 @@
 	int		Port,
 	SK_BOOL	StartTest);
 
-extern int SkGmEnterLowPowerMode(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	int		Port,
-	SK_U8	Mode);
-
-extern int SkGmLeaveLowPowerMode(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	int		Port);
-
 #ifdef SK_DIAG
 extern void	SkGePhyRead(
 	SK_AC	*pAC,
@@ -782,7 +735,6 @@
 /*
  * public functions in skgeinit.c
  */
-extern void	SkGePollRxD();
 extern void	SkGePollTxD();
 extern void	SkGeYellowLED();
 extern int	SkGeCfgSync();
@@ -792,7 +744,6 @@
 extern void	SkGeDeInit();
 extern int	SkGeInitPort();
 extern void	SkGeXmitLED();
-extern void	SkGeInitRamIface();
 extern int	SkGeInitAssignRamToQueues();
 
 /*
@@ -801,18 +752,15 @@
 extern void SkMacRxTxDisable();
 extern void	SkMacSoftRst();
 extern void	SkMacHardRst();
-extern void	SkMacClearRst();
 extern void SkMacInitPhy();
 extern int  SkMacRxTxEnable();
 extern void SkMacPromiscMode();
 extern void SkMacHashing();
 extern void SkMacIrqDisable();
 extern void	SkMacFlushTxFifo();
-extern void	SkMacFlushRxFifo();
 extern void	SkMacIrq();
 extern int	SkMacAutoNegDone();
 extern void	SkMacAutoNegLipaPhy();
-extern void SkMacSetRxTxEn();
 extern void	SkXmInitMac();
 extern void	SkXmPhyRead();
 extern void	SkXmPhyWrite();
@@ -820,8 +768,6 @@
 extern void	SkGmPhyRead();
 extern void	SkGmPhyWrite();
 extern void	SkXmClrExactAddr();
-extern void	SkXmInitDupMd();
-extern void	SkXmInitPauseMd();
 extern void	SkXmAutoNegLipaXmac();
 extern int	SkXmUpdateStats();
 extern int	SkGmUpdateStats();
@@ -832,8 +778,6 @@
 extern int	SkXmOverflowStatus();
 extern int	SkGmOverflowStatus();
 extern int	SkGmCableDiagStatus();
-extern int	SkGmEnterLowPowerMode();
-extern int	SkGmLeaveLowPowerMode();
 
 #ifdef SK_DIAG
 extern void	SkGePhyRead();
diff -urN oldtree/drivers/net/sk98lin/h/skgepnmi.h newtree/drivers/net/sk98lin/h/skgepnmi.h
--- oldtree/drivers/net/sk98lin/h/skgepnmi.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/h/skgepnmi.h	2006-01-29 11:18:20.548165000 +0000
@@ -946,10 +946,6 @@
  * Function prototypes
  */
 extern int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int Level);
-extern int SkPnmiGetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf,
-	unsigned int* pLen, SK_U32 Instance, SK_U32 NetIndex);
-extern int SkPnmiPreSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id,
-	void* pBuf, unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
 extern int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf,
 	unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
 extern int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf,
diff -urN oldtree/drivers/net/sk98lin/h/skgesirq.h newtree/drivers/net/sk98lin/h/skgesirq.h
--- oldtree/drivers/net/sk98lin/h/skgesirq.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/h/skgesirq.h	2006-01-29 11:18:20.548165000 +0000
@@ -105,7 +105,6 @@
 
 extern void SkGeSirqIsr(SK_AC *pAC, SK_IOC IoC, SK_U32 Istatus);
 extern int  SkGeSirqEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para);
-extern void SkHWLinkUp(SK_AC *pAC, SK_IOC IoC, int Port);
 extern void SkHWLinkDown(SK_AC *pAC, SK_IOC IoC, int Port);
 
 #endif	/* _INC_SKGESIRQ_H_ */
diff -urN oldtree/drivers/net/sk98lin/h/ski2c.h newtree/drivers/net/sk98lin/h/ski2c.h
--- oldtree/drivers/net/sk98lin/h/ski2c.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/h/ski2c.h	2006-01-29 11:18:20.549164848 +0000
@@ -162,9 +162,6 @@
 } SK_I2C;
 
 extern int SkI2cInit(SK_AC *pAC, SK_IOC IoC, int Level);
-extern int SkI2cWrite(SK_AC *pAC, SK_IOC IoC, SK_U32 Data, int Dev, int Size,
-					   int Reg, int Burst);
-extern int SkI2cReadSensor(SK_AC *pAC, SK_IOC IoC, SK_SENSOR *pSen);
 #ifdef SK_DIAG
 extern	SK_U32 SkI2cRead(SK_AC *pAC, SK_IOC IoC, int Dev, int Size, int Reg,
 						 int Burst);
diff -urN oldtree/drivers/net/sk98lin/h/skvpd.h newtree/drivers/net/sk98lin/h/skvpd.h
--- oldtree/drivers/net/sk98lin/h/skvpd.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/h/skvpd.h	2006-01-29 11:18:20.549164848 +0000
@@ -191,14 +191,6 @@
 	int			addr);
 #endif	/* SKDIAG */
 
-extern int	VpdSetupPara(
-	SK_AC		*pAC,
-	const char	*key,
-	const char	*buf,
-	int			len,
-	int			type,
-	int			op);
-
 extern SK_VPD_STATUS	*VpdStat(
 	SK_AC		*pAC,
 	SK_IOC		IoC);
@@ -235,11 +227,6 @@
 	SK_AC		*pAC,
 	SK_IOC		IoC);
 
-extern void	VpdErrLog(
-	SK_AC		*pAC,
-	SK_IOC		IoC,
-	char		*msg);
-
 #ifdef	SKDIAG
 extern int	VpdReadBlock(
 	SK_AC		*pAC,
@@ -257,7 +244,6 @@
 #endif	/* SKDIAG */
 #else	/* SK_KR_PROTO */
 extern SK_U32	VpdReadDWord();
-extern int	VpdSetupPara();
 extern SK_VPD_STATUS	*VpdStat();
 extern int	VpdKeys();
 extern int	VpdRead();
@@ -265,7 +251,6 @@
 extern int	VpdWrite();
 extern int	VpdDelete();
 extern int	VpdUpdate();
-extern void	VpdErrLog();
 #endif	/* SK_KR_PROTO */
 
 #endif	/* __INC_SKVPD_H_ */
diff -urN oldtree/drivers/net/sk98lin/skaddr.c newtree/drivers/net/sk98lin/skaddr.c
--- oldtree/drivers/net/sk98lin/skaddr.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/skaddr.c	2006-01-29 11:18:20.550164696 +0000
@@ -87,6 +87,21 @@
 static int	Next0[SK_MAX_MACS] = {0};
 #endif	/* DEBUG */
 
+static int SkAddrGmacMcAdd(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
+			   SK_MAC_ADDR *pMc, int Flags);
+static int SkAddrGmacMcClear(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
+			     int Flags);
+static int SkAddrGmacMcUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber);
+static int SkAddrGmacPromiscuousChange(SK_AC *pAC, SK_IOC IoC,
+				       SK_U32 PortNumber, int NewPromMode);
+static int SkAddrXmacMcAdd(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
+			   SK_MAC_ADDR *pMc, int Flags);
+static int SkAddrXmacMcClear(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
+			     int Flags);
+static int SkAddrXmacMcUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber);
+static int SkAddrXmacPromiscuousChange(SK_AC *pAC, SK_IOC IoC,
+				       SK_U32 PortNumber, int NewPromMode);
+
 /* functions ******************************************************************/
 
 /******************************************************************************
@@ -372,7 +387,7 @@
  *	SK_ADDR_SUCCESS
  *	SK_ADDR_ILLEGAL_PORT
  */
-int	SkAddrXmacMcClear(
+static int	SkAddrXmacMcClear(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC,		/* I/O context */
 SK_U32	PortNumber,	/* Index of affected port */
@@ -429,7 +444,7 @@
  *	SK_ADDR_SUCCESS
  *	SK_ADDR_ILLEGAL_PORT
  */
-int	SkAddrGmacMcClear(
+static int	SkAddrGmacMcClear(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC,		/* I/O context */
 SK_U32	PortNumber,	/* Index of affected port */
@@ -519,7 +534,7 @@
  * Returns:
  *	Hash value of multicast address.
  */
-SK_U32 SkXmacMcHash(
+static SK_U32 SkXmacMcHash(
 unsigned char *pMc)	/* Multicast address */
 {
 	SK_U32 Idx;
@@ -557,7 +572,7 @@
  * Returns:
  *	Hash value of multicast address.
  */
-SK_U32 SkGmacMcHash(
+static SK_U32 SkGmacMcHash(
 unsigned char *pMc)	/* Multicast address */
 {
 	SK_U32 Data;
@@ -672,7 +687,7 @@
  *	SK_MC_ILLEGAL_ADDRESS
  *	SK_MC_RLMT_OVERFLOW
  */
-int	SkAddrXmacMcAdd(
+static int	SkAddrXmacMcAdd(
 SK_AC		*pAC,		/* adapter context */
 SK_IOC		IoC,		/* I/O context */
 SK_U32		PortNumber,	/* Port Number */
@@ -778,7 +793,7 @@
  *	SK_MC_FILTERING_INEXACT
  *	SK_MC_ILLEGAL_ADDRESS
  */
-int	SkAddrGmacMcAdd(
+static int	SkAddrGmacMcAdd(
 SK_AC		*pAC,		/* adapter context */
 SK_IOC		IoC,		/* I/O context */
 SK_U32		PortNumber,	/* Port Number */
@@ -937,7 +952,7 @@
  *	SK_MC_FILTERING_INEXACT
  *	SK_ADDR_ILLEGAL_PORT
  */
-int	SkAddrXmacMcUpdate(
+static int	SkAddrXmacMcUpdate(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC,		/* I/O context */
 SK_U32	PortNumber)	/* Port Number */
@@ -1082,7 +1097,7 @@
  *	SK_MC_FILTERING_INEXACT
  *	SK_ADDR_ILLEGAL_PORT
  */
-int	SkAddrGmacMcUpdate(
+static int	SkAddrGmacMcUpdate(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC,		/* I/O context */
 SK_U32	PortNumber)	/* Port Number */
@@ -1468,7 +1483,7 @@
  *	SK_ADDR_SUCCESS
  *	SK_ADDR_ILLEGAL_PORT
  */
-int	SkAddrXmacPromiscuousChange(
+static int	SkAddrXmacPromiscuousChange(
 SK_AC	*pAC,			/* adapter context */
 SK_IOC	IoC,			/* I/O context */
 SK_U32	PortNumber,		/* port whose promiscuous mode changes */
@@ -1585,7 +1600,7 @@
  *	SK_ADDR_SUCCESS
  *	SK_ADDR_ILLEGAL_PORT
  */
-int	SkAddrGmacPromiscuousChange(
+static int	SkAddrGmacPromiscuousChange(
 SK_AC	*pAC,			/* adapter context */
 SK_IOC	IoC,			/* I/O context */
 SK_U32	PortNumber,		/* port whose promiscuous mode changes */
diff -urN oldtree/drivers/net/sk98lin/skgeinit.c newtree/drivers/net/sk98lin/skgeinit.c
--- oldtree/drivers/net/sk98lin/skgeinit.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/skgeinit.c	2006-01-29 11:18:20.551164544 +0000
@@ -59,34 +59,6 @@
 
 /******************************************************************************
  *
- *	SkGePollRxD() - Enable / Disable Descriptor Polling of RxD Ring
- *
- * Description:
- *	Enable or disable the descriptor polling of the receive descriptor
- *	ring (RxD) for port 'Port'.
- *	The new configuration is *not* saved over any SkGeStopPort() and
- *	SkGeInitPort() calls.
- *
- * Returns:
- *	nothing
- */
-void SkGePollRxD(
-SK_AC	*pAC,		/* adapter context */
-SK_IOC	IoC,		/* IO context */
-int		Port,		/* Port Index (MAC_1 + n) */
-SK_BOOL PollRxD)	/* SK_TRUE (enable pol.), SK_FALSE (disable pol.) */
-{
-	SK_GEPORT *pPrt;
-
-	pPrt = &pAC->GIni.GP[Port];
-
-	SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), (PollRxD) ?
-		CSR_ENA_POL : CSR_DIS_POL);
-}	/* SkGePollRxD */
-
-
-/******************************************************************************
- *
  *	SkGePollTxD() - Enable / Disable Descriptor Polling of TxD Rings
  *
  * Description:
@@ -952,7 +924,7 @@
  * Returns:
  *	nothing
  */
-void SkGeInitRamIface(
+static void SkGeInitRamIface(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC)		/* IO context */
 {
@@ -1409,83 +1381,6 @@
 
 }	/* SkGeInit0*/
 
-#ifdef SK_PCI_RESET
-
-/******************************************************************************
- *
- *	SkGePciReset() - Reset PCI interface
- *
- * Description:
- *	o Read PCI configuration.
- *	o Change power state to 3.
- *	o Change power state to 0.
- *	o Restore PCI configuration.
- *
- * Returns:
- *	0:	Success.
- *	1:	Power state could not be changed to 3.
- */
-static int SkGePciReset(
-SK_AC	*pAC,		/* adapter context */
-SK_IOC	IoC)		/* IO context */
-{
-	int		i;
-	SK_U16	PmCtlSts;
-	SK_U32	Bp1;
-	SK_U32	Bp2;
-	SK_U16	PciCmd;
-	SK_U8	Cls;
-	SK_U8	Lat;
-	SK_U8	ConfigSpace[PCI_CFG_SIZE];
-
-	/*
-	 * Note: Switching to D3 state is like a software reset.
-	 *		 Switching from D3 to D0 is a hardware reset.
-	 *		 We have to save and restore the configuration space.
-	 */
-	for (i = 0; i < PCI_CFG_SIZE; i++) {
-		SkPciReadCfgDWord(pAC, i*4, &ConfigSpace[i]);
-	}
-
-	/* We know the RAM Interface Arbiter is enabled. */
-	SkPciWriteCfgWord(pAC, PCI_PM_CTL_STS, PCI_PM_STATE_D3);
-	SkPciReadCfgWord(pAC, PCI_PM_CTL_STS, &PmCtlSts);
-	
-	if ((PmCtlSts & PCI_PM_STATE_MSK) != PCI_PM_STATE_D3) {
-		return(1);
-	}
-
-	/* Return to D0 state. */
-	SkPciWriteCfgWord(pAC, PCI_PM_CTL_STS, PCI_PM_STATE_D0);
-
-	/* Check for D0 state. */
-	SkPciReadCfgWord(pAC, PCI_PM_CTL_STS, &PmCtlSts);
-	
-	if ((PmCtlSts & PCI_PM_STATE_MSK) != PCI_PM_STATE_D0) {
-		return(1);
-	}
-
-	/* Check PCI Config Registers. */
-	SkPciReadCfgWord(pAC, PCI_COMMAND, &PciCmd);
-	SkPciReadCfgByte(pAC, PCI_CACHE_LSZ, &Cls);
-	SkPciReadCfgDWord(pAC, PCI_BASE_1ST, &Bp1);
-	SkPciReadCfgDWord(pAC, PCI_BASE_2ND, &Bp2);
-	SkPciReadCfgByte(pAC, PCI_LAT_TIM, &Lat);
-	
-	if (PciCmd != 0 || Cls != (SK_U8)0 || Lat != (SK_U8)0 ||
-		(Bp1 & 0xfffffff0L) != 0 || Bp2 != 1) {
-		return(1);
-	}
-
-	/* Restore PCI Config Space. */
-	for (i = 0; i < PCI_CFG_SIZE; i++) {
-		SkPciWriteCfgDWord(pAC, i*4, ConfigSpace[i]);
-	}
-
-	return(0);
-}	/* SkGePciReset */
-
-#endif /* SK_PCI_RESET */
 
 /******************************************************************************
  *
@@ -1524,10 +1419,6 @@
 	/* save CLK_RUN bits (YUKON-Lite) */
 	SK_IN16(IoC, B0_CTST, &CtrlStat);
 
-#ifdef SK_PCI_RESET
-	(void)SkGePciReset(pAC, IoC);
-#endif /* SK_PCI_RESET */
-
 	/* do the SW-reset */
 	SK_OUT8(IoC, B0_CTST, CS_RST_SET);
 
@@ -1991,11 +1882,6 @@
 	int	i;
 	SK_U16	Word;
 
-#ifdef SK_PHY_LP_MODE
-	SK_U8	Byte;
-	SK_U16	PmCtlSts;
-#endif /* SK_PHY_LP_MODE */
-
 #if (!defined(SK_SLIM) && !defined(VCPU))
 	/* ensure I2C is ready */
 	SkI2cWaitIrq(pAC, IoC);
@@ -2010,38 +1896,6 @@
 		}
 	}
 
-#ifdef SK_PHY_LP_MODE
-    /*
-	 * for power saving purposes within mobile environments
-	 * we set the PHY to coma mode and switch to D3 power state.
-	 */
-	if (pAC->GIni.GIYukonLite &&
-		pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) {
-
-		/* for all ports switch PHY to coma mode */
-		for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
-			
-			SkGmEnterLowPowerMode(pAC, IoC, i, PHY_PM_DEEP_SLEEP);
-		}
-
-		if (pAC->GIni.GIVauxAvail) {
-			/* switch power to VAUX */
-			Byte = PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF;
-
-			SK_OUT8(IoC, B0_POWER_CTRL, Byte);
-		}
-		
-		/* switch to D3 state */
-		SK_IN16(IoC, PCI_C(PCI_PM_CTL_STS), &PmCtlSts);
-
-		PmCtlSts |= PCI_PM_STATE_D3;
-
-		SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
-
-		SK_OUT16(IoC, PCI_C(PCI_PM_CTL_STS), PmCtlSts);
-	}
-#endif /* SK_PHY_LP_MODE */
-
 	/* Reset all bits in the PCI STATUS register */
 	/*
 	 * Note: PCI Cfg cycles cannot be used, because they are not
diff -urN oldtree/drivers/net/sk98lin/skgemib.c newtree/drivers/net/sk98lin/skgemib.c
--- oldtree/drivers/net/sk98lin/skgemib.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/skgemib.c	2006-01-29 11:18:20.552164392 +0000
@@ -871,13 +871,6 @@
 		sizeof(SK_PNMI_CONF),
 		SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyType),
 		SK_PNMI_RO, MacPrivateConf, 0},
-#ifdef SK_PHY_LP_MODE
-		{OID_SKGE_PHY_LP_MODE,
-		SK_PNMI_MAC_ENTRIES,
-		sizeof(SK_PNMI_CONF),
-		SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyMode),
-		SK_PNMI_RW, MacPrivateConf, 0},
-#endif	
 	{OID_SKGE_LINK_CAP,
 		SK_PNMI_MAC_ENTRIES,
 		sizeof(SK_PNMI_CONF),
diff -urN oldtree/drivers/net/sk98lin/skgepnmi.c newtree/drivers/net/sk98lin/skgepnmi.c
--- oldtree/drivers/net/sk98lin/skgepnmi.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/skgepnmi.c	2006-01-29 11:18:20.556163784 +0000
@@ -56,10 +56,6 @@
  * Public Function prototypes
  */
 int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int level);
-int SkPnmiGetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf,
-	unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
-int SkPnmiPreSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf,
-	unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
 int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf,
 	unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
 int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf,
@@ -587,7 +583,7 @@
  *                           exist (e.g. port instance 3 on a two port
  *	                         adapter.
  */
-int SkPnmiGetVar(
+static int SkPnmiGetVar(
 SK_AC *pAC,		/* Pointer to adapter context */
 SK_IOC IoC,		/* IO context handle */
 SK_U32 Id,		/* Object ID that is to be processed */
@@ -629,7 +625,7 @@
  *                           exist (e.g. port instance 3 on a two port
  *	                         adapter.
  */
-int SkPnmiPreSetVar(
+static int SkPnmiPreSetVar(
 SK_AC *pAC,		/* Pointer to adapter context */
 SK_IOC IoC,		/* IO context handle */
 SK_U32 Id,		/* Object ID that is to be processed */
@@ -5062,9 +5058,6 @@
 		case OID_SKGE_SPEED_CAP:
 		case OID_SKGE_SPEED_MODE:
 		case OID_SKGE_SPEED_STATUS:
-#ifdef SK_PHY_LP_MODE
-		case OID_SKGE_PHY_LP_MODE:
-#endif
 			if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U8)) {
 
 				*pLen = (Limit - LogPortIndex) * sizeof(SK_U8);
@@ -5140,28 +5133,6 @@
 				Offset += sizeof(SK_U32);
 				break;
 
-#ifdef SK_PHY_LP_MODE
-			case OID_SKGE_PHY_LP_MODE:
-				if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
-					if (LogPortIndex == 0) {
-						continue;
-					}
-					else {
-						/* Get value for physical ports */
-						PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex);
-						Val8 = (SK_U8) pAC->GIni.GP[PhysPortIndex].PPhyPowerState;
-						*pBufPtr = Val8;
-					}
-				}
-				else { /* DualNetMode */
-					
-					Val8 = (SK_U8) pAC->GIni.GP[PhysPortIndex].PPhyPowerState;
-					*pBufPtr = Val8;
-				}
-				Offset += sizeof(SK_U8);
-				break;
-#endif
-
 			case OID_SKGE_LINK_CAP:
 				if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
 					if (LogPortIndex == 0) {
@@ -5478,16 +5449,6 @@
 		}
 		break;
 
-#ifdef SK_PHY_LP_MODE
-	case OID_SKGE_PHY_LP_MODE:
-		if (*pLen < Limit - LogPortIndex) {
-
-			*pLen = Limit - LogPortIndex;
-			return (SK_PNMI_ERR_TOO_SHORT);
-		}
-		break;
-#endif
-
 	case OID_SKGE_MTU:
 		if (*pLen < sizeof(SK_U32)) {
 
@@ -5845,116 +5806,6 @@
 			Offset += sizeof(SK_U32);
 			break;
 		
-#ifdef SK_PHY_LP_MODE
-		case OID_SKGE_PHY_LP_MODE:
-			/* The preset ends here */
-			if (Action == SK_PNMI_PRESET) {
-
-				return (SK_PNMI_ERR_OK);
-			}
-
-			if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
-				if (LogPortIndex == 0) {
-					Offset = 0;
-					continue;
-				}
-				else {
-					/* Set value for physical ports */
-					PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex);
-
-					switch (*(pBuf + Offset)) {
-						case 0:
-							/* If LowPowerMode is active, we can leave it. */
-							if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState) {
-
-								Val32 = SkGmLeaveLowPowerMode(pAC, IoC, PhysPortIndex);
-								
-								if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState < 3)	{
-									
-									SkDrvInitAdapter(pAC);
-								}
-								break;
-							}
-							else {
-								*pLen = 0;
-								return (SK_PNMI_ERR_GENERAL);
-							}
-						case 1:
-						case 2:
-						case 3:
-						case 4:
-							/* If no LowPowerMode is active, we can enter it. */
-							if (!pAC->GIni.GP[PhysPortIndex].PPhyPowerState) {
-
-								if ((*(pBuf + Offset)) < 3)	{
-								
-									SkDrvDeInitAdapter(pAC);
-								}
-
-								Val32 = SkGmEnterLowPowerMode(pAC, IoC, PhysPortIndex, *pBuf);
-								break;
-							}
-							else {
-								*pLen = 0;
-								return (SK_PNMI_ERR_GENERAL);
-							}
-						default:
-							*pLen = 0;
-							return (SK_PNMI_ERR_BAD_VALUE);
-					}
-				}
-			}
-			else { /* DualNetMode */
-				
-				switch (*(pBuf + Offset)) {
-					case 0:
-						/* If we are in a LowPowerMode, we can leave it. */
-						if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState) {
-
-							Val32 = SkGmLeaveLowPowerMode(pAC, IoC, PhysPortIndex);
-							
-							if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState < 3)	{
-
-								SkDrvInitAdapter(pAC);
-							}
-							break;
-						}
-						else {
-							*pLen = 0;
-							return (SK_PNMI_ERR_GENERAL);
-						}
-					
-					case 1:
-					case 2:
-					case 3:
-					case 4:
-						/* If we are not already in LowPowerMode, we can enter it. */
-						if (!pAC->GIni.GP[PhysPortIndex].PPhyPowerState) {
-
-							if ((*(pBuf + Offset)) < 3)	{
-
-								SkDrvDeInitAdapter(pAC);
-							}
-							else {
-
-								Val32 = SkGmEnterLowPowerMode(pAC, IoC, PhysPortIndex, *pBuf);
-							}
-							break;
-						}
-						else {
-							*pLen = 0;
-							return (SK_PNMI_ERR_GENERAL);
-						}
-					
-					default:
-						*pLen = 0;
-						return (SK_PNMI_ERR_BAD_VALUE);
-				}
-			}
-			Offset += sizeof(SK_U8);
-			break;
-#endif
-
 		default:
             SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR,
                 ("MacPrivateConf: Unknown OID should be handled before set"));
diff -urN oldtree/drivers/net/sk98lin/skgesirq.c newtree/drivers/net/sk98lin/skgesirq.c
--- oldtree/drivers/net/sk98lin/skgesirq.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/skgesirq.c	2006-01-29 11:18:20.557163632 +0000
@@ -265,7 +265,7 @@
  *
  * Returns: N/A
  */
-void SkHWLinkUp(
+static void SkHWLinkUp(
 SK_AC	*pAC,	/* adapter context */
 SK_IOC	IoC,	/* IO context */
 int		Port)	/* Port Index (MAC_1 + n) */
@@ -612,14 +612,6 @@
 				 * we ignore those
 				 */
 				pPrt->HalfDupTimerActive = SK_TRUE;
-#ifdef XXX
-				Len = sizeof(SK_U64);
-				SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets,
-					&Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, 0),
-					pAC->Rlmt.Port[0].Net->NetNumber);
-				
-				pPrt->LastOctets = Octets;
-#endif /* XXX */
 				/* Snap statistic counters */
 				(void)SkXmUpdateStats(pAC, IoC, 0);
 
@@ -653,14 +645,6 @@
 				 pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) &&
 				!pPrt->HalfDupTimerActive) {
 				pPrt->HalfDupTimerActive = SK_TRUE;
-#ifdef XXX
-				Len = sizeof(SK_U64);
-				SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets,
-					&Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, 1),
-					pAC->Rlmt.Port[1].Net->NetNumber);
-				
-				pPrt->LastOctets = Octets;
-#endif /* XXX */
 				/* Snap statistic counters */
 				(void)SkXmUpdateStats(pAC, IoC, 1);
 
@@ -2085,12 +2069,6 @@
 			pPrt->HalfDupTimerActive = SK_FALSE;
 			if (pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF ||
 				pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) {
-#ifdef XXX
-				Len = sizeof(SK_U64);
-				SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets,
-					&Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, Port),
-					pAC->Rlmt.Port[Port].Net->NetNumber);
-#endif /* XXX */
 				/* Snap statistic counters */
 				(void)SkXmUpdateStats(pAC, IoC, Port);
 
diff -urN oldtree/drivers/net/sk98lin/ski2c.c newtree/drivers/net/sk98lin/ski2c.c
--- oldtree/drivers/net/sk98lin/ski2c.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/ski2c.c	2006-01-29 11:18:20.558163480 +0000
@@ -396,7 +396,7 @@
  *			1:	error,	 transfer does not complete, I2C transfer
  *						 killed, wait loop terminated.
  */
-int	SkI2cWait(
+static int	SkI2cWait(
 SK_AC	*pAC,	/* Adapter Context */
 SK_IOC	IoC,	/* I/O Context */
 int		Event)	/* complete event to wait for (I2C_READ or I2C_WRITE) */
@@ -481,7 +481,7 @@
  * returns	0:	success
  *			1:	error
  */
-int SkI2cWrite(
+static int SkI2cWrite(
 SK_AC	*pAC,		/* Adapter Context */
 SK_IOC	IoC,		/* I/O Context */
 SK_U32	I2cData,	/* I2C Data to write */
@@ -538,7 +538,7 @@
  *		1 if the read is completed
  *		0 if the read must be continued (I2C Bus still allocated)
  */
-int	SkI2cReadSensor(
+static int	SkI2cReadSensor(
 SK_AC		*pAC,	/* Adapter Context */
 SK_IOC		IoC,	/* I/O Context */
 SK_SENSOR	*pSen)	/* Sensor to be read */
diff -urN oldtree/drivers/net/sk98lin/sklm80.c newtree/drivers/net/sk98lin/sklm80.c
--- oldtree/drivers/net/sk98lin/sklm80.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/sklm80.c	2006-01-29 11:18:20.558163480 +0000
@@ -34,79 +34,7 @@
 #include "h/lm80.h"
 #include "h/skdrv2nd.h"		/* Adapter Control- and Driver specific Def. */
 
-#ifdef	SK_DIAG
-#define	BREAK_OR_WAIT(pAC,IoC,Event)	SkI2cWait(pAC,IoC,Event)
-#else	/* nSK_DIAG */
 #define	BREAK_OR_WAIT(pAC,IoC,Event)	break
-#endif	/* nSK_DIAG */
-
-#ifdef	SK_DIAG
-/*
- * read the register 'Reg' from the device 'Dev'
- *
- * return 	read error	-1
- *		success		the read value
- */
-int	SkLm80RcvReg(
-SK_IOC	IoC,		/* Adapter Context */
-int		Dev,		/* I2C device address */
-int		Reg)		/* register to read */
-{
-	int	Val = 0;
-	int	TempExt;
-
-	/* Signal device number */
-	if (SkI2cSndDev(IoC, Dev, I2C_WRITE)) {
-		return(-1);
-	}
-
-	if (SkI2cSndByte(IoC, Reg)) {
-		return(-1);
-	}
-
-	/* repeat start */
-	if (SkI2cSndDev(IoC, Dev, I2C_READ)) {
-		return(-1);
-	}
-
-	switch (Reg) {
-	case LM80_TEMP_IN:
-		Val = (int)SkI2cRcvByte(IoC, 1);
-
-		/* First: correct the value: it might be negative */
-		if ((Val & 0x80) != 0) {
-			/* Value is negative */
-			Val = Val - 256;
-		}
-		Val = Val * SK_LM80_TEMP_LSB;
-		SkI2cStop(IoC);
-		
-		TempExt = (int)SkLm80RcvReg(IoC, LM80_ADDR, LM80_TEMP_CTRL);
-		
-		if (Val > 0) {
-			Val += ((TempExt >> 7) * SK_LM80_TEMPEXT_LSB);
-		}
-		else {
-			Val -= ((TempExt >> 7) * SK_LM80_TEMPEXT_LSB);
-		}
-		return(Val);
-		break;
-	case LM80_VT0_IN:
-	case LM80_VT1_IN:
-	case LM80_VT2_IN:
-	case LM80_VT3_IN:
-		Val = (int)SkI2cRcvByte(IoC, 1) * SK_LM80_VT_LSB;
-		break;
-	
-	default:
-		Val = (int)SkI2cRcvByte(IoC, 1);
-		break;
-	}
-
-	SkI2cStop(IoC);
-	return(Val);
-}
-#endif	/* SK_DIAG */
 
 /*
  * read a sensors value (LM80 specific)
diff -urN oldtree/drivers/net/sk98lin/skrlmt.c newtree/drivers/net/sk98lin/skrlmt.c
--- oldtree/drivers/net/sk98lin/skrlmt.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/skrlmt.c	2006-01-29 11:18:20.561163024 +0000
@@ -282,7 +282,6 @@
 
 SK_MAC_ADDR	SkRlmtMcAddr =	{{0x01,  0x00,  0x5A,  0x52,  0x4C,  0x4D}};
 SK_MAC_ADDR	BridgeMcAddr =	{{0x01,  0x80,  0xC2,  0x00,  0x00,  0x00}};
-SK_MAC_ADDR	BcAddr = 		{{0xFF,  0xFF,  0xFF,  0xFF,  0xFF,  0xFF}};
 
 /* local variables ************************************************************/
 
diff -urN oldtree/drivers/net/sk98lin/skvpd.c newtree/drivers/net/sk98lin/skvpd.c
--- oldtree/drivers/net/sk98lin/skvpd.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/skvpd.c	2006-01-29 11:18:20.562162872 +0000
@@ -132,65 +132,6 @@
 
 #endif	/* SKDIAG */
 
-#if 0
-
-/*
-	Write the dword 'data' at address 'addr' into the VPD EEPROM, and
-	verify that the data is written.
-
- Needed Time:
-
-.				MIN		MAX
-. -------------------------------------------------------------------
-. write				1.8 ms		3.6 ms
-. internal write cyles		0.7 ms		7.0 ms
-. -------------------------------------------------------------------
-. over all program time	 	2.5 ms		10.6 ms
-. read				1.3 ms		2.6 ms
-. -------------------------------------------------------------------
-. over all 			3.8 ms		13.2 ms
-.
-
-
- Returns	0:	success
-			1:	error,	I2C transfer does not terminate
-			2:	error,	data verify error
-
- */
-static int VpdWriteDWord(
-SK_AC	*pAC,	/* pAC pointer */
-SK_IOC	IoC,	/* IO Context */
-int		addr,	/* VPD address */
-SK_U32	data)	/* VPD data to write */
-{
-	/* start VPD write */
-	/* Don't swap here, it's a data stream of bytes */
-	SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
-		("VPD write dword at addr 0x%x, data = 0x%x\n",addr,data));
-	VPD_OUT32(pAC, IoC, PCI_VPD_DAT_REG, (SK_U32)data);
-	/* But do it here */
-	addr |= VPD_WRITE;
-
-	VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, (SK_U16)(addr | VPD_WRITE));
-
-	/* this may take up to 10,6 ms */
-	if (VpdWait(pAC, IoC, VPD_WRITE)) {
-		SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
-			("Write Timed Out\n"));
-		return(1);
-	};
-
-	/* verify data */
-	if (VpdReadDWord(pAC, IoC, addr) != data) {
-		SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
-			("Data Verify Error\n"));
-		return(2);
-	}
-	return(0);
-}	/* VpdWriteDWord */
-
-#endif	/* 0 */
-
 /*
  *	Read one Stream of 'len' bytes of VPD data, starting at 'addr' from
  *	or to the I2C EEPROM.
@@ -728,7 +669,7 @@
  *		6:	fatal VPD error
  *
  */
-int	VpdSetupPara(
+static int	VpdSetupPara(
 SK_AC	*pAC,		/* common data base */
 const char	*key,	/* keyword to insert */
 const char	*buf,	/* buffer with the keyword value */
@@ -1148,50 +1089,3 @@
 	return(0);
 }
 
-
-
-/*
- *	Read the contents of the VPD EEPROM and copy it to the VPD buffer
- *	if not already done. If the keyword "VF" is not present it will be
- *	created and the error log message will be stored to this keyword.
- *	If "VF" is not present the error log message will be stored to the
- *	keyword "VL". "VL" will created or overwritten if "VF" is present.
- *	The VPD read/write area is saved to the VPD EEPROM.
- *
- * returns nothing, errors will be ignored.
- */
-void VpdErrLog(
-SK_AC	*pAC,	/* common data base */
-SK_IOC	IoC,	/* IO Context */
-char	*msg)	/* error log message */
-{
-	SK_VPD_PARA *v, vf;	/* VF */
-	int len;
-
-	SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX,
-		("VPD error log msg %s\n", msg));
-	if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) {
-		if (VpdInit(pAC, IoC) != 0) {
-			SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
-				("VPD init error\n"));
-			return;
-		}
-	}
-
-	len = strlen(msg);
-	if (len > VPD_MAX_LEN) {
-		/* cut it */
-		len = VPD_MAX_LEN;
-	}
-	if ((v = vpd_find_para(pAC, VPD_VF, &vf)) != NULL) {
-		SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("overwrite VL\n"));
-		(void)VpdSetupPara(pAC, VPD_VL, msg, len, VPD_RW_KEY, OWR_KEY);
-	}
-	else {
-		SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("write VF\n"));
-		(void)VpdSetupPara(pAC, VPD_VF, msg, len, VPD_RW_KEY, ADD_KEY);
-	}
-
-	(void)VpdUpdate(pAC, IoC);
-}
-
diff -urN oldtree/drivers/net/sk98lin/skxmac2.c newtree/drivers/net/sk98lin/skxmac2.c
--- oldtree/drivers/net/sk98lin/skxmac2.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sk98lin/skxmac2.c	2006-01-29 11:18:20.565162416 +0000
@@ -41,13 +41,13 @@
 #endif
 
 #ifdef GENESIS
-BCOM_HACK BcomRegA1Hack[] = {
+static BCOM_HACK BcomRegA1Hack[] = {
  { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
  { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
  { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
  { 0, 0 }
 };
-BCOM_HACK BcomRegC0Hack[] = {
+static BCOM_HACK BcomRegC0Hack[] = {
  { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, { 0x17, 0x0013 },
  { 0x15, 0x0A04 }, { 0x18, 0x0420 },
  { 0, 0 }
@@ -790,7 +790,7 @@
  * Returns:
  *	nothing
  */
-void SkMacFlushRxFifo(
+static void SkMacFlushRxFifo(
 SK_AC	*pAC,	/* adapter context */
 SK_IOC	IoC,	/* IO context */
 int		Port)	/* Port Index (MAC_1 + n) */
@@ -1231,38 +1231,6 @@
 }	/* SkMacHardRst */
 
 
-/******************************************************************************
- *
- *	SkMacClearRst() - Clear the MAC reset
- *
- * Description:	calls a clear MAC reset routine dep. on board type
- *
- * Returns:
- *	nothing
- */
-void SkMacClearRst(
-SK_AC	*pAC,	/* adapter context */
-SK_IOC	IoC,	/* IO context */
-int		Port)	/* Port Index (MAC_1 + n) */
-{
-	
-#ifdef GENESIS
-	if (pAC->GIni.GIGenesis) {
-		
-		SkXmClearRst(pAC, IoC, Port);
-	}
-#endif /* GENESIS */
-	
-#ifdef YUKON
-	if (pAC->GIni.GIYukon) {
-		
-		SkGmClearRst(pAC, IoC, Port);
-	}
-#endif /* YUKON */
-
-}	/* SkMacClearRst */
-
-
 #ifdef GENESIS
 /******************************************************************************
  *
@@ -1713,7 +1681,7 @@
  * Returns:
  *	nothing
  */
-void SkXmInitDupMd(
+static void SkXmInitDupMd(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC,		/* IO context */
 int		Port)		/* Port Index (MAC_1 + n) */
@@ -1761,7 +1729,7 @@
  * Returns:
  *	nothing
  */
-void SkXmInitPauseMd(
+static void SkXmInitPauseMd(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC,		/* IO context */
 int		Port)		/* Port Index (MAC_1 + n) */
@@ -2076,283 +2044,7 @@
 }	/* SkXmInitPhyBcom */
 #endif /* GENESIS */
 
-
 #ifdef YUKON
-#ifndef SK_SLIM
-/******************************************************************************
- *
- *	SkGmEnterLowPowerMode()
- *
- * Description:	
- *	This function sets the Marvell Alaska PHY to the low power mode
- *	given by parameter mode.
- *	The following low power modes are available:
- *		
- *		- Coma Mode (Deep Sleep):
- *			Power consumption: ~15 - 30 mW
- *			The PHY cannot wake up on its own.
- *
- *		- IEEE 22.2.4.1.5 compatible power down mode
- *			Power consumption: ~240 mW
- *			The PHY cannot wake up on its own.
- *
- *		- energy detect mode
- *			Power consumption: ~160 mW
- *			The PHY can wake up on its own by detecting activity
- *			on the CAT 5 cable.
- *
- *		- energy detect plus mode
- *			Power consumption: ~150 mW
- *			The PHY can wake up on its own by detecting activity
- *			on the CAT 5 cable.
- *			Connected devices can be woken up by sending normal link
- *			pulses every one second.
- *
- * Note:
- *
- * Returns:
- *		0: ok
- *		1: error
- */
-int SkGmEnterLowPowerMode(
-SK_AC	*pAC,		/* adapter context */
-SK_IOC	IoC,		/* IO context */
-int		Port,		/* Port Index (e.g. MAC_1) */
-SK_U8	Mode)		/* low power mode */
-{
-	SK_U16	Word;
-	SK_U32	DWord;
-	SK_U8	LastMode;
-	int		Ret = 0;
-
-	if (pAC->GIni.GIYukonLite &&
-	    pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) {
-
-		/* save current power mode */
-		LastMode = pAC->GIni.GP[Port].PPhyPowerState;
-		pAC->GIni.GP[Port].PPhyPowerState = Mode;
-
-		switch (Mode) {
-			/* coma mode (deep sleep) */
-			case PHY_PM_DEEP_SLEEP:
-				/* setup General Purpose Control Register */
-				GM_OUT16(IoC, 0, GM_GP_CTRL, GM_GPCR_FL_PASS |
-					GM_GPCR_SPEED_100 | GM_GPCR_AU_ALL_DIS);
-
-				/* apply COMA mode workaround */
-				SkGmPhyWrite(pAC, IoC, Port, 29, 0x001f);
-				SkGmPhyWrite(pAC, IoC, Port, 30, 0xfff3);
-
-				SK_IN32(IoC, PCI_C(PCI_OUR_REG_1), &DWord);
-
-				SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
-				
-				/* Set PHY to Coma Mode */
-				SK_OUT32(IoC, PCI_C(PCI_OUR_REG_1), DWord | PCI_PHY_COMA);
-				
-				SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
-
-			break;
-			
-			/* IEEE 22.2.4.1.5 compatible power down mode */
-			case PHY_PM_IEEE_POWER_DOWN:
-				/*
-				 * - disable MAC 125 MHz clock
-				 * - allow MAC power down
-				 */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
-				Word |= PHY_M_PC_DIS_125CLK;
-				Word &=	~PHY_M_PC_MAC_POW_UP;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
-
-				/*
-				 * register changes must be followed by a software
-				 * reset to take effect
-				 */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word);
-				Word |= PHY_CT_RESET;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word);
-
-				/* switch IEEE compatible power down mode on */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word);
-				Word |= PHY_CT_PDOWN;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word);
-			break;
-
-			/* energy detect and energy detect plus mode */
-			case PHY_PM_ENERGY_DETECT:
-			case PHY_PM_ENERGY_DETECT_PLUS:
-				/*
-				 * - disable MAC 125 MHz clock
-				 */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
-				Word |= PHY_M_PC_DIS_125CLK;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
-				
-				/* activate energy detect mode 1 */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
-
-				/* energy detect mode */
-				if (Mode == PHY_PM_ENERGY_DETECT) {
-					Word |= PHY_M_PC_EN_DET;
-				}
-				/* energy detect plus mode */
-				else {
-					Word |= PHY_M_PC_EN_DET_PLUS;
-				}
-
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
-
-				/*
-				 * reinitialize the PHY to force a software reset
-				 * which is necessary after the register settings
-				 * for the energy detect modes.
-				 * Furthermore reinitialisation prevents that the
-				 * PHY is running out of a stable state.
-				 */
-				SkGmInitPhyMarv(pAC, IoC, Port, SK_FALSE);
-			break;
-
-			/* don't change current power mode */
-			default:
-				pAC->GIni.GP[Port].PPhyPowerState = LastMode;
-				Ret = 1;
-			break;
-		}
-	}
-	/* low power modes are not supported by this chip */
-	else {
-		Ret = 1;
-	}
-
-	return(Ret);
-
-}	/* SkGmEnterLowPowerMode */
-
-/******************************************************************************
- *
- *	SkGmLeaveLowPowerMode()
- *
- * Description:	
- *	Leave the current low power mode and switch to normal mode
- *
- * Note:
- *
- * Returns:
- *		0:	ok
- *		1:	error
- */
-int SkGmLeaveLowPowerMode(
-SK_AC	*pAC,		/* adapter context */
-SK_IOC	IoC,		/* IO context */
-int		Port)		/* Port Index (e.g. MAC_1) */
-{
-	SK_U32	DWord;
-	SK_U16	Word;
-	SK_U8	LastMode;
-	int		Ret = 0;
-
-	if (pAC->GIni.GIYukonLite &&
-		pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) {
-
-		/* save current power mode */
-		LastMode = pAC->GIni.GP[Port].PPhyPowerState;
-		pAC->GIni.GP[Port].PPhyPowerState = PHY_PM_OPERATIONAL_MODE;
-
-		switch (LastMode) {
-			/* coma mode (deep sleep) */
-			case PHY_PM_DEEP_SLEEP:
-				SK_IN32(IoC, PCI_C(PCI_OUR_REG_1), &DWord);
-
-				SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
-				
-				/* Release PHY from Coma Mode */
-				SK_OUT32(IoC, PCI_C(PCI_OUR_REG_1), DWord & ~PCI_PHY_COMA);
-				
-				SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
-				
-				SK_IN32(IoC, B2_GP_IO, &DWord);
-
-				/* set to output */
-				DWord |= (GP_DIR_9 | GP_IO_9);
-
-				/* set PHY reset */
-				SK_OUT32(IoC, B2_GP_IO, DWord);
-
-				DWord &= ~GP_IO_9; /* clear PHY reset (active high) */
-
-				/* clear PHY reset */
-				SK_OUT32(IoC, B2_GP_IO, DWord);
-			break;
-			
-			/* IEEE 22.2.4.1.5 compatible power down mode */
-			case PHY_PM_IEEE_POWER_DOWN:
-				/*
-				 * - enable MAC 125 MHz clock
-				 * - set MAC power up
-				 */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
-				Word &= ~PHY_M_PC_DIS_125CLK;
-				Word |=	PHY_M_PC_MAC_POW_UP;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
-
-				/*
-				 * register changes must be followed by a software
-				 * reset to take effect
-				 */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word);
-				Word |= PHY_CT_RESET;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word);
-
-				/* switch IEEE compatible power down mode off */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word);
-				Word &= ~PHY_CT_PDOWN;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word);
-			break;
-
-			/* energy detect and energy detect plus mode */
-			case PHY_PM_ENERGY_DETECT:
-			case PHY_PM_ENERGY_DETECT_PLUS:
-				/*
-				 * - enable MAC 125 MHz clock
-				 */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
-				Word &= ~PHY_M_PC_DIS_125CLK;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
-				
-				/* disable energy detect mode */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
-				Word &= ~PHY_M_PC_EN_DET_MSK;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
-
-				/*
-				 * reinitialize the PHY to force a software reset
-				 * which is necessary after the register settings
-				 * for the energy detect modes.
-				 * Furthermore reinitialisation prevents that the
-				 * PHY is running out of a stable state.
-				 */
-				SkGmInitPhyMarv(pAC, IoC, Port, SK_FALSE);
-			break;
-
-			/* don't change current power mode */
-			default:
-				pAC->GIni.GP[Port].PPhyPowerState = LastMode;
-				Ret = 1;
-			break;
-		}
-	}
-	/* low power modes are not supported by this chip */
-	else {
-		Ret = 1;
-	}
-
-	return(Ret);
-
-}	/* SkGmLeaveLowPowerMode */
-#endif /* !SK_SLIM */
-
-
 /******************************************************************************
  *
  *	SkGmInitPhyMarv() - Initialize the Marvell Phy registers
@@ -3420,145 +3112,6 @@
 }	/* SkMacAutoNegDone */
 
 
-#ifdef GENESIS
-/******************************************************************************
- *
- *	SkXmSetRxTxEn() - Special Set Rx/Tx Enable and some features in XMAC
- *
- * Description:
- *  sets MAC or PHY LoopBack and Duplex Mode in the MMU Command Reg.
- *  enables Rx/Tx
- *
- * Returns: N/A
- */
-static void SkXmSetRxTxEn(
-SK_AC	*pAC,		/* Adapter Context */
-SK_IOC	IoC,		/* IO context */
-int		Port,		/* Port Index (MAC_1 + n) */
-int		Para)		/* Parameter to set: MAC or PHY LoopBack, Duplex Mode */
-{
-	SK_U16	Word;
-
-	XM_IN16(IoC, Port, XM_MMU_CMD, &Word);
-
-	switch (Para & (SK_MAC_LOOPB_ON | SK_MAC_LOOPB_OFF)) {
-	case SK_MAC_LOOPB_ON:
-		Word |= XM_MMU_MAC_LB;
-		break;
-	case SK_MAC_LOOPB_OFF:
-		Word &= ~XM_MMU_MAC_LB;
-		break;
-	}
-
-	switch (Para & (SK_PHY_LOOPB_ON | SK_PHY_LOOPB_OFF)) {
-	case SK_PHY_LOOPB_ON:
-		Word |= XM_MMU_GMII_LOOP;
-		break;
-	case SK_PHY_LOOPB_OFF:
-		Word &= ~XM_MMU_GMII_LOOP;
-		break;
-	}
-	
-	switch (Para & (SK_PHY_FULLD_ON | SK_PHY_FULLD_OFF)) {
-	case SK_PHY_FULLD_ON:
-		Word |= XM_MMU_GMII_FD;
-		break;
-	case SK_PHY_FULLD_OFF:
-		Word &= ~XM_MMU_GMII_FD;
-		break;
-	}
-	
-	XM_OUT16(IoC, Port, XM_MMU_CMD, Word | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
-
-	/* dummy read to ensure writing */
-	XM_IN16(IoC, Port, XM_MMU_CMD, &Word);
-
-}	/* SkXmSetRxTxEn */
-#endif /* GENESIS */
-
-
-#ifdef YUKON
-/******************************************************************************
- *
- *	SkGmSetRxTxEn() - Special Set Rx/Tx Enable and some features in GMAC
- *
- * Description:
- *  sets MAC LoopBack and Duplex Mode in the General Purpose Control Reg.
- *  enables Rx/Tx
- *
- * Returns: N/A
- */
-static void SkGmSetRxTxEn(
-SK_AC	*pAC,		/* Adapter Context */
-SK_IOC	IoC,		/* IO context */
-int		Port,		/* Port Index (MAC_1 + n) */
-int		Para)		/* Parameter to set: MAC LoopBack, Duplex Mode */
-{
-	SK_U16	Ctrl;
-	
-	GM_IN16(IoC, Port, GM_GP_CTRL, &Ctrl);
-
-	switch (Para & (SK_MAC_LOOPB_ON | SK_MAC_LOOPB_OFF)) {
-	case SK_MAC_LOOPB_ON:
-		Ctrl |= GM_GPCR_LOOP_ENA;
-		break;
-	case SK_MAC_LOOPB_OFF:
-		Ctrl &= ~GM_GPCR_LOOP_ENA;
-		break;
-	}
-
-	switch (Para & (SK_PHY_FULLD_ON | SK_PHY_FULLD_OFF)) {
-	case SK_PHY_FULLD_ON:
-		Ctrl |= GM_GPCR_DUP_FULL;
-		break;
-	case SK_PHY_FULLD_OFF:
-		Ctrl &= ~GM_GPCR_DUP_FULL;
-		break;
-	}
-	
-    GM_OUT16(IoC, Port, GM_GP_CTRL, (SK_U16)(Ctrl | GM_GPCR_RX_ENA |
-		GM_GPCR_TX_ENA));
-
-	/* dummy read to ensure writing */
-	GM_IN16(IoC, Port, GM_GP_CTRL, &Ctrl);
-
-}	/* SkGmSetRxTxEn */
-#endif /* YUKON */
-
-
-#ifndef SK_SLIM
-/******************************************************************************
- *
- *	SkMacSetRxTxEn() - Special Set Rx/Tx Enable and parameters
- *
- * Description:	calls the Special Set Rx/Tx Enable routines dep. on board type
- *
- * Returns: N/A
- */
-void SkMacSetRxTxEn(
-SK_AC	*pAC,		/* Adapter Context */
-SK_IOC	IoC,		/* IO context */
-int		Port,		/* Port Index (MAC_1 + n) */
-int		Para)
-{
-#ifdef GENESIS
-	if (pAC->GIni.GIGenesis) {
-		
-		SkXmSetRxTxEn(pAC, IoC, Port, Para);
-	}
-#endif /* GENESIS */
-	
-#ifdef YUKON
-	if (pAC->GIni.GIYukon) {
-		
-		SkGmSetRxTxEn(pAC, IoC, Port, Para);
-	}
-#endif /* YUKON */
-
-}	/* SkMacSetRxTxEn */
-#endif /* !SK_SLIM */
-
-
 /******************************************************************************
  *
  *	SkMacRxTxEnable() - Enable Rx/Tx activity if port is up
@@ -3976,7 +3529,7 @@
  * Returns:
  *	nothing
  */
-void SkXmIrq(
+static void SkXmIrq(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC,		/* IO context */
 int		Port)		/* Port Index (MAC_1 + n) */
@@ -4112,7 +3665,7 @@
  * Returns:
  *	nothing
  */
-void SkGmIrq(
+static void SkGmIrq(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC,		/* IO context */
 int		Port)		/* Port Index (MAC_1 + n) */
diff -urN oldtree/drivers/net/sunhme.c newtree/drivers/net/sunhme.c
--- oldtree/drivers/net/sunhme.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/sunhme.c	2006-01-29 11:18:20.567162112 +0000
@@ -3013,7 +3013,7 @@
 }
 #endif /* !(__sparc__) */
 
-static int __init happy_meal_pci_init(struct pci_dev *pdev)
+static int __devinit happy_meal_pci_init(struct pci_dev *pdev)
 {
 	struct quattro *qp = NULL;
 #ifdef __sparc__
@@ -3073,6 +3073,7 @@
 	memset(hp, 0, sizeof(*hp));
 
 	hp->happy_dev = pdev;
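+	/* take a device reference; dropped in happy_meal_pci_remove()
+	 * or on the probe error path */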
+	pci_dev_get(pdev);
 
 	spin_lock_init(&hp->happy_lock);
 
@@ -3260,6 +3261,7 @@
 	pci_release_regions(pdev);
 
 err_out_clear_quattro:
+	pci_dev_put(pdev);
 	if (qp != NULL)
 		qp->happy_meals[qfe_slot] = NULL;
 
@@ -3304,21 +3306,58 @@
 #endif
 
 #ifdef CONFIG_PCI
-static int __init happy_meal_pci_probe(void)
+static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
+	const struct pci_device_id *id)
 {
-	struct pci_dev *pdev = NULL;
-	int cards = 0;
+	int retval;
+
+	retval = pci_enable_device(pdev);
+	if (retval < 0)
+		goto err;
+
+	pci_set_master(pdev);
+	retval = happy_meal_pci_init(pdev);
+	if (retval < 0)
+		goto err;
+
+	return 0;
+err:
+	return retval;
+}
 
-	while ((pdev = pci_find_device(PCI_VENDOR_ID_SUN,
-				       PCI_DEVICE_ID_SUN_HAPPYMEAL, pdev)) != NULL) {
-		if (pci_enable_device(pdev))
-			continue;
-		pci_set_master(pdev);
-		cards++;
-		happy_meal_pci_init(pdev);
+static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
+{
+	struct quattro *tmp, *qp = qfe_pci_list;
+	struct pci_dev *bdev = pdev->bus->self;
+
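+	/* note: this assumes the device sits on qfe_pci_list; qp and the
+	 * qp->next chain are dereferenced without NULL checks */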
+	if (qp->quattro_dev == bdev) { /* first entry in the list? */
+		qfe_pci_list = qp->next;
+		kfree(qp);
+		goto end;
 	}
-	return cards;
+
+	for (; qp->next != NULL; qp = qp->next) /* search the rest of the list */
+		if (qp->next->quattro_dev == bdev)
+			break;
+
+	tmp = qp->next; /* unlink the entry, but preserve the list */
+	qp->next = qp->next->next;
+	kfree(tmp);
+end:
+	pci_dev_put(pdev);
 }
+
+static struct pci_device_id happy_meal_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
+	{ 0 }
+};
+MODULE_DEVICE_TABLE(pci, happy_meal_pci_tbl);
+
+static struct pci_driver happy_meal_pci_driver = {
+	.name		= "happy_meal_pci",
+	.id_table	= happy_meal_pci_tbl,
+	.probe		= happy_meal_pci_probe,
+	.remove		= __devexit_p(happy_meal_pci_remove)
+};
+
 #endif
 
 static int __init happy_meal_probe(void)
@@ -3337,11 +3376,10 @@
 	cards += happy_meal_sbus_probe();
 #endif
 #ifdef CONFIG_PCI
-	cards += happy_meal_pci_probe();
+	return pci_register_driver(&happy_meal_pci_driver);
+#else
+	return cards ? 0 : -ENODEV;
 #endif
-	if (!cards)
-		return -ENODEV;
-	return 0;
 }
 
 
@@ -3408,14 +3446,7 @@
 	}
 #endif
 #ifdef CONFIG_PCI
-	while (qfe_pci_list) {
-		struct quattro *qfe = qfe_pci_list;
-		struct quattro *next = qfe->next;
-
-		kfree(qfe);
-
-		qfe_pci_list = next;
-	}
+	pci_unregister_driver(&happy_meal_pci_driver);
 #endif
 }
 
diff -urN oldtree/drivers/net/tulip/media.c newtree/drivers/net/tulip/media.c
--- oldtree/drivers/net/tulip/media.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/tulip/media.c	2006-01-29 11:18:20.617154512 +0000
@@ -44,8 +44,10 @@
 
 /* MII transceiver control section.
    Read and write the MII registers using software-generated serial
-   MDIO protocol.  See the MII specifications or DP83840A data sheet
-   for details. */
+   MDIO protocol.
+   See IEEE 802.3-2002, Section 2, Clause 22.2.4 ("Management functions"),
+   or the DP83840A data sheet for more details.
+   */
 
 int tulip_mdio_read(struct net_device *dev, int phy_id, int location)
 {
@@ -272,13 +274,29 @@
 				int reset_length = p[2 + init_length];
 				misc_info = (u16*)(reset_sequence + reset_length);
 				if (startup) {
+					int timeout = 10;	/* max 1 ms */
 					iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12);
 					for (i = 0; i < reset_length; i++)
 						iowrite32(reset_sequence[i], ioaddr + CSR12);
+
+					/* flush posted writes */
+					ioread32(ioaddr + CSR12);
+
+					/* Sect 3.10.3 in DP83840A.pdf (p39) */
+					udelay(500);
+
+					/* Section 4.2 in DP83840A.pdf (p43) */
+					/* and IEEE 802.3 "22.2.4.1.1 Reset" */
+					while (timeout-- &&
+						(tulip_mdio_read(dev, phy_num, MII_BMCR) & BMCR_RESET))
+						udelay(100);
 				}
 				for (i = 0; i < init_length; i++)
 					iowrite32(init_sequence[i], ioaddr + CSR12);
+
+				ioread32(ioaddr + CSR12);	/* flush posted writes */
 			}
+
 			tmp_info = get_u16(&misc_info[1]);
 			if (tmp_info)
 				tp->advertising[phy_num] = tmp_info | 1;
diff -urN oldtree/drivers/net/tulip/tulip.h newtree/drivers/net/tulip/tulip.h
--- oldtree/drivers/net/tulip/tulip.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/tulip/tulip.h	2006-01-29 11:18:20.619154208 +0000
@@ -474,8 +474,11 @@
 			udelay(10);
 
 		if (!i)
-			printk(KERN_DEBUG "%s: tulip_stop_rxtx() failed\n",
-					pci_name(tp->pdev));
+			printk(KERN_DEBUG "%s: tulip_stop_rxtx() failed"
+					" (CSR5 0x%x CSR6 0x%x)\n",
+					pci_name(tp->pdev),
+					ioread32(ioaddr + CSR5),
+					ioread32(ioaddr + CSR6));
 	}
 }
 
diff -urN oldtree/drivers/net/tulip/tulip_core.c newtree/drivers/net/tulip/tulip_core.c
--- oldtree/drivers/net/tulip/tulip_core.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/tulip/tulip_core.c	2006-01-29 11:18:20.618154360 +0000
@@ -22,7 +22,7 @@
 #else
 #define DRV_VERSION	"1.1.13"
 #endif
-#define DRV_RELDATE	"May 11, 2002"
+#define DRV_RELDATE	"December 15, 2004"
 
 
 #include <linux/module.h>
diff -urN oldtree/drivers/net/wireless/ipw2200.c newtree/drivers/net/wireless/ipw2200.c
--- oldtree/drivers/net/wireless/ipw2200.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/wireless/ipw2200.c	2006-01-29 11:18:10.823643352 +0000
@@ -1870,7 +1870,8 @@
 }
 
 #define HOST_COMPLETE_TIMEOUT HZ
-static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
+
+static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
 {
 	int rc = 0;
 	unsigned long flags;
@@ -1899,7 +1900,7 @@
 		     priv->status);
 	printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
 
-	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0);
+	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
 	if (rc) {
 		priv->status &= ~STATUS_HCMD_ACTIVE;
 		IPW_ERROR("Failed to send %s: Reason %d\n",
@@ -1934,7 +1935,7 @@
 		goto exit;
 	}
 
-      exit:
+exit:
 	if (priv->cmdlog) {
 		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
 		priv->cmdlog_pos %= priv->cmdlog_len;
@@ -1942,61 +1943,62 @@
 	return rc;
 }
 
-static int ipw_send_host_complete(struct ipw_priv *priv)
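+/* thin wrappers around __ipw_send_cmd() for commands without and with a payload */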
+static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
+{
+	struct host_cmd cmd = {
+		.cmd = command,
+	};
+
+	return __ipw_send_cmd(priv, &cmd);
+}
+
+static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
+			    void *data)
 {
 	struct host_cmd cmd = {
-		.cmd = IPW_CMD_HOST_COMPLETE,
-		.len = 0
+		.cmd = command,
+		.len = len,
+		.param = data,
 	};
 
+	return __ipw_send_cmd(priv, &cmd);
+}
+
+static int ipw_send_host_complete(struct ipw_priv *priv)
+{
 	if (!priv) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
 }
 
 static int ipw_send_system_config(struct ipw_priv *priv,
 				  struct ipw_sys_config *config)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_SYSTEM_CONFIG,
-		.len = sizeof(*config)
-	};
-
 	if (!priv || !config) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	memcpy(cmd.param, config, sizeof(*config));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, sizeof(*config),
+					config);
 }
 
 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_SSID,
-		.len = min(len, IW_ESSID_MAX_SIZE)
-	};
-
 	if (!priv || !ssid) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	memcpy(cmd.param, ssid, cmd.len);
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
+					ssid);
 }
 
 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_ADAPTER_ADDRESS,
-		.len = ETH_ALEN
-	};
-
 	if (!priv || !mac) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
@@ -2005,8 +2007,8 @@
 	IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
 		       priv->net_dev->name, MAC_ARG(mac));
 
-	memcpy(cmd.param, mac, ETH_ALEN);
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN,
+					mac);
 }
 
 /*
@@ -2065,51 +2067,40 @@
 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
 				     struct ipw_scan_request_ext *request)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_SCAN_REQUEST_EXT,
-		.len = sizeof(*request)
-	};
-
-	memcpy(cmd.param, request, sizeof(*request));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
+					sizeof(*request), request);
 }
 
 static int ipw_send_scan_abort(struct ipw_priv *priv)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_SCAN_ABORT,
-		.len = 0
-	};
-
 	if (!priv) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
 }
 
 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_SENSITIVITY_CALIB,
-		.len = sizeof(struct ipw_sensitivity_calib)
+	struct ipw_sensitivity_calib calib = {
+		.beacon_rssi_raw = sens,
 	};
-	struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *)
-	    &cmd.param;
-	calib->beacon_rssi_raw = sens;
-	return ipw_send_cmd(priv, &cmd);
+
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
+					&calib);
 }
 
 static int ipw_send_associate(struct ipw_priv *priv,
 			      struct ipw_associate *associate)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_ASSOCIATE,
-		.len = sizeof(*associate)
-	};
-
 	struct ipw_associate tmp_associate;
+
+	if (!priv || !associate) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
 	memcpy(&tmp_associate, associate, sizeof(*associate));
 	tmp_associate.policy_support =
 	    cpu_to_le16(tmp_associate.policy_support);
@@ -2122,80 +2113,56 @@
 	    cpu_to_le16(tmp_associate.beacon_interval);
 	tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
 
-	if (!priv || !associate) {
-		IPW_ERROR("Invalid args\n");
-		return -1;
-	}
-
-	memcpy(cmd.param, &tmp_associate, sizeof(*associate));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
+					&tmp_associate);
 }
 
 static int ipw_send_supported_rates(struct ipw_priv *priv,
 				    struct ipw_supported_rates *rates)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_SUPPORTED_RATES,
-		.len = sizeof(*rates)
-	};
-
 	if (!priv || !rates) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	memcpy(cmd.param, rates, sizeof(*rates));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
+					rates);
 }
 
 static int ipw_set_random_seed(struct ipw_priv *priv)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_SEED_NUMBER,
-		.len = sizeof(u32)
-	};
+	u32 val;
 
 	if (!priv) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	get_random_bytes(&cmd.param, sizeof(u32));
+	get_random_bytes(&val, sizeof(val));
 
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
 }
 
 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_CARD_DISABLE,
-		.len = sizeof(u32)
-	};
-
 	if (!priv) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	*((u32 *) & cmd.param) = phy_off;
-
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
+					&phy_off);
 }
 
 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_TX_POWER,
-		.len = sizeof(*power)
-	};
-
 	if (!priv || !power) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	memcpy(cmd.param, power, sizeof(*power));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power),
+					power);
 }
 
 static int ipw_set_tx_power(struct ipw_priv *priv)
@@ -2247,18 +2214,14 @@
 	struct ipw_rts_threshold rts_threshold = {
 		.rts_threshold = rts,
 	};
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_RTS_THRESHOLD,
-		.len = sizeof(rts_threshold)
-	};
 
 	if (!priv) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	memcpy(cmd.param, &rts_threshold, sizeof(rts_threshold));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
+				sizeof(rts_threshold), &rts_threshold);
 }
 
 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
@@ -2266,27 +2229,19 @@
 	struct ipw_frag_threshold frag_threshold = {
 		.frag_threshold = frag,
 	};
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_FRAG_THRESHOLD,
-		.len = sizeof(frag_threshold)
-	};
 
 	if (!priv) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	memcpy(cmd.param, &frag_threshold, sizeof(frag_threshold));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
+				sizeof(frag_threshold), &frag_threshold);
 }
 
 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_POWER_MODE,
-		.len = sizeof(u32)
-	};
-	u32 *param = (u32 *) (&cmd.param);
+	u32 param;
 
 	if (!priv) {
 		IPW_ERROR("Invalid args\n");
@@ -2297,17 +2252,18 @@
 	 * level */
 	switch (mode) {
 	case IPW_POWER_BATTERY:
-		*param = IPW_POWER_INDEX_3;
+		param = IPW_POWER_INDEX_3;
 		break;
 	case IPW_POWER_AC:
-		*param = IPW_POWER_MODE_CAM;
+		param = IPW_POWER_MODE_CAM;
 		break;
 	default:
-		*param = mode;
+		param = mode;
 		break;
 	}
 
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
+					&param);
 }
 
 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
@@ -2316,18 +2272,14 @@
 		.short_retry_limit = slimit,
 		.long_retry_limit = llimit
 	};
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_RETRY_LIMIT,
-		.len = sizeof(retry_limit)
-	};
 
 	if (!priv) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	memcpy(cmd.param, &retry_limit, sizeof(retry_limit));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
+					&retry_limit);
 }
 
 /*
@@ -5672,54 +5624,44 @@
 
 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
 {
-	struct ipw_tgi_tx_key *key;
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_TGI_TX_KEY,
-		.len = sizeof(*key)
-	};
+	struct ipw_tgi_tx_key key;
 
 	if (!(priv->ieee->sec.flags & (1 << index)))
 		return;
 
-	key = (struct ipw_tgi_tx_key *)&cmd.param;
-	key->key_id = index;
-	memcpy(key->key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
-	key->security_type = type;
-	key->station_index = 0;	/* always 0 for BSS */
-	key->flags = 0;
+	key.key_id = index;
+	memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
+	key.security_type = type;
+	key.station_index = 0;	/* always 0 for BSS */
+	key.flags = 0;
 	/* 0 for new key; previous value of counter (after fatal error) */
-	key->tx_counter[0] = 0;
-	key->tx_counter[1] = 0;
+	key.tx_counter[0] = 0;
+	key.tx_counter[1] = 0;
 
-	ipw_send_cmd(priv, &cmd);
+	ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
 }
 
 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
 {
-	struct ipw_wep_key *key;
+	struct ipw_wep_key key;
 	int i;
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_WEP_KEY,
-		.len = sizeof(*key)
-	};
 
-	key = (struct ipw_wep_key *)&cmd.param;
-	key->cmd_id = DINO_CMD_WEP_KEY;
-	key->seq_num = 0;
+	key.cmd_id = DINO_CMD_WEP_KEY;
+	key.seq_num = 0;
 
 	/* Note: AES keys cannot be set for multiple times.
 	 * Only set it at the first time. */
 	for (i = 0; i < 4; i++) {
-		key->key_index = i | type;
+		key.key_index = i | type;
 		if (!(priv->ieee->sec.flags & (1 << i))) {
-			key->key_size = 0;
+			key.key_size = 0;
 			continue;
 		}
 
-		key->key_size = priv->ieee->sec.key_sizes[i];
-		memcpy(key->key, priv->ieee->sec.keys[i], key->key_size);
+		key.key_size = priv->ieee->sec.key_sizes[i];
+		memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
 
-		ipw_send_cmd(priv, &cmd);
+		ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
 	}
 }
 
@@ -6216,15 +6158,10 @@
 static int ipw_set_rsn_capa(struct ipw_priv *priv,
 			    char *capabilities, int length)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_RSN_CAPABILITIES,
-		.len = length,
-	};
-
 	IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
 
-	memcpy(cmd.param, capabilities, length);
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
+					capabilities);
 }
 
 /*
@@ -7011,25 +6948,15 @@
 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
 				       *qos_param)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_QOS_PARAMETERS,
-		.len = (sizeof(struct ieee80211_qos_parameters) * 3)
-	};
-
-	memcpy(cmd.param, qos_param, sizeof(*qos_param) * 3);
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
+					sizeof(*qos_param) * 3, qos_param);
 }
 
 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
 				     *qos_param)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_WME_INFO,
-		.len = sizeof(*qos_param)
-	};
-
-	memcpy(cmd.param, qos_param, sizeof(*qos_param));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
+					qos_param);
 }
 
 #endif				/* CONFIG_IPW_QOS */
@@ -9649,11 +9576,6 @@
 	u16 remaining_bytes;
 	int fc;
 
-	/* If there isn't room in the queue, we return busy and let the
-	 * network stack requeue the packet for us */
-	if (ipw_queue_space(q) < q->high_mark)
-		return NETDEV_TX_BUSY;
-
 	switch (priv->ieee->iw_mode) {
 	case IW_MODE_ADHOC:
 		hdr_len = IEEE80211_3ADDR_LEN;
@@ -9871,7 +9793,7 @@
 
       fail_unlock:
 	spin_unlock_irqrestore(&priv->lock, flags);
-	return 1;
+	return -1;
 }
 
 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
diff -urN oldtree/drivers/net/wireless/ipw2200.h newtree/drivers/net/wireless/ipw2200.h
--- oldtree/drivers/net/wireless/ipw2200.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/net/wireless/ipw2200.h	2006-01-29 11:18:10.825643048 +0000
@@ -1860,7 +1860,7 @@
 	u8 cmd;
 	u8 len;
 	u16 reserved;
-	u32 param[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH];
+	u32 *param;
 } __attribute__ ((packed));
 
 struct ipw_cmd_log {
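
All of the ipw2200.c hunks funnel through ipw_send_cmd_pdu(), whose body is
not shown in this fragment. Together with the header change above (param
becomes a pointer instead of an inline array), the helper presumably looks
roughly like this; only the call signature is taken from the hunks, and the
low-level sender's name (__ipw_send_cmd) and the exact body are assumptions:

	/* Sketch, not the patch's actual implementation: build a host_cmd
	 * that points at the caller's payload rather than copying it into
	 * an inline param[] array, then hand it to the low-level sender. */
	static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 cmd, u8 len,
				    void *data)
	{
		struct host_cmd host_cmd = {
			.cmd = cmd,
			.len = len,
			.param = data,	/* u32 *param after this patch */
		};

		return __ipw_send_cmd(priv, &host_cmd);	/* assumed helper */
	}
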
diff -urN oldtree/drivers/pci/quirks.c newtree/drivers/pci/quirks.c
--- oldtree/drivers/pci/quirks.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/pci/quirks.c	2006-01-29 11:18:10.826642896 +0000
@@ -1125,6 +1125,9 @@
 	case 0x27c4:
 		ich = 7;
 		break;
+	case 0x2828:	/* ICH8M */
+		ich = 8;
+		break;
 	default:
 		/* we do not handle this PCI device */
 		return;
@@ -1144,7 +1147,7 @@
 		else
 			return;			/* not in combined mode */
 	} else {
-		WARN_ON((ich != 6) && (ich != 7));
+		WARN_ON((ich != 6) && (ich != 7) && (ich != 8));
 		tmp &= 0x3;  /* interesting bits 1:0 */
 		if (tmp & (1 << 0))
 			comb = (1 << 2);	/* PATA port 0, SATA port 1 */
diff -urN oldtree/drivers/scsi/libata-core.c newtree/drivers/scsi/libata-core.c
--- oldtree/drivers/scsi/libata-core.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/scsi/libata-core.c	2006-01-29 11:18:10.830642288 +0000
@@ -1032,18 +1032,22 @@
 {
 	u16 modes;
 
-	/* Usual case. Word 53 indicates word 88 is valid */
-	if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) {
+	/* Usual case. Word 53 indicates word 64 is valid */
+	if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) {
 		modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
 		modes <<= 3;
 		modes |= 0x7;
 		return modes;
 	}
 
-	/* If word 88 isn't valid then Word 51 holds the PIO timing number
-	   for the maximum. Turn it into a mask and return it */
-	modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
+	/* If word 64 isn't valid then Word 51 high byte holds the PIO timing
+	   number for the maximum. Turn it into a mask and return it */
+	modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1;
 	return modes;
+	/* But wait... there's more. Design your standards by committee and
+	   you too can get a free iordy field to process. However, it's the
+	   speeds, not the modes, that are supported... Note that drivers
+	   using the timing API will get this right anyway */
 }
 
 static int ata_qc_wait_err(struct ata_queued_cmd *qc,
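
A worked example of the corrected mask computation, with hypothetical
identify data: suppose word 53 bit 1 is clear and word 51 reads 0x0200,
i.e. its high byte reports PIO timing mode 2.

	u16 old_pio_word = 0x0200;	/* hypothetical word 51 */
	u16 modes = (2 << ((old_pio_word >> 8) & 0xFF)) - 1;
	/* (2 << 2) - 1 = 0x07: bits 0-2 set, i.e. PIO modes 0, 1 and 2.
	 * The old code shifted by the low byte instead, which here would
	 * give (2 << 0) - 1 = 0x01 and wrongly cap the drive at PIO 0. */
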
diff -urN oldtree/drivers/scsi/libata-core.c.orig newtree/drivers/scsi/libata-core.c.orig
--- oldtree/drivers/scsi/libata-core.c.orig	1970-01-01 00:00:00.000000000 +0000
+++ newtree/drivers/scsi/libata-core.c.orig	2006-01-28 19:08:42.000000000 +0000
@@ -0,0 +1,4966 @@
+/*
+ *  libata-core.c - helper library for ATA
+ *
+ *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *    		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
+ *  Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/DocBook/libata.*
+ *
+ *  Hardware documentation available from http://www.t13.org/ and
+ *  http://www.sata-io.org/
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/suspend.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/scatterlist.h>
+#include <scsi/scsi.h>
+#include "scsi_priv.h"
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <asm/io.h>
+#include <asm/semaphore.h>
+#include <asm/byteorder.h>
+
+#include "libata.h"
+
+static unsigned int ata_busy_sleep (struct ata_port *ap,
+				    unsigned long tmout_pat,
+			    	    unsigned long tmout);
+static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
+static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
+static void ata_set_mode(struct ata_port *ap);
+static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
+static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift);
+static int fgb(u32 bitmap);
+static int ata_choose_xfer_mode(const struct ata_port *ap,
+				u8 *xfer_mode_out,
+				unsigned int *xfer_shift_out);
+static void __ata_qc_complete(struct ata_queued_cmd *qc);
+
+static unsigned int ata_unique_id = 1;
+static struct workqueue_struct *ata_wq;
+
+int atapi_enabled = 0;
+module_param(atapi_enabled, int, 0444);
+MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("Library module for ATA devices");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ *	ata_tf_load_pio - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Outputs ATA taskfile to standard ATA host controller.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	if (tf->ctl != ap->last_ctl) {
+		outb(tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		outb(tf->hob_feature, ioaddr->feature_addr);
+		outb(tf->hob_nsect, ioaddr->nsect_addr);
+		outb(tf->hob_lbal, ioaddr->lbal_addr);
+		outb(tf->hob_lbam, ioaddr->lbam_addr);
+		outb(tf->hob_lbah, ioaddr->lbah_addr);
+		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+			tf->hob_feature,
+			tf->hob_nsect,
+			tf->hob_lbal,
+			tf->hob_lbam,
+			tf->hob_lbah);
+	}
+
+	if (is_addr) {
+		outb(tf->feature, ioaddr->feature_addr);
+		outb(tf->nsect, ioaddr->nsect_addr);
+		outb(tf->lbal, ioaddr->lbal_addr);
+		outb(tf->lbam, ioaddr->lbam_addr);
+		outb(tf->lbah, ioaddr->lbah_addr);
+		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+			tf->feature,
+			tf->nsect,
+			tf->lbal,
+			tf->lbam,
+			tf->lbah);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE) {
+		outb(tf->device, ioaddr->device_addr);
+		VPRINTK("device 0x%X\n", tf->device);
+	}
+
+	ata_wait_idle(ap);
+}
+
+/**
+ *	ata_tf_load_mmio - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Outputs ATA taskfile to standard ATA host controller using MMIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	if (tf->ctl != ap->last_ctl) {
+		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
+		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
+		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
+		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
+		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
+		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+			tf->hob_feature,
+			tf->hob_nsect,
+			tf->hob_lbal,
+			tf->hob_lbam,
+			tf->hob_lbah);
+	}
+
+	if (is_addr) {
+		writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
+		writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
+		writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
+		writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
+		writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
+		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+			tf->feature,
+			tf->nsect,
+			tf->lbal,
+			tf->lbam,
+			tf->lbah);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE) {
+		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
+		VPRINTK("device 0x%X\n", tf->device);
+	}
+
+	ata_wait_idle(ap);
+}
+
+
+/**
+ *	ata_tf_load - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Outputs ATA taskfile to standard ATA host controller using MMIO
+ *	or PIO as indicated by the ATA_FLAG_MMIO flag.
+ *	Writes the control, feature, nsect, lbal, lbam, and lbah registers.
+ *	Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
+ *	hob_lbal, hob_lbam, and hob_lbah.
+ *
+ *	This function waits for idle (!BUSY and !DRQ) after writing
+ *	registers.  If the control register has a new value, this
+ *	function also waits for idle after writing control and before
+ *	writing the remaining registers.
+ *
+ *	May be used as the tf_load() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		ata_tf_load_mmio(ap, tf);
+	else
+		ata_tf_load_pio(ap, tf);
+}
+
+/**
+ *	ata_exec_command_pio - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues PIO write to ATA command register, with proper
+ *	synchronization with interrupt handler / other threads.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
+
+	outb(tf->command, ap->ioaddr.command_addr);
+	ata_pause(ap);
+}
+
+
+/**
+ *	ata_exec_command_mmio - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues MMIO write to ATA command register, with proper
+ *	synchronization with interrupt handler / other threads.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
+
+	writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
+	ata_pause(ap);
+}
+
+
+/**
+ *	ata_exec_command - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues PIO/MMIO write to ATA command register, with proper
+ *	synchronization with interrupt handler / other threads.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		ata_exec_command_mmio(ap, tf);
+	else
+		ata_exec_command_pio(ap, tf);
+}
+
+/**
+ *	ata_tf_to_host - issue ATA taskfile to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues ATA taskfile register set to ATA host controller,
+ *	with proper synchronization with interrupt handler and
+ *	other threads.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static inline void ata_tf_to_host(struct ata_port *ap,
+				  const struct ata_taskfile *tf)
+{
+	ap->ops->tf_load(ap, tf);
+	ap->ops->exec_command(ap, tf);
+}
+
+/**
+ *	ata_tf_read_pio - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Reads ATA taskfile registers for currently-selected device
+ *	into @tf.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	tf->command = ata_check_status(ap);
+	tf->feature = inb(ioaddr->error_addr);
+	tf->nsect = inb(ioaddr->nsect_addr);
+	tf->lbal = inb(ioaddr->lbal_addr);
+	tf->lbam = inb(ioaddr->lbam_addr);
+	tf->lbah = inb(ioaddr->lbah_addr);
+	tf->device = inb(ioaddr->device_addr);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+		tf->hob_feature = inb(ioaddr->error_addr);
+		tf->hob_nsect = inb(ioaddr->nsect_addr);
+		tf->hob_lbal = inb(ioaddr->lbal_addr);
+		tf->hob_lbam = inb(ioaddr->lbam_addr);
+		tf->hob_lbah = inb(ioaddr->lbah_addr);
+	}
+}
+
+/**
+ *	ata_tf_read_mmio - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Reads ATA taskfile registers for currently-selected device
+ *	into @tf via MMIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	tf->command = ata_check_status(ap);
+	tf->feature = readb((void __iomem *)ioaddr->error_addr);
+	tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
+	tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
+	tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
+	tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
+	tf->device = readb((void __iomem *)ioaddr->device_addr);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
+		tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
+		tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
+		tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
+		tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
+		tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
+	}
+}
+
+
+/**
+ *	ata_tf_read - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Reads ATA taskfile registers for currently-selected device
+ *	into @tf.
+ *
+ *	Reads the status, error, nsect, lbal, lbam, lbah, and device
+ *	registers.  If ATA_TFLAG_LBA48 is set, also reads the hob
+ *	registers.
+ *
+ *	May be used as the tf_read() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		ata_tf_read_mmio(ap, tf);
+	else
+		ata_tf_read_pio(ap, tf);
+}
+
+/**
+ *	ata_check_status_pio - Read device status reg & clear interrupt
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile status register for currently-selected device
+ *	and returns its value.  This also clears pending interrupts
+ *	from this device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static u8 ata_check_status_pio(struct ata_port *ap)
+{
+	return inb(ap->ioaddr.status_addr);
+}
+
+/**
+ *	ata_check_status_mmio - Read device status reg & clear interrupt
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile status register for currently-selected device
+ *	via MMIO and returns its value.  This also clears pending
+ *	interrupts from this device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static u8 ata_check_status_mmio(struct ata_port *ap)
+{
+	return readb((void __iomem *) ap->ioaddr.status_addr);
+}
+
+
+/**
+ *	ata_check_status - Read device status reg & clear interrupt
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile status register for currently-selected device
+ *	and returns its value.  This also clears pending interrupts
+ *	from this device.
+ *
+ *	May be used as the check_status() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+u8 ata_check_status(struct ata_port *ap)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		return ata_check_status_mmio(ap);
+	return ata_check_status_pio(ap);
+}
+
+
+/**
+ *	ata_altstatus - Read device alternate status reg
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile alternate status register for
+ *	currently-selected device and return its value.
+ *
+ *	Note: may NOT be used as the check_altstatus() entry in
+ *	ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+u8 ata_altstatus(struct ata_port *ap)
+{
+	if (ap->ops->check_altstatus)
+		return ap->ops->check_altstatus(ap);
+
+	if (ap->flags & ATA_FLAG_MMIO)
+		return readb((void __iomem *)ap->ioaddr.altstatus_addr);
+	return inb(ap->ioaddr.altstatus_addr);
+}
+
+
+/**
+ *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
+ *	@tf: Taskfile to convert
+ *	@fis: Buffer into which data will be output
+ *	@pmp: Port multiplier port
+ *
+ *	Converts a standard ATA taskfile to a Serial ATA
+ *	FIS structure (Register - Host to Device).
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
+{
+	fis[0] = 0x27;	/* Register - Host to Device FIS */
+	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
+					    bit 7 indicates Command FIS */
+	fis[2] = tf->command;
+	fis[3] = tf->feature;
+
+	fis[4] = tf->lbal;
+	fis[5] = tf->lbam;
+	fis[6] = tf->lbah;
+	fis[7] = tf->device;
+
+	fis[8] = tf->hob_lbal;
+	fis[9] = tf->hob_lbam;
+	fis[10] = tf->hob_lbah;
+	fis[11] = tf->hob_feature;
+
+	fis[12] = tf->nsect;
+	fis[13] = tf->hob_nsect;
+	fis[14] = 0;
+	fis[15] = tf->ctl;
+
+	fis[16] = 0;
+	fis[17] = 0;
+	fis[18] = 0;
+	fis[19] = 0;
+}
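+
+/*
+ * Illustrative sketch, not used by the driver: the layout produced by
+ * ata_tf_to_fis() for a hypothetical READ DMA EXT (command 0x25) of one
+ * sector at LBA 0x12345678 with pmp 0.  The command and LBA values are
+ * assumptions chosen purely for illustration.
+ *
+ *	struct ata_taskfile tf;
+ *	u8 fis[20];
+ *
+ *	ata_tf_init(ap, &tf, 0);
+ *	tf.command  = 0x25;	// READ DMA EXT
+ *	tf.device   = 0x40;	// LBA bit
+ *	tf.nsect    = 1;
+ *	tf.lbal = 0x78; tf.lbam = 0x56; tf.lbah = 0x34;
+ *	tf.hob_lbal = 0x12;	// upper LBA bits
+ *	ata_tf_to_fis(&tf, fis, 0);
+ *	// fis[0] == 0x27 (Register H2D), fis[1] == 0x80 (C bit set),
+ *	// fis[2] == 0x25, fis[4..6] == 0x78 0x56 0x34, fis[8] == 0x12
+ */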
+
+/**
+ *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
+ *	@fis: Buffer from which data will be input
+ *	@tf: Taskfile to output
+ *
+ *	Converts a serial ATA FIS structure to a standard ATA taskfile.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
+{
+	tf->command	= fis[2];	/* status */
+	tf->feature	= fis[3];	/* error */
+
+	tf->lbal	= fis[4];
+	tf->lbam	= fis[5];
+	tf->lbah	= fis[6];
+	tf->device	= fis[7];
+
+	tf->hob_lbal	= fis[8];
+	tf->hob_lbam	= fis[9];
+	tf->hob_lbah	= fis[10];
+
+	tf->nsect	= fis[12];
+	tf->hob_nsect	= fis[13];
+}
+
+static const u8 ata_rw_cmds[] = {
+	/* pio multi */
+	ATA_CMD_READ_MULTI,
+	ATA_CMD_WRITE_MULTI,
+	ATA_CMD_READ_MULTI_EXT,
+	ATA_CMD_WRITE_MULTI_EXT,
+	/* pio */
+	ATA_CMD_PIO_READ,
+	ATA_CMD_PIO_WRITE,
+	ATA_CMD_PIO_READ_EXT,
+	ATA_CMD_PIO_WRITE_EXT,
+	/* dma */
+	ATA_CMD_READ,
+	ATA_CMD_WRITE,
+	ATA_CMD_READ_EXT,
+	ATA_CMD_WRITE_EXT
+};
+
+/**
+ *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
+ *	@qc: command to examine and configure
+ *
+ *	Examine the device configuration and tf->flags to calculate
+ *	the proper read/write commands and protocol to use.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+void ata_rwcmd_protocol(struct ata_queued_cmd *qc)
+{
+	struct ata_taskfile *tf = &qc->tf;
+	struct ata_device *dev = qc->dev;
+
+	int index, lba48, write;
+
+	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
+	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
+
+	if (dev->flags & ATA_DFLAG_PIO) {
+		tf->protocol = ATA_PROT_PIO;
+		index = dev->multi_count ? 0 : 4;
+	} else {
+		tf->protocol = ATA_PROT_DMA;
+		index = 8;
+	}
+
+	tf->command = ata_rw_cmds[index + lba48 + write];
+}
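+
+/*
+ * Worked example: for a DMA write with ATA_TFLAG_LBA48 set,
+ * index = 8 (dma) + 2 (lba48) + 1 (write) = 11, which selects
+ * ATA_CMD_WRITE_EXT from ata_rw_cmds[].  A PIO read on a device
+ * without a multi_count gives index = 4 + 0 + 0, i.e. ATA_CMD_PIO_READ.
+ */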
+
+static const char * xfer_mode_str[] = {
+	"UDMA/16",
+	"UDMA/25",
+	"UDMA/33",
+	"UDMA/44",
+	"UDMA/66",
+	"UDMA/100",
+	"UDMA/133",
+	"UDMA7",
+	"MWDMA0",
+	"MWDMA1",
+	"MWDMA2",
+	"PIO0",
+	"PIO1",
+	"PIO2",
+	"PIO3",
+	"PIO4",
+};
+
+/**
+ *	ata_mode_string - convert xfer mode bit offset to string
+ *	@mask: mask of bits supported; only highest bit counts.
+ *
+ *	Determine string which represents the highest speed
+ *	(highest bit in @mask).
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	Constant C string representing highest speed listed in
+ *	@mask, or the constant C string "<n/a>".
+ */
+
+static const char *ata_mode_string(unsigned int mask)
+{
+	int i;
+
+	for (i = 7; i >= 0; i--)
+		if (mask & (1 << i))
+			goto out;
+	for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
+		if (mask & (1 << i))
+			goto out;
+	for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
+		if (mask & (1 << i))
+			goto out;
+
+	return "<n/a>";
+
+out:
+	return xfer_mode_str[i];
+}
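+
+/*
+ * Example: a mask with only bit 5 set decodes as xfer_mode_str[5],
+ * "UDMA/100".  For PIO the same indexing works only if ATA_SHIFT_PIO
+ * is 11, which the layout of xfer_mode_str[] assumes; a mask of
+ * (1 << (ATA_SHIFT_PIO + 4)) then decodes as xfer_mode_str[15], "PIO4".
+ */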
+
+/**
+ *	ata_pio_devchk - PATA device presence detection
+ *	@ap: ATA channel to examine
+ *	@device: Device to examine (starting at zero)
+ *
+ *	This technique was originally described in
+ *	Hale Landis's ATADRVR (www.ata-atapi.com), and
+ *	later found its way into the ATA/ATAPI spec.
+ *
+ *	Write a pattern to the ATA shadow registers,
+ *	and if a device is present, it will respond by
+ *	correctly storing and echoing back the
+ *	ATA shadow register contents.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+static unsigned int ata_pio_devchk(struct ata_port *ap,
+				   unsigned int device)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	u8 nsect, lbal;
+
+	ap->ops->dev_select(ap, device);
+
+	outb(0x55, ioaddr->nsect_addr);
+	outb(0xaa, ioaddr->lbal_addr);
+
+	outb(0xaa, ioaddr->nsect_addr);
+	outb(0x55, ioaddr->lbal_addr);
+
+	outb(0x55, ioaddr->nsect_addr);
+	outb(0xaa, ioaddr->lbal_addr);
+
+	nsect = inb(ioaddr->nsect_addr);
+	lbal = inb(ioaddr->lbal_addr);
+
+	if ((nsect == 0x55) && (lbal == 0xaa))
+		return 1;	/* we found a device */
+
+	return 0;		/* nothing found */
+}
+
+/**
+ *	ata_mmio_devchk - PATA device presence detection
+ *	@ap: ATA channel to examine
+ *	@device: Device to examine (starting at zero)
+ *
+ *	This technique was originally described in
+ *	Hale Landis's ATADRVR (www.ata-atapi.com), and
+ *	later found its way into the ATA/ATAPI spec.
+ *
+ *	Write a pattern to the ATA shadow registers,
+ *	and if a device is present, it will respond by
+ *	correctly storing and echoing back the
+ *	ATA shadow register contents.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+static unsigned int ata_mmio_devchk(struct ata_port *ap,
+				    unsigned int device)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	u8 nsect, lbal;
+
+	ap->ops->dev_select(ap, device);
+
+	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
+	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
+
+	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
+	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
+
+	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
+	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
+
+	nsect = readb((void __iomem *) ioaddr->nsect_addr);
+	lbal = readb((void __iomem *) ioaddr->lbal_addr);
+
+	if ((nsect == 0x55) && (lbal == 0xaa))
+		return 1;	/* we found a device */
+
+	return 0;		/* nothing found */
+}
+
+/**
+ *	ata_devchk - PATA device presence detection
+ *	@ap: ATA channel to examine
+ *	@device: Device to examine (starting at zero)
+ *
+ *	Dispatch ATA device presence detection, depending
+ *	on whether we are using PIO or MMIO to talk to the
+ *	ATA shadow registers.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+static unsigned int ata_devchk(struct ata_port *ap,
+				    unsigned int device)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		return ata_mmio_devchk(ap, device);
+	return ata_pio_devchk(ap, device);
+}
+
+/**
+ *	ata_dev_classify - determine device type based on ATA-spec signature
+ *	@tf: ATA taskfile register set for device to be identified
+ *
+ *	Determine from taskfile register contents whether a device is
+ *	ATA or ATAPI, as per "Signature and persistence" section
+ *	of ATA/PI spec (volume 1, sect 5.14).
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
+ *	in the event of failure.
+ */
+
+unsigned int ata_dev_classify(const struct ata_taskfile *tf)
+{
+	/* Apple's open source Darwin code hints that some devices only
+	 * put a proper signature into the LBA mid/high registers,
+	 * so we check only those.  It's sufficient for uniqueness.
+	 */
+
+	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
+	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
+		DPRINTK("found ATA device by sig\n");
+		return ATA_DEV_ATA;
+	}
+
+	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
+	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
+		DPRINTK("found ATAPI device by sig\n");
+		return ATA_DEV_ATAPI;
+	}
+
+	DPRINTK("unknown device\n");
+	return ATA_DEV_UNKNOWN;
+}
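+
+/*
+ * Signature summary, as tested above:
+ *
+ *	lbam  lbah	class
+ *	0x00  0x00	ATA_DEV_ATA
+ *	0x3c  0xc3	ATA_DEV_ATA
+ *	0x14  0xeb	ATA_DEV_ATAPI
+ *	0x69  0x96	ATA_DEV_ATAPI
+ *	other		ATA_DEV_UNKNOWN
+ */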
+
+/**
+ *	ata_dev_try_classify - Parse returned ATA device signature
+ *	@ap: ATA channel to examine
+ *	@device: Device to examine (starting at zero)
+ *
+ *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
+ *	an ATA/ATAPI-defined set of values is placed in the ATA
+ *	shadow registers, indicating the results of device detection
+ *	and diagnostics.
+ *
+ *	Select the ATA device, and read the values from the ATA shadow
+ *	registers.  Then parse according to the Error register value,
+ *	and the spec-defined values examined by ata_dev_classify().
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
+{
+	struct ata_device *dev = &ap->device[device];
+	struct ata_taskfile tf;
+	unsigned int class;
+	u8 err;
+
+	ap->ops->dev_select(ap, device);
+
+	memset(&tf, 0, sizeof(tf));
+
+	ap->ops->tf_read(ap, &tf);
+	err = tf.feature;
+
+	dev->class = ATA_DEV_NONE;
+
+	/* see if device passed diags */
+	if (err == 1)
+		/* do nothing */ ;
+	else if ((device == 0) && (err == 0x81))
+		/* do nothing */ ;
+	else
+		return err;
+
+	/* determine if device is ATA or ATAPI */
+	class = ata_dev_classify(&tf);
+	if (class == ATA_DEV_UNKNOWN)
+		return err;
+	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
+		return err;
+
+	dev->class = class;
+
+	return err;
+}
+
+/**
+ *	ata_dev_id_string - Convert IDENTIFY DEVICE page into string
+ *	@id: IDENTIFY DEVICE results we will examine
+ *	@s: string into which data is output
+ *	@ofs: offset into identify device page
+ *	@len: length of string to return.  Must be an even number.
+ *
+ *	The strings in the IDENTIFY DEVICE page are broken up into
+ *	16-bit chunks.  Run through the string, and output each
+ *	8-bit chunk linearly, regardless of platform.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+void ata_dev_id_string(const u16 *id, unsigned char *s,
+		       unsigned int ofs, unsigned int len)
+{
+	unsigned int c;
+
+	while (len > 0) {
+		c = id[ofs] >> 8;
+		*s = c;
+		s++;
+
+		c = id[ofs] & 0xff;
+		*s = c;
+		s++;
+
+		ofs++;
+		len -= 2;
+	}
+}
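+
+/*
+ * Usage sketch, mirroring the call in ata_dma_blacklisted() below:
+ *
+ *	unsigned char model_num[40];
+ *
+ *	ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
+ *			  sizeof(model_num));
+ *
+ * Note that the result is blank-padded per the ATA spec and is not
+ * NUL-terminated by this function.
+ */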
+
+
+/**
+ *	ata_noop_dev_select - Select device 0/1 on ATA bus
+ *	@ap: ATA channel to manipulate
+ *	@device: ATA device (numbered from zero) to select
+ *
+ *	This function performs no operation (it is a no-op).
+ *
+ *	May be used as the dev_select() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
+{
+}
+
+
+/**
+ *	ata_std_dev_select - Select device 0/1 on ATA bus
+ *	@ap: ATA channel to manipulate
+ *	@device: ATA device (numbered from zero) to select
+ *
+ *	Use the method defined in the ATA specification to
+ *	make either device 0, or device 1, active on the
+ *	ATA channel.  Works with both PIO and MMIO.
+ *
+ *	May be used as the dev_select() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+void ata_std_dev_select(struct ata_port *ap, unsigned int device)
+{
+	u8 tmp;
+
+	if (device == 0)
+		tmp = ATA_DEVICE_OBS;
+	else
+		tmp = ATA_DEVICE_OBS | ATA_DEV1;
+
+	if (ap->flags & ATA_FLAG_MMIO) {
+		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
+	} else {
+		outb(tmp, ap->ioaddr.device_addr);
+	}
+	ata_pause(ap);		/* needed; also flushes, for mmio */
+}
+
+/**
+ *	ata_dev_select - Select device 0/1 on ATA bus
+ *	@ap: ATA channel to manipulate
+ *	@device: ATA device (numbered from zero) to select
+ *	@wait: non-zero to wait for Status register BSY bit to clear
+ *	@can_sleep: non-zero if context allows sleeping
+ *
+ *	Use the method defined in the ATA specification to
+ *	make either device 0, or device 1, active on the
+ *	ATA channel.
+ *
+ *	This is a high-level version of ata_std_dev_select(),
+ *	which additionally provides the services of inserting
+ *	the proper pauses and status polling, where needed.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+void ata_dev_select(struct ata_port *ap, unsigned int device,
+			   unsigned int wait, unsigned int can_sleep)
+{
+	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
+		ap->id, device, wait);
+
+	if (wait)
+		ata_wait_idle(ap);
+
+	ap->ops->dev_select(ap, device);
+
+	if (wait) {
+		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
+			msleep(150);
+		ata_wait_idle(ap);
+	}
+}
+
+/**
+ *	ata_dump_id - IDENTIFY DEVICE info debugging output
+ *	@dev: Device whose IDENTIFY DEVICE page we will dump
+ *
+ *	Dump selected 16-bit words from a detected device's
+ *	IDENTIFY DEVICE page.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+static inline void ata_dump_id(const struct ata_device *dev)
+{
+	DPRINTK("49==0x%04x  "
+		"53==0x%04x  "
+		"63==0x%04x  "
+		"64==0x%04x  "
+		"75==0x%04x  \n",
+		dev->id[49],
+		dev->id[53],
+		dev->id[63],
+		dev->id[64],
+		dev->id[75]);
+	DPRINTK("80==0x%04x  "
+		"81==0x%04x  "
+		"82==0x%04x  "
+		"83==0x%04x  "
+		"84==0x%04x  \n",
+		dev->id[80],
+		dev->id[81],
+		dev->id[82],
+		dev->id[83],
+		dev->id[84]);
+	DPRINTK("88==0x%04x  "
+		"93==0x%04x\n",
+		dev->id[88],
+		dev->id[93]);
+}
+
+/*
+ *	Compute the PIO modes available for this device. This is not as
+ *	trivial as it seems if we must consider early devices correctly.
+ *
+ *	FIXME: pre-IDE drive timing (do we care?).
+ */
+
+static unsigned int ata_pio_modes(const struct ata_device *adev)
+{
+	u16 modes;
+
+	/* Usual case. Word 53 indicates word 88 is valid */
+	if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) {
+		modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
+		modes <<= 3;
+		modes |= 0x7;
+		return modes;
+	}
+
+	/* If word 88 isn't valid then word 51 holds the PIO timing number
+	   for the maximum.  Turn it into a mask and return it. */
+	modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
+	return modes;
+}
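+
+/*
+ * Worked example: with the field-valid bit set and
+ * id[ATA_ID_PIO_MODES] == 0x0003 (PIO3 and PIO4 advertised), the
+ * result is (0x03 << 3) | 0x7 == 0x1f, i.e. PIO modes 0-4.  On an old
+ * drive reporting PIO timing number 2, the fallback path yields
+ * (2 << 2) - 1 == 0x07, i.e. PIO modes 0-2.
+ */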
+
+static int ata_qc_wait_err(struct ata_queued_cmd *qc,
+			   struct completion *wait)
+{
+	int rc = 0;
+
+	if (wait_for_completion_timeout(wait, 30 * HZ) < 1) {
+		/* timeout handling */
+		unsigned int err_mask = ac_err_mask(ata_chk_status(qc->ap));
+
+		if (!err_mask) {
+			printk(KERN_WARNING "ata%u: slow completion (cmd %x)\n",
+			       qc->ap->id, qc->tf.command);
+		} else {
+			printk(KERN_WARNING "ata%u: qc timeout (cmd %x)\n",
+			       qc->ap->id, qc->tf.command);
+			rc = -EIO;
+		}
+
+		ata_qc_complete(qc, err_mask);
+	}
+
+	return rc;
+}
+
+/**
+ *	ata_dev_identify - obtain IDENTIFY [PACKET] DEVICE page
+ *	@ap: port on which device we wish to probe resides
+ *	@device: device bus address, starting at zero
+ *
+ *	Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
+ *	command, and read back the 512-byte device information page.
+ *	The device information page is fed to us via the standard
+ *	PIO-IN protocol, but we hand-code it here. (TODO: investigate
+ *	using standard PIO-IN paths)
+ *
+ *	After reading the device information page, we use several
+ *	bits of information from it to initialize data structures
+ *	that will be used during the lifetime of the ata_device.
+ *	Other data from the info page is used to disqualify certain
+ *	older ATA devices we do not wish to support.
+ *
+ *	LOCKING:
+ *	Inherited from caller.  Some functions called by this function
+ *	obtain the host_set lock.
+ */
+
+static void ata_dev_identify(struct ata_port *ap, unsigned int device)
+{
+	struct ata_device *dev = &ap->device[device];
+	unsigned int major_version;
+	u16 tmp;
+	unsigned long xfer_modes;
+	unsigned int using_edd;
+	DECLARE_COMPLETION(wait);
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	int rc;
+
+	if (!ata_dev_present(dev)) {
+		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
+			ap->id, device);
+		return;
+	}
+
+	if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
+		using_edd = 0;
+	else
+		using_edd = 1;
+
+	DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
+
+	assert(dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
+		dev->class == ATA_DEV_NONE);
+
+	ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
+
+	qc = ata_qc_new_init(ap, dev);
+	BUG_ON(qc == NULL);
+
+	ata_sg_init_one(qc, dev->id, sizeof(dev->id));
+	qc->dma_dir = DMA_FROM_DEVICE;
+	qc->tf.protocol = ATA_PROT_PIO;
+	qc->nsect = 1;
+
+retry:
+	if (dev->class == ATA_DEV_ATA) {
+		qc->tf.command = ATA_CMD_ID_ATA;
+		DPRINTK("do ATA identify\n");
+	} else {
+		qc->tf.command = ATA_CMD_ID_ATAPI;
+		DPRINTK("do ATAPI identify\n");
+	}
+
+	qc->waiting = &wait;
+	qc->complete_fn = ata_qc_complete_noop;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	rc = ata_qc_issue(qc);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	if (rc)
+		goto err_out;
+	else
+		ata_qc_wait_err(qc, &wait);
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	ap->ops->tf_read(ap, &qc->tf);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	if (qc->tf.command & ATA_ERR) {
+		/*
+		 * arg!  EDD works for all test cases, but seems to return
+		 * the ATA signature for some ATAPI devices.  Until the
+		 * reason for this is found and fixed, we fix up the mess
+		 * here.  If IDENTIFY DEVICE returns command aborted
+		 * (as ATAPI devices do), then we issue an
+		 * IDENTIFY PACKET DEVICE.
+		 *
+		 * ATA software reset (SRST, the default) does not appear
+		 * to have this problem.
+		 */
+		if ((using_edd) && (dev->class == ATA_DEV_ATA)) {
+			u8 err = qc->tf.feature;
+			if (err & ATA_ABORTED) {
+				dev->class = ATA_DEV_ATAPI;
+				qc->cursg = 0;
+				qc->cursg_ofs = 0;
+				qc->cursect = 0;
+				qc->nsect = 1;
+				goto retry;
+			}
+		}
+		goto err_out;
+	}
+
+	swap_buf_le16(dev->id, ATA_ID_WORDS);
+
+	/* print device capabilities */
+	printk(KERN_DEBUG "ata%u: dev %u cfg "
+	       "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
+	       ap->id, device, dev->id[49],
+	       dev->id[82], dev->id[83], dev->id[84],
+	       dev->id[85], dev->id[86], dev->id[87],
+	       dev->id[88]);
+
+	/*
+	 * common ATA, ATAPI feature tests
+	 */
+
+	/* we require DMA support (bit 8 of word 49) */
+	if (!ata_id_has_dma(dev->id)) {
+		printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
+		goto err_out_nosup;
+	}
+
+	/* quick-n-dirty find max transfer mode; for printk only */
+	xfer_modes = dev->id[ATA_ID_UDMA_MODES];
+	if (!xfer_modes)
+		xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
+	if (!xfer_modes)
+		xfer_modes = ata_pio_modes(dev);
+
+	ata_dump_id(dev);
+
+	/* ATA-specific feature tests */
+	if (dev->class == ATA_DEV_ATA) {
+		if (!ata_id_is_ata(dev->id))	/* sanity check */
+			goto err_out_nosup;
+
+		/* get major version */
+		tmp = dev->id[ATA_ID_MAJOR_VER];
+		for (major_version = 14; major_version >= 1; major_version--)
+			if (tmp & (1 << major_version))
+				break;
+
+		/*
+		 * The exact sequence expected by certain pre-ATA4 drives is:
+		 * SRST RESET
+		 * IDENTIFY
+		 * INITIALIZE DEVICE PARAMETERS
+		 * anything else..
+		 * Some drives were very specific about that exact sequence.
+		 */
+		if (major_version < 4 || (!ata_id_has_lba(dev->id))) {
+			ata_dev_init_params(ap, dev);
+
+			/* current CHS translation info (id[53-58]) might be
+			 * changed. reread the identify device info.
+			 */
+			ata_dev_reread_id(ap, dev);
+		}
+
+		if (ata_id_has_lba(dev->id)) {
+			dev->flags |= ATA_DFLAG_LBA;
+
+			if (ata_id_has_lba48(dev->id)) {
+				dev->flags |= ATA_DFLAG_LBA48;
+				dev->n_sectors = ata_id_u64(dev->id, 100);
+			} else {
+				dev->n_sectors = ata_id_u32(dev->id, 60);
+			}
+
+			/* print device info to dmesg */
+			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
+			       ap->id, device,
+			       major_version,
+			       ata_mode_string(xfer_modes),
+			       (unsigned long long)dev->n_sectors,
+			       dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
+		} else {
+			/* CHS */
+
+			/* Default translation */
+			dev->cylinders	= dev->id[1];
+			dev->heads	= dev->id[3];
+			dev->sectors	= dev->id[6];
+			dev->n_sectors	= dev->cylinders * dev->heads * dev->sectors;
+
+			if (ata_id_current_chs_valid(dev->id)) {
+				/* Current CHS translation is valid. */
+				dev->cylinders = dev->id[54];
+				dev->heads     = dev->id[55];
+				dev->sectors   = dev->id[56];
+
+				dev->n_sectors = ata_id_u32(dev->id, 57);
+			}
+
+			/* print device info to dmesg */
+			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
+			       ap->id, device,
+			       major_version,
+			       ata_mode_string(xfer_modes),
+			       (unsigned long long)dev->n_sectors,
+			       (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);
+
+		}
+
+		ap->host->max_cmd_len = 16;
+	}
+
+	/* ATAPI-specific feature tests */
+	else if (dev->class == ATA_DEV_ATAPI) {
+		if (ata_id_is_ata(dev->id))		/* sanity check */
+			goto err_out_nosup;
+
+		rc = atapi_cdb_len(dev->id);
+		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
+			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
+			goto err_out_nosup;
+		}
+		ap->cdb_len = (unsigned int) rc;
+		ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
+
+		/* print device info to dmesg */
+		printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
+		       ap->id, device,
+		       ata_mode_string(xfer_modes));
+	}
+
+	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
+	return;
+
+err_out_nosup:
+	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
+	       ap->id, device);
+err_out:
+	dev->class++;	/* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
+	DPRINTK("EXIT, err\n");
+}
+
+
+static inline u8 ata_dev_knobble(const struct ata_port *ap)
+{
+	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
+}
+
+/**
+ * 	ata_dev_config - Run device-specific handlers and check for
+ * 			 SATA->PATA bridges
+ * 	@ap: Bus
+ * 	@i:  Device
+ *
+ * 	LOCKING:
+ */
+
+void ata_dev_config(struct ata_port *ap, unsigned int i)
+{
+	/* limit bridge transfers to udma5, 200 sectors */
+	if (ata_dev_knobble(ap)) {
+		printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
+			ap->id, ap->device->devno);
+		ap->udma_mask &= ATA_UDMA5;
+		ap->host->max_sectors = ATA_MAX_SECTORS;
+		ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
+		ap->device->flags |= ATA_DFLAG_LOCK_SECTORS;
+	}
+
+	if (ap->ops->dev_config)
+		ap->ops->dev_config(ap, &ap->device[i]);
+}
+
+/**
+ *	ata_bus_probe - Reset and probe ATA bus
+ *	@ap: Bus to probe
+ *
+ *	Master ATA bus probing function.  Initiates a hardware-dependent
+ *	bus reset, then attempts to identify any devices found on
+ *	the bus.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	Zero on success, non-zero on error.
+ */
+
+static int ata_bus_probe(struct ata_port *ap)
+{
+	unsigned int i, found = 0;
+
+	ap->ops->phy_reset(ap);
+	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		goto err_out;
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		ata_dev_identify(ap, i);
+		if (ata_dev_present(&ap->device[i])) {
+			found = 1;
+			ata_dev_config(ap,i);
+		}
+	}
+
+	if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
+		goto err_out_disable;
+
+	ata_set_mode(ap);
+	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		goto err_out_disable;
+
+	return 0;
+
+err_out_disable:
+	ap->ops->port_disable(ap);
+err_out:
+	return -1;
+}
+
+/**
+ *	ata_port_probe - Mark port as enabled
+ *	@ap: Port for which we indicate enablement
+ *
+ *	Modify @ap data structure such that the system
+ *	thinks that the entire port is enabled.
+ *
+ *	LOCKING: host_set lock, or some other form of
+ *	serialization.
+ */
+
+void ata_port_probe(struct ata_port *ap)
+{
+	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
+}
+
+/**
+ *	__sata_phy_reset - Wake/reset a low-level SATA PHY
+ *	@ap: SATA port associated with target SATA PHY.
+ *
+ *	This function issues commands to standard SATA Sxxx
+ *	PHY registers, to wake up the phy (and device), and
+ *	clear any reset condition.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ */
+void __sata_phy_reset(struct ata_port *ap)
+{
+	u32 sstatus;
+	unsigned long timeout = jiffies + (HZ * 5);
+
+	if (ap->flags & ATA_FLAG_SATA_RESET) {
+		/* issue phy wake/reset */
+		scr_write_flush(ap, SCR_CONTROL, 0x301);
+		/* Couldn't find anything in SATA I/II specs, but
+		 * AHCI-1.1 10.4.2 says at least 1 ms. */
+		mdelay(1);
+	}
+	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
+
+	/* wait for phy to become ready, if necessary */
+	do {
+		msleep(200);
+		sstatus = scr_read(ap, SCR_STATUS);
+		if ((sstatus & 0xf) != 1)
+			break;
+	} while (time_before(jiffies, timeout));
+
+	/* TODO: phy layer with polling, timeouts, etc. */
+	if (sata_dev_present(ap))
+		ata_port_probe(ap);
+	else {
+		sstatus = scr_read(ap, SCR_STATUS);
+		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
+		       ap->id, sstatus);
+		ata_port_disable(ap);
+	}
+
+	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		return;
+
+	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
+		ata_port_disable(ap);
+		return;
+	}
+
+	ap->cbl = ATA_CBL_SATA;
+}
+
+/**
+ *	sata_phy_reset - Reset SATA bus.
+ *	@ap: SATA port associated with target SATA PHY.
+ *
+ *	This function resets the SATA bus, and then probes
+ *	the bus for devices.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ */
+void sata_phy_reset(struct ata_port *ap)
+{
+	__sata_phy_reset(ap);
+	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		return;
+	ata_bus_reset(ap);
+}
+
+/**
+ *	ata_port_disable - Disable port.
+ *	@ap: Port to be disabled.
+ *
+ *	Modify @ap data structure such that the system
+ *	thinks that the entire port is disabled, and should
+ *	never attempt to probe or communicate with devices
+ *	on this port.
+ *
+ *	LOCKING: host_set lock, or some other form of
+ *	serialization.
+ */
+
+void ata_port_disable(struct ata_port *ap)
+{
+	ap->device[0].class = ATA_DEV_NONE;
+	ap->device[1].class = ATA_DEV_NONE;
+	ap->flags |= ATA_FLAG_PORT_DISABLED;
+}
+
+/*
+ * This mode timing computation functionality is ported over from
+ * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
+ */
+/*
+ * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
+ * These were taken from ATA/ATAPI-6 standard, rev 0a, except
+ * for PIO 5, which is a nonstandard extension, and UDMA6, which
+ * is currently supported only by Maxtor drives.
+ */
+
+static const struct ata_timing ata_timing[] = {
+
+	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
+	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
+	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
+	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
+
+	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
+	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
+	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
+
+/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
+
+	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
+	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
+	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
+
+	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
+	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
+	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
+
+/*	{ XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
+	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
+	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
+
+	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
+	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
+	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
+
+/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
+
+	{ 0xFF }
+};
+
+#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
+#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
+
+static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
+{
+	q->setup   = EZ(t->setup   * 1000,  T);
+	q->act8b   = EZ(t->act8b   * 1000,  T);
+	q->rec8b   = EZ(t->rec8b   * 1000,  T);
+	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
+	q->active  = EZ(t->active  * 1000,  T);
+	q->recover = EZ(t->recover * 1000,  T);
+	q->cycle   = EZ(t->cycle   * 1000,  T);
+	q->udma    = EZ(t->udma    * 1000, UT);
+}
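+
+/*
+ * Worked example: ENOUGH() is a ceiling division and EZ() additionally
+ * maps 0 to 0.  Assuming T is the bus clock period in picoseconds, as
+ * in the ide-timing code this was ported from (e.g. T == 30000 for a
+ * 33 MHz clock), a 70 ns setup time quantizes to
+ * ENOUGH(70 * 1000, 30000) == 3 clock cycles.
+ */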
+
+void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
+		      struct ata_timing *m, unsigned int what)
+{
+	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
+	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
+	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
+	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
+	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
+	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
+	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
+	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
+}
+
+static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
+{
+	const struct ata_timing *t;
+
+	for (t = ata_timing; t->mode != speed; t++)
+		if (t->mode == 0xFF)
+			return NULL;
+	return t;
+}
+
+int ata_timing_compute(struct ata_device *adev, unsigned short speed,
+		       struct ata_timing *t, int T, int UT)
+{
+	const struct ata_timing *s;
+	struct ata_timing p;
+
+	/*
+	 * Find the mode. 
+	 */
+
+	if (!(s = ata_timing_find_mode(speed)))
+		return -EINVAL;
+
+	memcpy(t, s, sizeof(*s));
+
+	/*
+	 * If the drive is an EIDE drive, it can tell us it needs extended
+	 * PIO/MW_DMA cycle timing.
+	 */
+
+	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
+		memset(&p, 0, sizeof(p));
+		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
+			if (speed <= XFER_PIO_2)
+				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
+			else
+				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
+		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
+			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
+		}
+		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
+	}
+
+	/*
+	 * Convert the timing to bus clock counts.
+	 */
+
+	ata_timing_quantize(t, t, T, UT);
+
+	/*
+	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
+	 * S.M.A.R.T. and some other commands.  We have to ensure that the
+	 * DMA cycle timing is no faster than the fastest PIO timing.
+	 */
+
+	if (speed > XFER_PIO_4) {
+		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
+		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
+	}
+
+	/*
+	 * Lengthen active & recovery time so that cycle time is correct.
+	 */
+
+	if (t->act8b + t->rec8b < t->cyc8b) {
+		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
+		t->rec8b = t->cyc8b - t->act8b;
+	}
+
+	if (t->active + t->recover < t->cycle) {
+		t->active += (t->cycle - (t->active + t->recover)) / 2;
+		t->recover = t->cycle - t->active;
+	}
+
+	return 0;
+}
+
+static const struct {
+	unsigned int shift;
+	u8 base;
+} xfer_mode_classes[] = {
+	{ ATA_SHIFT_UDMA,	XFER_UDMA_0 },
+	{ ATA_SHIFT_MWDMA,	XFER_MW_DMA_0 },
+	{ ATA_SHIFT_PIO,	XFER_PIO_0 },
+};
+
+static inline u8 base_from_shift(unsigned int shift)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
+		if (xfer_mode_classes[i].shift == shift)
+			return xfer_mode_classes[i].base;
+
+	return 0xff;
+}
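+
+/*
+ * Example: base_from_shift(ATA_SHIFT_UDMA) returns XFER_UDMA_0 (0x40),
+ * so in ata_dev_set_mode() below a device with xfer_mode ==
+ * XFER_UDMA_5 (0x45) gives ofs == 5 and, with ATA_SHIFT_UDMA == 0,
+ * idx == 5, i.e. "UDMA/100" in xfer_mode_str[].  0x40 and 0x45 are the
+ * standard SET FEATURES - XFER MODE codes for those modes.
+ */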
+
+static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
+{
+	int ofs, idx;
+	u8 base;
+
+	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
+		return;
+
+	if (dev->xfer_shift == ATA_SHIFT_PIO)
+		dev->flags |= ATA_DFLAG_PIO;
+
+	ata_dev_set_xfermode(ap, dev);
+
+	base = base_from_shift(dev->xfer_shift);
+	ofs = dev->xfer_mode - base;
+	idx = ofs + dev->xfer_shift;
+	WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));
+
+	DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
+		idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);
+
+	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
+		ap->id, dev->devno, xfer_mode_str[idx]);
+}
+
+static int ata_host_set_pio(struct ata_port *ap)
+{
+	unsigned int mask;
+	int x, i;
+	u8 base, xfer_mode;
+
+	mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
+	x = fgb(mask);
+	if (x < 0) {
+		printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
+		return -1;
+	}
+
+	base = base_from_shift(ATA_SHIFT_PIO);
+	xfer_mode = base + x;
+
+	DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
+		(int)base, (int)xfer_mode, mask, x);
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
+		if (ata_dev_present(dev)) {
+			dev->pio_mode = xfer_mode;
+			dev->xfer_mode = xfer_mode;
+			dev->xfer_shift = ATA_SHIFT_PIO;
+			if (ap->ops->set_piomode)
+				ap->ops->set_piomode(ap, dev);
+		}
+	}
+
+	return 0;
+}
+
+static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
+			    unsigned int xfer_shift)
+{
+	int i;
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
+		if (ata_dev_present(dev)) {
+			dev->dma_mode = xfer_mode;
+			dev->xfer_mode = xfer_mode;
+			dev->xfer_shift = xfer_shift;
+			if (ap->ops->set_dmamode)
+				ap->ops->set_dmamode(ap, dev);
+		}
+	}
+}
+
+/**
+ *	ata_set_mode - Program timings and issue SET FEATURES - XFER
+ *	@ap: port on which timings will be programmed
+ *
+ *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ */
+static void ata_set_mode(struct ata_port *ap)
+{
+	unsigned int xfer_shift;
+	u8 xfer_mode;
+	int rc;
+
+	/* step 1: always set host PIO timings */
+	rc = ata_host_set_pio(ap);
+	if (rc)
+		goto err_out;
+
+	/* step 2: choose the best data xfer mode */
+	xfer_mode = xfer_shift = 0;
+	rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
+	if (rc)
+		goto err_out;
+
+	/* step 3: if that xfer mode isn't PIO, set host DMA timings */
+	if (xfer_shift != ATA_SHIFT_PIO)
+		ata_host_set_dma(ap, xfer_mode, xfer_shift);
+
+	/* step 4: update devices' xfer mode */
+	ata_dev_set_mode(ap, &ap->device[0]);
+	ata_dev_set_mode(ap, &ap->device[1]);
+
+	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		return;
+
+	if (ap->ops->post_set_mode)
+		ap->ops->post_set_mode(ap);
+
+	return;
+
+err_out:
+	ata_port_disable(ap);
+}
+
+/**
+ *	ata_busy_sleep - sleep until BSY clears, or timeout
+ *	@ap: port containing status register to be polled
+ *	@tmout_pat: impatience timeout
+ *	@tmout: overall timeout
+ *
+ *	Sleep until ATA Status register bit BSY clears,
+ *	or a timeout occurs.
+ *
+ *	LOCKING: None.
+ *
+ */
+
+static unsigned int ata_busy_sleep(struct ata_port *ap,
+				   unsigned long tmout_pat,
+				   unsigned long tmout)
+{
+	unsigned long timer_start, timeout;
+	u8 status;
+
+	status = ata_busy_wait(ap, ATA_BUSY, 300);
+	timer_start = jiffies;
+	timeout = timer_start + tmout_pat;
+	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
+		msleep(50);
+		status = ata_busy_wait(ap, ATA_BUSY, 3);
+	}
+
+	if (status & ATA_BUSY)
+		printk(KERN_WARNING "ata%u is slow to respond, "
+		       "please be patient\n", ap->id);
+
+	timeout = timer_start + tmout;
+	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
+		msleep(50);
+		status = ata_chk_status(ap);
+	}
+
+	if (status & ATA_BUSY) {
+		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
+		       ap->id, tmout / HZ);
+		return 1;
+	}
+
+	return 0;
+}
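+
+/*
+ * Usage note: callers in this file invoke
+ * ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT) and treat a
+ * nonzero return as a dead port.  The first window only controls when
+ * the "slow to respond" warning is printed; the second is the hard
+ * timeout.
+ */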
+
+static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int dev0 = devmask & (1 << 0);
+	unsigned int dev1 = devmask & (1 << 1);
+	unsigned long timeout;
+
+	/* if device 0 was found in ata_devchk, wait for its
+	 * BSY bit to clear
+	 */
+	if (dev0)
+		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+
+	/* if device 1 was found in ata_devchk, wait for
+	 * register access, then wait for BSY to clear
+	 */
+	timeout = jiffies + ATA_TMOUT_BOOT;
+	while (dev1) {
+		u8 nsect, lbal;
+
+		ap->ops->dev_select(ap, 1);
+		if (ap->flags & ATA_FLAG_MMIO) {
+			nsect = readb((void __iomem *) ioaddr->nsect_addr);
+			lbal = readb((void __iomem *) ioaddr->lbal_addr);
+		} else {
+			nsect = inb(ioaddr->nsect_addr);
+			lbal = inb(ioaddr->lbal_addr);
+		}
+		if ((nsect == 1) && (lbal == 1))
+			break;
+		if (time_after(jiffies, timeout)) {
+			dev1 = 0;
+			break;
+		}
+		msleep(50);	/* give drive a breather */
+	}
+	if (dev1)
+		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+
+	/* is all this really necessary? */
+	ap->ops->dev_select(ap, 0);
+	if (dev1)
+		ap->ops->dev_select(ap, 1);
+	if (dev0)
+		ap->ops->dev_select(ap, 0);
+}
+
+/**
+ *	ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
+ *	@ap: Port to reset and probe
+ *
+ *	Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
+ *	probe the bus.  Not often used these days.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *	Obtains host_set lock.
+ *
+ */
+
+static unsigned int ata_bus_edd(struct ata_port *ap)
+{
+	struct ata_taskfile tf;
+	unsigned long flags;
+
+	/* set up execute-device-diag (bus reset) taskfile */
+	/* also, take interrupts to a known state (disabled) */
+	DPRINTK("execute-device-diag\n");
+	ata_tf_init(ap, &tf, 0);
+	tf.ctl |= ATA_NIEN;
+	tf.command = ATA_CMD_EDD;
+	tf.protocol = ATA_PROT_NODATA;
+
+	/* do bus reset */
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	ata_tf_to_host(ap, &tf);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	/* spec says at least 2ms, but who knows with those
+	 * crazy ATAPI devices...
+	 */
+	msleep(150);
+
+	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+}
+
+static unsigned int ata_bus_softreset(struct ata_port *ap,
+				      unsigned int devmask)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	DPRINTK("ata%u: bus reset via SRST\n", ap->id);
+
+	/* software reset.  causes dev0 to be selected */
+	if (ap->flags & ATA_FLAG_MMIO) {
+		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
+		udelay(20);	/* FIXME: flush */
+		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
+		udelay(20);	/* FIXME: flush */
+		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
+	} else {
+		outb(ap->ctl, ioaddr->ctl_addr);
+		udelay(10);
+		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+		udelay(10);
+		outb(ap->ctl, ioaddr->ctl_addr);
+	}
+
+	/* spec mandates ">= 2ms" before checking status.
+	 * We wait 150ms, because that was the magic delay used for
+	 * ATAPI devices in Hale Landis's ATADRVR between writing the
+	 * ATA command register and checking status.  Because waiting
+	 * for "a while" before
+	 * checking status is fine, post SRST, we perform this magic
+	 * delay here as well.
+	 */
+	msleep(150);
+
+	ata_bus_post_reset(ap, devmask);
+
+	return 0;
+}
+
+/**
+ *	ata_bus_reset - reset host port and associated ATA channel
+ *	@ap: port to reset
+ *
+ *	This is typically the first time we actually start issuing
+ *	commands to the ATA channel.  We wait for BSY to clear, then
+ *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
+ *	result.  Determine what devices, if any, are on the channel
+ *	by looking at the device 0/1 error register.  Look at the signature
+ *	stored in each device's taskfile registers, to determine if
+ *	the device is ATA or ATAPI.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *	Obtains host_set lock.
+ *
+ *	SIDE EFFECTS:
+ *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
+ */
+
+void ata_bus_reset(struct ata_port *ap)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+	u8 err;
+	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
+
+	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
+
+	/* determine if device 0/1 are present */
+	if (ap->flags & ATA_FLAG_SATA_RESET)
+		dev0 = 1;
+	else {
+		dev0 = ata_devchk(ap, 0);
+		if (slave_possible)
+			dev1 = ata_devchk(ap, 1);
+	}
+
+	if (dev0)
+		devmask |= (1 << 0);
+	if (dev1)
+		devmask |= (1 << 1);
+
+	/* select device 0 again */
+	ap->ops->dev_select(ap, 0);
+
+	/* issue bus reset */
+	if (ap->flags & ATA_FLAG_SRST)
+		rc = ata_bus_softreset(ap, devmask);
+	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
+		/* set up device control */
+		if (ap->flags & ATA_FLAG_MMIO)
+			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
+		else
+			outb(ap->ctl, ioaddr->ctl_addr);
+		rc = ata_bus_edd(ap);
+	}
+
+	if (rc)
+		goto err_out;
+
+	/*
+	 * determine by signature whether we have ATA or ATAPI devices
+	 */
+	err = ata_dev_try_classify(ap, 0);
+	if ((slave_possible) && (err != 0x81))
+		ata_dev_try_classify(ap, 1);
+
+	/* re-enable interrupts */
+	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
+		ata_irq_on(ap);
+
+	/* is double-select really necessary? */
+	if (ap->device[1].class != ATA_DEV_NONE)
+		ap->ops->dev_select(ap, 1);
+	if (ap->device[0].class != ATA_DEV_NONE)
+		ap->ops->dev_select(ap, 0);
+
+	/* if no devices were detected, disable this port */
+	if ((ap->device[0].class == ATA_DEV_NONE) &&
+	    (ap->device[1].class == ATA_DEV_NONE))
+		goto err_out;
+
+	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
+		/* set up device control for ATA_FLAG_SATA_RESET */
+		if (ap->flags & ATA_FLAG_MMIO)
+			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
+		else
+			outb(ap->ctl, ioaddr->ctl_addr);
+	}
+
+	DPRINTK("EXIT\n");
+	return;
+
+err_out:
+	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
+	ap->ops->port_disable(ap);
+
+	DPRINTK("EXIT\n");
+}
+
+static void ata_pr_blacklisted(const struct ata_port *ap,
+			       const struct ata_device *dev)
+{
+	printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
+		ap->id, dev->devno);
+}
+
+static const char *ata_dma_blacklist[] = {
+	"WDC AC11000H",
+	"WDC AC22100H",
+	"WDC AC32500H",
+	"WDC AC33100H",
+	"WDC AC31600H",
+	"WDC AC32100H",
+	"WDC AC23200L",
+	"Compaq CRD-8241B",
+	"CRD-8400B",
+	"CRD-8480B",
+	"CRD-8482B",
+	"CRD-84",
+	"SanDisk SDP3B",
+	"SanDisk SDP3B-64",
+	"SANYO CD-ROM CRD",
+	"HITACHI CDR-8",
+	"HITACHI CDR-8335",
+	"HITACHI CDR-8435",
+	"Toshiba CD-ROM XM-6202B",
+	"TOSHIBA CD-ROM XM-1702BC",
+	"CD-532E-A",
+	"E-IDE CD-ROM CR-840",
+	"CD-ROM Drive/F5A",
+	"WPI CDD-820",
+	"SAMSUNG CD-ROM SC-148C",
+	"SAMSUNG CD-ROM SC",
+	"SanDisk SDP3B-64",
+	"ATAPI CD-ROM DRIVE 40X MAXIMUM",
+	"_NEC DV5800A",
+};
+
+static int ata_dma_blacklisted(const struct ata_device *dev)
+{
+	unsigned char model_num[40];
+	char *s;
+	unsigned int len;
+	int i;
+
+	ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
+			  sizeof(model_num));
+	s = &model_num[0];
+	len = strnlen(s, sizeof(model_num));
+
+	/* ATAPI specifies that empty space is blank-filled; remove blanks */
+	while ((len > 0) && (s[len - 1] == ' ')) {
+		len--;
+		s[len] = 0;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
+		if (!strncmp(ata_dma_blacklist[i], s, len))
+			return 1;
+
+	return 0;
+}
+
+static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
+{
+	const struct ata_device *master, *slave;
+	unsigned int mask;
+
+	master = &ap->device[0];
+	slave = &ap->device[1];
+
+	assert(ata_dev_present(master) || ata_dev_present(slave));
+
+	if (shift == ATA_SHIFT_UDMA) {
+		mask = ap->udma_mask;
+		if (ata_dev_present(master)) {
+			mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
+			if (ata_dma_blacklisted(master)) {
+				mask = 0;
+				ata_pr_blacklisted(ap, master);
+			}
+		}
+		if (ata_dev_present(slave)) {
+			mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
+			if (ata_dma_blacklisted(slave)) {
+				mask = 0;
+				ata_pr_blacklisted(ap, slave);
+			}
+		}
+	}
+	else if (shift == ATA_SHIFT_MWDMA) {
+		mask = ap->mwdma_mask;
+		if (ata_dev_present(master)) {
+			mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
+			if (ata_dma_blacklisted(master)) {
+				mask = 0;
+				ata_pr_blacklisted(ap, master);
+			}
+		}
+		if (ata_dev_present(slave)) {
+			mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
+			if (ata_dma_blacklisted(slave)) {
+				mask = 0;
+				ata_pr_blacklisted(ap, slave);
+			}
+		}
+	}
+	else if (shift == ATA_SHIFT_PIO) {
+		mask = ap->pio_mask;
+		if (ata_dev_present(master)) {
+			/* spec doesn't return explicit support for
+			 * PIO0-2, so we fake it
+			 */
+			u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
+			tmp_mode <<= 3;
+			tmp_mode |= 0x7;
+			mask &= tmp_mode;
+		}
+		if (ata_dev_present(slave)) {
+			/* spec doesn't return explicit support for
+			 * PIO0-2, so we fake it
+			 */
+			u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
+			tmp_mode <<= 3;
+			tmp_mode |= 0x7;
+			mask &= tmp_mode;
+		}
+	}
+	else {
+		mask = 0xffffffff; /* shut up compiler warning */
+		BUG();
+	}
+
+	return mask;
+}
+
+/* find greatest bit */
+static int fgb(u32 bitmap)
+{
+	unsigned int i;
+	int x = -1;
+
+	for (i = 0; i < 32; i++)
+		if (bitmap & (1 << i))
+			x = i;
+
+	return x;
+}
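+
+/*
+ * Example: fgb(0x2c) returns 5, the greatest of the set bits {2, 3, 5};
+ * fgb(0) returns -1, which callers treat as "no mode available".
+ */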
+
+/**
+ *	ata_choose_xfer_mode - attempt to find best transfer mode
+ *	@ap: Port for which an xfer mode will be selected
+ *	@xfer_mode_out: (output) SET FEATURES - XFER MODE code
+ *	@xfer_shift_out: (output) bit shift that selects this mode
+ *
+ *	Based on host and device capabilities, determine the
+ *	maximum transfer mode that is amenable to all.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	Zero on success, negative on error.
+ */
+
+static int ata_choose_xfer_mode(const struct ata_port *ap,
+				u8 *xfer_mode_out,
+				unsigned int *xfer_shift_out)
+{
+	unsigned int mask, shift;
+	int x, i;
+
+	for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
+		shift = xfer_mode_classes[i].shift;
+		mask = ata_get_mode_mask(ap, shift);
+
+		x = fgb(mask);
+		if (x >= 0) {
+			*xfer_mode_out = xfer_mode_classes[i].base + x;
+			*xfer_shift_out = shift;
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+/**
+ *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
+ *	@ap: Port associated with device @dev
+ *	@dev: Device to which command will be sent
+ *
+ *	Issue SET FEATURES - XFER MODE command to device @dev
+ *	on port @ap.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ */
+
+static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
+{
+	DECLARE_COMPLETION(wait);
+	struct ata_queued_cmd *qc;
+	int rc;
+	unsigned long flags;
+
+	/* set up set-features taskfile */
+	DPRINTK("set features - xfer mode\n");
+
+	qc = ata_qc_new_init(ap, dev);
+	BUG_ON(qc == NULL);
+
+	qc->tf.command = ATA_CMD_SET_FEATURES;
+	qc->tf.feature = SETFEATURES_XFER;
+	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	qc->tf.protocol = ATA_PROT_NODATA;
+	qc->tf.nsect = dev->xfer_mode;
+
+	qc->waiting = &wait;
+	qc->complete_fn = ata_qc_complete_noop;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	rc = ata_qc_issue(qc);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	if (rc)
+		ata_port_disable(ap);
+	else
+		ata_qc_wait_err(qc, &wait);
+
+	DPRINTK("EXIT\n");
+}
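+
+/*
+ * Resulting taskfile: command == ATA_CMD_SET_FEATURES (0xef) with
+ * feature == SETFEATURES_XFER (0x03), and nsect carries the transfer
+ * mode code, e.g. 0x45 (XFER_UDMA_5) to select UDMA/100.
+ */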
+
+/**
+ *	ata_dev_reread_id - re-read a device's IDENTIFY DEVICE info
+ *	@ap: port where the device is
+ *	@dev: device whose IDENTIFY DEVICE info to re-read
+ *
+ *	LOCKING:
+ */
+
+static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev)
+{
+	DECLARE_COMPLETION(wait);
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	int rc;
+
+	qc = ata_qc_new_init(ap, dev);
+	BUG_ON(qc == NULL);
+
+	ata_sg_init_one(qc, dev->id, sizeof(dev->id));
+	qc->dma_dir = DMA_FROM_DEVICE;
+
+	if (dev->class == ATA_DEV_ATA) {
+		qc->tf.command = ATA_CMD_ID_ATA;
+		DPRINTK("do ATA identify\n");
+	} else {
+		qc->tf.command = ATA_CMD_ID_ATAPI;
+		DPRINTK("do ATAPI identify\n");
+	}
+
+	qc->tf.flags |= ATA_TFLAG_DEVICE;
+	qc->tf.protocol = ATA_PROT_PIO;
+	qc->nsect = 1;
+
+	qc->waiting = &wait;
+	qc->complete_fn = ata_qc_complete_noop;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	rc = ata_qc_issue(qc);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	if (rc)
+		goto err_out;
+
+	ata_qc_wait_err(qc, &wait);
+
+	swap_buf_le16(dev->id, ATA_ID_WORDS);
+
+	ata_dump_id(dev);
+
+	DPRINTK("EXIT\n");
+
+	return;
+err_out:
+	ata_port_disable(ap);
+}
+
+/**
+ *	ata_dev_init_params - Issue INIT DEV PARAMS command
+ *	@ap: Port associated with device @dev
+ *	@dev: Device to which command will be sent
+ *
+ *	LOCKING:
+ */
+
+static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
+{
+	DECLARE_COMPLETION(wait);
+	struct ata_queued_cmd *qc;
+	int rc;
+	unsigned long flags;
+	/* IDENTIFY words 6 and 3 hold the default CHS sectors per
+	 * track and number of heads, respectively.
+	 */
+	u16 sectors = dev->id[6];
+	u16 heads   = dev->id[3];
+
+	/* Number of sectors per track 1-255. Number of heads 1-16 */
+	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
+		return;
+
+	/* set up init dev params taskfile */
+	DPRINTK("init dev params \n");
+
+	qc = ata_qc_new_init(ap, dev);
+	BUG_ON(qc == NULL);
+
+	qc->tf.command = ATA_CMD_INIT_DEV_PARAMS;
+	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	qc->tf.protocol = ATA_PROT_NODATA;
+	qc->tf.nsect = sectors;
+	qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
+
+	qc->waiting = &wait;
+	qc->complete_fn = ata_qc_complete_noop;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	rc = ata_qc_issue(qc);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	if (rc)
+		ata_port_disable(ap);
+	else
+		ata_qc_wait_err(qc, &wait);
+
+	DPRINTK("EXIT\n");
+}
+
+/**
+ *	ata_sg_clean - Unmap DMA memory associated with command
+ *	@qc: Command containing DMA memory to be released
+ *
+ *	Unmap all mapped DMA memory associated with this command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_sg_clean(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg = qc->__sg;
+	int dir = qc->dma_dir;
+	void *pad_buf = NULL;
+
+	assert(qc->flags & ATA_QCFLAG_DMAMAP);
+	assert(sg != NULL);
+
+	if (qc->flags & ATA_QCFLAG_SINGLE)
+		assert(qc->n_elem == 1);
+
+	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
+
+	/* if we padded the buffer out to 32-bit bound, and data
+	 * xfer direction is from-device, we must copy from the
+	 * pad buffer back into the supplied buffer
+	 */
+	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
+		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+
+	if (qc->flags & ATA_QCFLAG_SG) {
+		if (qc->n_elem)
+			dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
+		/* restore last sg */
+		sg[qc->orig_n_elem - 1].length += qc->pad_len;
+		if (pad_buf) {
+			struct scatterlist *psg = &qc->pad_sgent;
+			void *addr = kmap_atomic(psg->page, KM_IRQ0);
+			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
+			kunmap_atomic(addr, KM_IRQ0);
+		}
+	} else {
+		if (sg_dma_len(&sg[0]) > 0)
+			dma_unmap_single(ap->host_set->dev,
+				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
+				dir);
+		/* restore sg */
+		sg->length += qc->pad_len;
+		if (pad_buf)
+			memcpy(qc->buf_virt + sg->length - qc->pad_len,
+			       pad_buf, qc->pad_len);
+	}
+
+	qc->flags &= ~ATA_QCFLAG_DMAMAP;
+	qc->__sg = NULL;
+}
+
+/**
+ *	ata_fill_sg - Fill PCI IDE PRD table
+ *	@qc: Metadata associated with taskfile to be transferred
+ *
+ *	Fill PCI IDE PRD (scatter-gather) table with segments
+ *	associated with the current disk command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ *
+ */
+static void ata_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg;
+	unsigned int idx;
+
+	assert(qc->__sg != NULL);
+	assert(qc->n_elem > 0);
+
+	idx = 0;
+	ata_for_each_sg(sg, qc) {
+		u32 addr, offset;
+		u32 sg_len, len;
+
+		/* determine if physical DMA addr spans 64K boundary.
+		 * Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			ap->prd[idx].addr = cpu_to_le32(addr);
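+			/* in the PRD, a byte count of zero means 64KB */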
+			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+
+			idx++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	if (idx)
+		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
+/**
+ *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
+ *	@qc: Metadata associated with taskfile to check
+ *
+ *	Allow low-level driver to filter ATA PACKET commands, returning
+ *	a status indicating whether or not it is OK to use DMA for the
+ *	supplied PACKET command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ *
+ *	RETURNS: 0 when ATAPI DMA can be used
+ *               nonzero otherwise
+ */
+int ata_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	int rc = 0; /* Assume ATAPI DMA is OK by default */
+
+	if (ap->ops->check_atapi_dma)
+		rc = ap->ops->check_atapi_dma(qc);
+
+	return rc;
+}
+
+/**
+ *	ata_qc_prep - Prepare taskfile for submission
+ *	@qc: Metadata associated with taskfile to be prepared
+ *
+ *	Prepare ATA taskfile for submission.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_qc_prep(struct ata_queued_cmd *qc)
+{
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	ata_fill_sg(qc);
+}
+
+/**
+ *	ata_sg_init_one - Associate command with memory buffer
+ *	@qc: Command to be associated
+ *	@buf: Memory buffer
+ *	@buflen: Length of memory buffer, in bytes.
+ *
+ *	Initialize the data-related elements of queued_cmd @qc
+ *	to point to a single memory buffer, @buf of byte length @buflen.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
+{
+	struct scatterlist *sg;
+
+	qc->flags |= ATA_QCFLAG_SINGLE;
+
+	memset(&qc->sgent, 0, sizeof(qc->sgent));
+	qc->__sg = &qc->sgent;
+	qc->n_elem = 1;
+	qc->orig_n_elem = 1;
+	qc->buf_virt = buf;
+
+	sg = qc->__sg;
+	sg_init_one(sg, buf, buflen);
+}
+
+/**
+ *	ata_sg_init - Associate command with scatter-gather table.
+ *	@qc: Command to be associated
+ *	@sg: Scatter-gather table.
+ *	@n_elem: Number of elements in s/g table.
+ *
+ *	Initialize the data-related elements of queued_cmd @qc
+ *	to point to a scatter-gather table @sg, containing @n_elem
+ *	elements.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
+		 unsigned int n_elem)
+{
+	qc->flags |= ATA_QCFLAG_SG;
+	qc->__sg = sg;
+	qc->n_elem = n_elem;
+	qc->orig_n_elem = n_elem;
+}
+
+/**
+ *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
+ *	@qc: Command with memory buffer to be mapped.
+ *
+ *	DMA-map the memory buffer associated with queued_cmd @qc.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ *
+ *	RETURNS:
+ *	Zero on success, negative on error.
+ */
+
+static int ata_sg_setup_one(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	int dir = qc->dma_dir;
+	struct scatterlist *sg = qc->__sg;
+	dma_addr_t dma_address;
+
+	/* we must lengthen transfers to end on a 32-bit boundary */
+	qc->pad_len = sg->length & 3;
+	if (qc->pad_len) {
+		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+		struct scatterlist *psg = &qc->pad_sgent;
+
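+		/* only ATAPI commands transfer arbitrary byte counts;
+		 * ATA sector I/O is always a multiple of 512 bytes and
+		 * never needs padding.
+		 */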
+		assert(qc->dev->class == ATA_DEV_ATAPI);
+
+		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE)
+			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
+			       qc->pad_len);
+
+		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
+		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
+		/* trim sg */
+		sg->length -= qc->pad_len;
+
+		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
+			sg->length, qc->pad_len);
+	}
+
+	if (!sg->length) {
+		sg_dma_address(sg) = 0;
+		goto skip_map;
+	}
+
+	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
+				     sg->length, dir);
+	if (dma_mapping_error(dma_address)) {
+		/* restore sg */
+		sg->length += qc->pad_len;
+		return -1;
+	}
+
+	sg_dma_address(sg) = dma_address;
+skip_map:
+	sg_dma_len(sg) = sg->length;
+
+	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
+		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	return 0;
+}
+
+/**
+ *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
+ *	@qc: Command with scatter-gather table to be mapped.
+ *
+ *	DMA-map the scatter-gather table associated with queued_cmd @qc.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ *
+ *	RETURNS:
+ *	Zero on success, negative on error.
+ *
+ */
+
+static int ata_sg_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg = qc->__sg;
+	struct scatterlist *lsg = &sg[qc->n_elem - 1];
+	int n_elem, pre_n_elem, dir, trim_sg = 0;
+
+	VPRINTK("ENTER, ata%u\n", ap->id);
+	assert(qc->flags & ATA_QCFLAG_SG);
+
+	/* we must lengthen transfers to end on a 32-bit boundary */
+	qc->pad_len = lsg->length & 3;
+	if (qc->pad_len) {
+		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+		struct scatterlist *psg = &qc->pad_sgent;
+		unsigned int offset;
+
+		assert(qc->dev->class == ATA_DEV_ATAPI);
+
+		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
+
+		/*
+		 * psg->page/offset are used to copy to-be-written
+		 * data in this function or read data in ata_sg_clean.
+		 */
+		offset = lsg->offset + lsg->length - qc->pad_len;
+		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
+		psg->offset = offset_in_page(offset);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+			void *addr = kmap_atomic(psg->page, KM_IRQ0);
+			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
+			kunmap_atomic(addr, KM_IRQ0);
+		}
+
+		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
+		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
+		/* trim last sg */
+		lsg->length -= qc->pad_len;
+		if (lsg->length == 0)
+			trim_sg = 1;
+
+		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
+			qc->n_elem - 1, lsg->length, qc->pad_len);
+	}
+
+	pre_n_elem = qc->n_elem;
+	if (trim_sg && pre_n_elem)
+		pre_n_elem--;
+
+	if (!pre_n_elem) {
+		n_elem = 0;
+		goto skip_map;
+	}
+
+	dir = qc->dma_dir;
+	n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
+	if (n_elem < 1) {
+		/* restore last sg */
+		lsg->length += qc->pad_len;
+		return -1;
+	}
+
+	DPRINTK("%d sg elements mapped\n", n_elem);
+
+skip_map:
+	qc->n_elem = n_elem;
+
+	return 0;
+}
+
+/**
+ *	ata_poll_qc_complete - turn irq back on and finish qc
+ *	@qc: Command to complete
+ *	@err_mask: error mask (AC_ERR_*) to complete the command with
+ *
+ *	LOCKING:
+ *	None.  (grabs host lock)
+ */
+
+void ata_poll_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	ap->flags &= ~ATA_FLAG_NOINTR;
+	ata_irq_on(ap);
+	ata_qc_complete(qc, err_mask);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+}
+
+/**
+ *	ata_pio_poll - poll the port status with ata_chk_status
+ *	@ap: the target ata_port
+ *
+ *	LOCKING:
+ *	None.  (executing in kernel thread context)
+ *
+ *	RETURNS:
+ *	timeout value to use
+ */
+
+static unsigned long ata_pio_poll(struct ata_port *ap)
+{
+	u8 status;
+	unsigned int poll_state = HSM_ST_UNKNOWN;
+	unsigned int reg_state = HSM_ST_UNKNOWN;
+
+	switch (ap->hsm_task_state) {
+	case HSM_ST:
+	case HSM_ST_POLL:
+		poll_state = HSM_ST_POLL;
+		reg_state = HSM_ST;
+		break;
+	case HSM_ST_LAST:
+	case HSM_ST_LAST_POLL:
+		poll_state = HSM_ST_LAST_POLL;
+		reg_state = HSM_ST_LAST;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	status = ata_chk_status(ap);
+	if (status & ATA_BUSY) {
+		if (time_after(jiffies, ap->pio_task_timeout)) {
+			ap->hsm_task_state = HSM_ST_TMOUT;
+			return 0;
+		}
+		ap->hsm_task_state = poll_state;
+		return ATA_SHORT_PAUSE;
+	}
+
+	ap->hsm_task_state = reg_state;
+	return 0;
+}
+
+/**
+ *	ata_pio_complete - check if drive is busy or idle
+ *	@ap: the target ata_port
+ *
+ *	LOCKING:
+ *	None.  (executing in kernel thread context)
+ *
+ *	RETURNS:
+ *	Non-zero if qc completed, zero otherwise.
+ */
+
+static int ata_pio_complete (struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	u8 drv_stat;
+
+	/*
+	 * This is purely heuristic.  This is a fast path.  Sometimes when
+	 * we enter, BSY will be cleared in a chk-status or two.  If not,
+	 * the drive is probably seeking or something.  Snooze for a couple
+	 * msecs, then chk-status again.  If still busy, fall back to
+	 * HSM_ST_POLL state.
+	 */
+	drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
+	if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
+		msleep(2);
+		drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
+		if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
+			ap->hsm_task_state = HSM_ST_LAST_POLL;
+			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
+			return 0;
+		}
+	}
+
+	drv_stat = ata_wait_idle(ap);
+	if (!ata_ok(drv_stat)) {
+		ap->hsm_task_state = HSM_ST_ERR;
+		return 0;
+	}
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	assert(qc != NULL);
+
+	ap->hsm_task_state = HSM_ST_IDLE;
+
+	ata_poll_qc_complete(qc, 0);
+
+	/* another command may start at this point */
+
+	return 1;
+}
+
+
+/**
+ *	swap_buf_le16 - swap halves of 16-bit words in place
+ *	@buf:  Buffer to swap
+ *	@buf_words:  Number of 16-bit words in buffer.
+ *
+ *	Swap halves of 16-bit words if needed to convert from
+ *	little-endian byte order to native cpu byte order, or
+ *	vice-versa.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void swap_buf_le16(u16 *buf, unsigned int buf_words)
+{
+#ifdef __BIG_ENDIAN
+	unsigned int i;
+
+	for (i = 0; i < buf_words; i++)
+		buf[i] = le16_to_cpu(buf[i]);
+#endif /* __BIG_ENDIAN */
+}
+
+/**
+ *	ata_mmio_data_xfer - Transfer data by MMIO
+ *	@ap: port to read/write
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@write_data: read/write
+ *
+ *	Transfer data from/to the device data register by MMIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
+			       unsigned int buflen, int write_data)
+{
+	unsigned int i;
+	unsigned int words = buflen >> 1;
+	u16 *buf16 = (u16 *) buf;
+	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
+
+	/* Transfer multiple of 2 bytes */
+	if (write_data) {
+		for (i = 0; i < words; i++)
+			writew(le16_to_cpu(buf16[i]), mmio);
+	} else {
+		for (i = 0; i < words; i++)
+			buf16[i] = cpu_to_le16(readw(mmio));
+	}
+
+	/* Transfer trailing 1 byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		u16 align_buf[1] = { 0 };
+		unsigned char *trailing_buf = buf + buflen - 1;
+
+		if (write_data) {
+			memcpy(align_buf, trailing_buf, 1);
+			writew(le16_to_cpu(align_buf[0]), mmio);
+		} else {
+			align_buf[0] = cpu_to_le16(readw(mmio));
+			memcpy(trailing_buf, align_buf, 1);
+		}
+	}
+}
+
+/**
+ *	ata_pio_data_xfer - Transfer data by PIO
+ *	@ap: port to read/write
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@write_data: read/write
+ *
+ *	Transfer data from/to the device data register by PIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
+			      unsigned int buflen, int write_data)
+{
+	unsigned int words = buflen >> 1;
+
+	/* Transfer multiple of 2 bytes */
+	if (write_data)
+		outsw(ap->ioaddr.data_addr, buf, words);
+	else
+		insw(ap->ioaddr.data_addr, buf, words);
+
+	/* Transfer trailing 1 byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		u16 align_buf[1] = { 0 };
+		unsigned char *trailing_buf = buf + buflen - 1;
+
+		if (write_data) {
+			memcpy(align_buf, trailing_buf, 1);
+			outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
+		} else {
+			align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
+			memcpy(trailing_buf, align_buf, 1);
+		}
+	}
+}
+
+/**
+ *	ata_data_xfer - Transfer data from/to the data register.
+ *	@ap: port to read/write
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@do_write: read/write
+ *
+ *	Transfer data from/to the device data register.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
+			  unsigned int buflen, int do_write)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		ata_mmio_data_xfer(ap, buf, buflen, do_write);
+	else
+		ata_pio_data_xfer(ap, buf, buflen, do_write);
+}
+
+/**
+ *	ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
+ *	@qc: Command on going
+ *
+ *	Transfer ATA_SECT_SIZE of data from/to the ATA device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_pio_sector(struct ata_queued_cmd *qc)
+{
+	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
+	struct scatterlist *sg = qc->__sg;
+	struct ata_port *ap = qc->ap;
+	struct page *page;
+	unsigned int offset;
+	unsigned char *buf;
+
+	if (qc->cursect == (qc->nsect - 1))
+		ap->hsm_task_state = HSM_ST_LAST;
+
+	page = sg[qc->cursg].page;
+	offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
+
+	/* get the current page and offset */
+	page = nth_page(page, (offset >> PAGE_SHIFT));
+	offset %= PAGE_SIZE;
+
+	buf = kmap(page) + offset;
+
+	qc->cursect++;
+	qc->cursg_ofs++;
+
+	if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
+		qc->cursg++;
+		qc->cursg_ofs = 0;
+	}
+
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	/* do the actual data transfer (do_write was computed at entry) */
+	ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
+
+	kunmap(page);
+}
+
+/**
+ *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ *	@qc: Command on going
+ *	@bytes: number of bytes
+ *
+ *	Transfer data from/to the ATAPI device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ */
+
+static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
+{
+	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
+	struct scatterlist *sg = qc->__sg;
+	struct ata_port *ap = qc->ap;
+	struct page *page;
+	unsigned char *buf;
+	unsigned int offset, count;
+
+	if (qc->curbytes + bytes >= qc->nbytes)
+		ap->hsm_task_state = HSM_ST_LAST;
+
+next_sg:
+	if (unlikely(qc->cursg >= qc->n_elem)) {
+		/*
+		 * The end of qc->sg is reached and the device expects
+		 * more data to transfer. In order not to overrun qc->sg
+		 * and fulfill length specified in the byte count register,
+		 *    - for read case, discard trailing data from the device
+		 *    - for write case, padding zero data to the device
+		 */
+		u16 pad_buf[1] = { 0 };
+		unsigned int words = bytes >> 1;
+		unsigned int i;
+
+		if (words) /* warning if bytes > 1 */
+			printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
+			       ap->id, bytes);
+
+		for (i = 0; i < words; i++)
+			ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
+
+		ap->hsm_task_state = HSM_ST_LAST;
+		return;
+	}
+
+	sg = &qc->__sg[qc->cursg];
+
+	page = sg->page;
+	offset = sg->offset + qc->cursg_ofs;
+
+	/* get the current page and offset */
+	page = nth_page(page, (offset >> PAGE_SHIFT));
+	offset %= PAGE_SIZE;
+
+	/* don't overrun current sg */
+	count = min(sg->length - qc->cursg_ofs, bytes);
+
+	/* don't cross page boundaries */
+	count = min(count, (unsigned int)PAGE_SIZE - offset);
+
+	buf = kmap(page) + offset;
+
+	bytes -= count;
+	qc->curbytes += count;
+	qc->cursg_ofs += count;
+
+	if (qc->cursg_ofs == sg->length) {
+		qc->cursg++;
+		qc->cursg_ofs = 0;
+	}
+
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	/* do the actual data transfer */
+	ata_data_xfer(ap, buf, count, do_write);
+
+	kunmap(page);
+
+	if (bytes)
+		goto next_sg;
+}
+
+/**
+ *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ *	@qc: Command on going
+ *
+ *	Transfer data from/to the ATAPI device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void atapi_pio_bytes(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *dev = qc->dev;
+	unsigned int ireason, bc_lo, bc_hi, bytes;
+	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
+
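+	/* ATAPI reports the interrupt reason in the sector count
+	 * register and the requested byte count in the LBA mid/high
+	 * registers.
+	 */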
+	ap->ops->tf_read(ap, &qc->tf);
+	ireason = qc->tf.nsect;
+	bc_lo = qc->tf.lbam;
+	bc_hi = qc->tf.lbah;
+	bytes = (bc_hi << 8) | bc_lo;
+
+	/* shall be cleared to zero, indicating xfer of data */
+	if (ireason & (1 << 0))
+		goto err_out;
+
+	/* make sure transfer direction matches expected */
+	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
+	if (do_write != i_write)
+		goto err_out;
+
+	__atapi_pio_bytes(qc, bytes);
+
+	return;
+
+err_out:
+	printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
+	      ap->id, dev->devno);
+	ap->hsm_task_state = HSM_ST_ERR;
+}
+
+/**
+ *	ata_pio_block - start PIO on a block
+ *	@ap: the target ata_port
+ *
+ *	LOCKING:
+ *	None.  (executing in kernel thread context)
+ */
+
+static void ata_pio_block(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	u8 status;
+
+	/*
+	 * This is purely heuristic.  This is a fast path.
+	 * Sometimes when we enter, BSY will be cleared in
+	 * a chk-status or two.  If not, the drive is probably seeking
+	 * or something.  Snooze for a couple msecs, then
+	 * chk-status again.  If still busy, fall back to
+	 * HSM_ST_POLL state.
+	 */
+	status = ata_busy_wait(ap, ATA_BUSY, 5);
+	if (status & ATA_BUSY) {
+		msleep(2);
+		status = ata_busy_wait(ap, ATA_BUSY, 10);
+		if (status & ATA_BUSY) {
+			ap->hsm_task_state = HSM_ST_POLL;
+			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
+			return;
+		}
+	}
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	assert(qc != NULL);
+
+	if (is_atapi_taskfile(&qc->tf)) {
+		/* no more data to transfer or unsupported ATAPI command */
+		if ((status & ATA_DRQ) == 0) {
+			ap->hsm_task_state = HSM_ST_LAST;
+			return;
+		}
+
+		atapi_pio_bytes(qc);
+	} else {
+		/* handle BSY=0, DRQ=0 as error */
+		if ((status & ATA_DRQ) == 0) {
+			ap->hsm_task_state = HSM_ST_ERR;
+			return;
+		}
+
+		ata_pio_sector(qc);
+	}
+}
+
+static void ata_pio_error(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+
+	printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	assert(qc != NULL);
+
+	ap->hsm_task_state = HSM_ST_IDLE;
+
+	ata_poll_qc_complete(qc, AC_ERR_ATA_BUS);
+}
+
+static void ata_pio_task(void *_data)
+{
+	struct ata_port *ap = _data;
+	unsigned long timeout;
+	int qc_completed;
+
+fsm_start:
+	timeout = 0;
+	qc_completed = 0;
+
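+	/* run one state-machine step; loop until a poll delay is
+	 * requested or the command completes.
+	 */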
+	switch (ap->hsm_task_state) {
+	case HSM_ST_IDLE:
+		return;
+
+	case HSM_ST:
+		ata_pio_block(ap);
+		break;
+
+	case HSM_ST_LAST:
+		qc_completed = ata_pio_complete(ap);
+		break;
+
+	case HSM_ST_POLL:
+	case HSM_ST_LAST_POLL:
+		timeout = ata_pio_poll(ap);
+		break;
+
+	case HSM_ST_TMOUT:
+	case HSM_ST_ERR:
+		ata_pio_error(ap);
+		return;
+	}
+
+	if (timeout)
+		queue_delayed_work(ata_wq, &ap->pio_task, timeout);
+	else if (!qc_completed)
+		goto fsm_start;
+}
+
+/**
+ *	ata_qc_timeout - Handle timeout of queued command
+ *	@qc: Command that timed out
+ *
+ *	Some part of the kernel (currently, only the SCSI layer)
+ *	has noticed that the active command @qc has not
+ *	completed after a specified length of time.  Handle this
+ *	condition by disabling DMA (if necessary) and completing
+ *	transactions, with error if necessary.
+ *
+ *	This also handles the case of the "lost interrupt", where
+ *	for some reason (possibly hardware bug, possibly driver bug)
+ *	an interrupt was not delivered to the driver, even though the
+ *	transaction completed successfully.
+ *
+ *	LOCKING:
+ *	Inherited from SCSI layer (none, can sleep)
+ */
+
+static void ata_qc_timeout(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_host_set *host_set = ap->host_set;
+	u8 host_stat = 0, drv_stat;
+	unsigned long flags;
+
+	DPRINTK("ENTER\n");
+
+	spin_lock_irqsave(&host_set->lock, flags);
+
+	/* hack alert!  We cannot use the supplied completion
+	 * function from inside the ->eh_strategy_handler() thread.
+	 * libata is the only user of ->eh_strategy_handler() in
+	 * any kernel, so the default scsi_done() assumes it is
+	 * not being called from the SCSI EH.
+	 */
+	qc->scsidone = scsi_finish_command;
+
+	switch (qc->tf.protocol) {
+
+	case ATA_PROT_DMA:
+	case ATA_PROT_ATAPI_DMA:
+		host_stat = ap->ops->bmdma_status(ap);
+
+		/* before we do anything else, clear DMA-Start bit */
+		ap->ops->bmdma_stop(qc);
+
+		/* fall through */
+
+	default:
+		ata_altstatus(ap);
+		drv_stat = ata_chk_status(ap);
+
+		/* ack bmdma irq events */
+		ap->ops->irq_clear(ap);
+
+		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
+		       ap->id, qc->tf.command, drv_stat, host_stat);
+
+		/* complete taskfile transaction */
+		ata_qc_complete(qc, ac_err_mask(drv_stat));
+		break;
+	}
+
+	spin_unlock_irqrestore(&host_set->lock, flags);
+
+	DPRINTK("EXIT\n");
+}
+
+/**
+ *	ata_eng_timeout - Handle timeout of queued command
+ *	@ap: Port on which timed-out command is active
+ *
+ *	Some part of the kernel (currently, only the SCSI layer)
+ *	has noticed that the active command on port @ap has not
+ *	completed after a specified length of time.  Handle this
+ *	condition by disabling DMA (if necessary) and completing
+ *	transactions, with error if necessary.
+ *
+ *	This also handles the case of the "lost interrupt", where
+ *	for some reason (possibly hardware bug, possibly driver bug)
+ *	an interrupt was not delivered to the driver, even though the
+ *	transaction completed successfully.
+ *
+ *	LOCKING:
+ *	Inherited from SCSI layer (none, can sleep)
+ */
+
+void ata_eng_timeout(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+
+	DPRINTK("ENTER\n");
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	if (qc)
+		ata_qc_timeout(qc);
+	else
+		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
+		       ap->id);
+
+	DPRINTK("EXIT\n");
+}
+
+/**
+ *	ata_qc_new - Request an available ATA command, for queueing
+ *	@ap: Port from which a free command slot is requested
+ *
+ *	LOCKING:
+ *	None.
+ */
+
+static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc = NULL;
+	unsigned int i;
+
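+	/* grab the first free tag; test_and_set_bit is atomic,
+	 * so no additional locking is needed here.
+	 */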
+	for (i = 0; i < ATA_MAX_QUEUE; i++)
+		if (!test_and_set_bit(i, &ap->qactive)) {
+			qc = ata_qc_from_tag(ap, i);
+			break;
+		}
+
+	if (qc)
+		qc->tag = i;
+
+	return qc;
+}
+
+/**
+ *	ata_qc_new_init - Request an available ATA command, and initialize it
+ *	@ap: Port associated with device @dev
+ *	@dev: Device from whom we request an available command structure
+ *
+ *	LOCKING:
+ *	None.
+ */
+
+struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
+				      struct ata_device *dev)
+{
+	struct ata_queued_cmd *qc;
+
+	qc = ata_qc_new(ap);
+	if (qc) {
+		qc->scsicmd = NULL;
+		qc->ap = ap;
+		qc->dev = dev;
+
+		ata_qc_reinit(qc);
+	}
+
+	return qc;
+}
+
+int ata_qc_complete_noop(struct ata_queued_cmd *qc, unsigned int err_mask)
+{
+	return 0;
+}
+
+static void __ata_qc_complete(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int tag, do_clear = 0;
+
+	qc->flags = 0;
+	tag = qc->tag;
+	if (likely(ata_tag_valid(tag))) {
+		if (tag == ap->active_tag)
+			ap->active_tag = ATA_TAG_POISON;
+		qc->tag = ATA_TAG_POISON;
+		do_clear = 1;
+	}
+
+	if (qc->waiting) {
+		struct completion *waiting = qc->waiting;
+		qc->waiting = NULL;
+		complete(waiting);
+	}
+
+	if (likely(do_clear))
+		clear_bit(tag, &ap->qactive);
+}
+
+/**
+ *	ata_qc_free - free unused ata_queued_cmd
+ *	@qc: Command to complete
+ *
+ *	Designed to free unused ata_queued_cmd object
+ *	in case something prevents using it.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_qc_free(struct ata_queued_cmd *qc)
+{
+	assert(qc != NULL);	/* ata_qc_from_tag _might_ return NULL */
+	assert(qc->waiting == NULL);	/* nothing should be waiting */
+
+	__ata_qc_complete(qc);
+}
+
+/**
+ *	ata_qc_complete - Complete an active ATA command
+ *	@qc: Command to complete
+ *	@err_mask: ATA Status register contents
+ *
+ *	Indicate to the mid and upper layers that an ATA
+ *	command has completed, with either an ok or not-ok status.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+void ata_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask)
+{
+	int rc;
+
+	assert(qc != NULL);	/* ata_qc_from_tag _might_ return NULL */
+	assert(qc->flags & ATA_QCFLAG_ACTIVE);
+
+	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
+		ata_sg_clean(qc);
+
+	/* atapi: mark qc as inactive to prevent the interrupt handler
+	 * from completing the command twice later, before the error handler
+	 * is called. (when rc != 0 and atapi request sense is needed)
+	 */
+	qc->flags &= ~ATA_QCFLAG_ACTIVE;
+
+	/* call completion callback */
+	rc = qc->complete_fn(qc, err_mask);
+
+	/* if callback indicates not to complete command (non-zero),
+	 * return immediately
+	 */
+	if (rc != 0)
+		return;
+
+	__ata_qc_complete(qc);
+
+	VPRINTK("EXIT\n");
+}
+
+static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_DMA:
+	case ATA_PROT_ATAPI_DMA:
+		return 1;
+
+	case ATA_PROT_ATAPI:
+	case ATA_PROT_PIO:
+	case ATA_PROT_PIO_MULT:
+		if (ap->flags & ATA_FLAG_PIO_DMA)
+			return 1;
+
+		/* fall through */
+
+	default:
+		return 0;
+	}
+
+	/* never reached */
+}
+
+/**
+ *	ata_qc_issue - issue taskfile to device
+ *	@qc: command to issue to device
+ *
+ *	Prepare an ATA command for submission to a device.
+ *	This includes mapping the data into a DMA-able
+ *	area, filling in the S/G table, and finally
+ *	writing the taskfile to hardware, starting the command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ *
+ *	RETURNS:
+ *	Zero on success, negative on error.
+ */
+
+int ata_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	if (ata_should_dma_map(qc)) {
+		if (qc->flags & ATA_QCFLAG_SG) {
+			if (ata_sg_setup(qc))
+				goto err_out;
+		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
+			if (ata_sg_setup_one(qc))
+				goto err_out;
+		}
+	} else {
+		qc->flags &= ~ATA_QCFLAG_DMAMAP;
+	}
+
+	ap->ops->qc_prep(qc);
+
+	qc->ap->active_tag = qc->tag;
+	qc->flags |= ATA_QCFLAG_ACTIVE;
+
+	return ap->ops->qc_issue(qc);
+
+err_out:
+	return -1;
+}
+
+
+/**
+ *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
+ *	@qc: command to issue to device
+ *
+ *	Using various libata functions and hooks, this function
+ *	starts an ATA command.  ATA commands are grouped into
+ *	classes called "protocols", and issuing each type of protocol
+ *	is slightly different.
+ *
+ *	May be used as the qc_issue() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ *
+ *	RETURNS:
+ *	Zero on success, negative on error.
+ */
+
+int ata_qc_issue_prot(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	ata_dev_select(ap, qc->dev->devno, 1, 0);
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_NODATA:
+		ata_tf_to_host(ap, &qc->tf);
+		break;
+
+	case ATA_PROT_DMA:
+		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
+		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
+		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
+		break;
+
+	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
+		ata_qc_set_polling(qc);
+		ata_tf_to_host(ap, &qc->tf);
+		ap->hsm_task_state = HSM_ST;
+		queue_work(ata_wq, &ap->pio_task);
+		break;
+
+	case ATA_PROT_ATAPI:
+		ata_qc_set_polling(qc);
+		ata_tf_to_host(ap, &qc->tf);
+		queue_work(ata_wq, &ap->packet_task);
+		break;
+
+	case ATA_PROT_ATAPI_NODATA:
+		ap->flags |= ATA_FLAG_NOINTR;
+		ata_tf_to_host(ap, &qc->tf);
+		queue_work(ata_wq, &ap->packet_task);
+		break;
+
+	case ATA_PROT_ATAPI_DMA:
+		ap->flags |= ATA_FLAG_NOINTR;
+		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
+		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
+		queue_work(ata_wq, &ap->packet_task);
+		break;
+
+	default:
+		WARN_ON(1);
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ *	ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+
+	/* load PRD table addr. */
+	mb();	/* make sure PRD table writes are visible to controller */
+	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = readb(mmio + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
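+	/* ATA_DMA_WR set means the controller writes to memory,
+	 * i.e. a read from the device.
+	 */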
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	writeb(dmactl, mmio + ATA_DMA_CMD);
+
+	/* issue r/w command */
+	ap->ops->exec_command(ap, &qc->tf);
+}
+
+/**
+ *	ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+	u8 dmactl;
+
+	/* start host DMA transaction */
+	dmactl = readb(mmio + ATA_DMA_CMD);
+	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
+
+	/* Strictly, one may wish to issue a readb() here, to
+	 * flush the mmio write.  However, control also passes
+	 * to the hardware at this point, and it will interrupt
+	 * us when we are to resume control.  So, in effect,
+	 * we don't care when the mmio write flushes.
+	 * Further, a read of the DMA status register _immediately_
+	 * following the write may not be what certain flaky hardware
+	 * is expecting, so I think it is best not to add a readb()
+	 * without first testing all the MMIO ATA cards/mobos.
+	 * Or maybe I'm just being paranoid.
+	 */
+}
+
+/**
+ *	ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+
+	/* load PRD table addr. */
+	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	/* issue r/w command */
+	ap->ops->exec_command(ap, &qc->tf);
+}
+
+/**
+ *	ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	u8 dmactl;
+
+	/* start host DMA transaction */
+	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	outb(dmactl | ATA_DMA_START,
+	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+}
+
+
+/**
+ *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	Writes the ATA_DMA_START flag to the DMA command register.
+ *
+ *	May be used as the bmdma_start() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_bmdma_start(struct ata_queued_cmd *qc)
+{
+	if (qc->ap->flags & ATA_FLAG_MMIO)
+		ata_bmdma_start_mmio(qc);
+	else
+		ata_bmdma_start_pio(qc);
+}
+
+
+/**
+ *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	Writes address of PRD table to device's PRD Table Address
+ *	register, sets the DMA control register, and calls
+ *	ops->exec_command() to start the transfer.
+ *
+ *	May be used as the bmdma_setup() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	if (qc->ap->flags & ATA_FLAG_MMIO)
+		ata_bmdma_setup_mmio(qc);
+	else
+		ata_bmdma_setup_pio(qc);
+}
+
+
+/**
+ *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
+ *	@ap: Port associated with this ATA transaction.
+ *
+ *	Clear interrupt and error flags in DMA status register.
+ *
+ *	May be used as the irq_clear() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+void ata_bmdma_irq_clear(struct ata_port *ap)
+{
+	if (ap->flags & ATA_FLAG_MMIO) {
+		void __iomem *mmio =
+			((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
+		writeb(readb(mmio), mmio);
+	} else {
+		unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
+		outb(inb(addr), addr);
+	}
+}
+
+
+/**
+ *	ata_bmdma_status - Read PCI IDE BMDMA status
+ *	@ap: Port associated with this ATA transaction.
+ *
+ *	Read and return BMDMA status register.
+ *
+ *	May be used as the bmdma_status() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+u8 ata_bmdma_status(struct ata_port *ap)
+{
+	u8 host_stat;
+	if (ap->flags & ATA_FLAG_MMIO) {
+		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+		host_stat = readb(mmio + ATA_DMA_STATUS);
+	} else
+		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+	return host_stat;
+}
+
+
+/**
+ *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
+ *	@qc: Command we are ending DMA for
+ *
+ *	Clears the ATA_DMA_START flag in the dma control register
+ *
+ *	May be used as the bmdma_stop() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+void ata_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	if (ap->flags & ATA_FLAG_MMIO) {
+		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+
+		/* clear start/stop bit */
+		writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
+			mmio + ATA_DMA_CMD);
+	} else {
+		/* clear start/stop bit */
+		outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
+			ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	}
+
+	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+	ata_altstatus(ap);        /* dummy read */
+}
+
+/**
+ *	ata_host_intr - Handle host interrupt for given (port, task)
+ *	@ap: Port on which interrupt arrived (possibly...)
+ *	@qc: Taskfile currently active in engine
+ *
+ *	Handle host interrupt for given queued command.  Currently,
+ *	only DMA interrupts are handled.  All other commands are
+ *	handled via polling with interrupts disabled (nIEN bit).
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ *
+ *	RETURNS:
+ *	One if interrupt was handled, zero if not (shared irq).
+ */
+
+inline unsigned int ata_host_intr (struct ata_port *ap,
+				   struct ata_queued_cmd *qc)
+{
+	u8 status, host_stat;
+
+	switch (qc->tf.protocol) {
+
+	case ATA_PROT_DMA:
+	case ATA_PROT_ATAPI_DMA:
+	case ATA_PROT_ATAPI:
+		/* check status of DMA engine */
+		host_stat = ap->ops->bmdma_status(ap);
+		VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+
+		/* if it's not our irq... */
+		if (!(host_stat & ATA_DMA_INTR))
+			goto idle_irq;
+
+		/* before we do anything else, clear DMA-Start bit */
+		ap->ops->bmdma_stop(qc);
+
+		/* fall through */
+
+	case ATA_PROT_ATAPI_NODATA:
+	case ATA_PROT_NODATA:
+		/* check altstatus */
+		status = ata_altstatus(ap);
+		if (status & ATA_BUSY)
+			goto idle_irq;
+
+		/* check main status, clearing INTRQ */
+		status = ata_chk_status(ap);
+		if (unlikely(status & ATA_BUSY))
+			goto idle_irq;
+		DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
+			ap->id, qc->tf.protocol, status);
+
+		/* ack bmdma irq events */
+		ap->ops->irq_clear(ap);
+
+		/* complete taskfile transaction */
+		ata_qc_complete(qc, ac_err_mask(status));
+		break;
+
+	default:
+		goto idle_irq;
+	}
+
+	return 1;	/* irq handled */
+
+idle_irq:
+	ap->stats.idle_irq++;
+
+#ifdef ATA_IRQ_TRAP
+	if ((ap->stats.idle_irq % 1000) == 0) {
+		ata_irq_ack(ap, 0); /* debug trap */
+		printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
+		return 1;	/* report the trapped irq as handled */
+	}
+#endif
+	return 0;	/* irq not handled */
+}
+
+/**
+ *	ata_interrupt - Default ATA host interrupt handler
+ *	@irq: irq line (unused)
+ *	@dev_instance: pointer to our ata_host_set information structure
+ *	@regs: unused
+ *
+ *	Default interrupt handler for PCI IDE devices.  Calls
+ *	ata_host_intr() for each port that is not disabled.
+ *
+ *	LOCKING:
+ *	Obtains host_set lock during operation.
+ *
+ *	RETURNS:
+ *	IRQ_NONE or IRQ_HANDLED.
+ */
+
+irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+{
+	struct ata_host_set *host_set = dev_instance;
+	unsigned int i;
+	unsigned int handled = 0;
+	unsigned long flags;
+
+	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
+	spin_lock_irqsave(&host_set->lock, flags);
+
+	for (i = 0; i < host_set->n_ports; i++) {
+		struct ata_port *ap;
+
+		ap = host_set->ports[i];
+		if (ap &&
+		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+			struct ata_queued_cmd *qc;
+
+			qc = ata_qc_from_tag(ap, ap->active_tag);
+			if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
+			    (qc->flags & ATA_QCFLAG_ACTIVE))
+				handled |= ata_host_intr(ap, qc);
+		}
+	}
+
+	spin_unlock_irqrestore(&host_set->lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+/**
+ *	atapi_packet_task - Write CDB bytes to hardware
+ *	@_data: Port to which ATAPI device is attached.
+ *
+ *	When the device has indicated its readiness to accept
+ *	a CDB, this function is called to send the CDB.
+ *	If DMA is to be performed, exit immediately.
+ *	Otherwise, we are in polling mode, so poll
+ *	status until the operation succeeds or fails.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+
+static void atapi_packet_task(void *_data)
+{
+	struct ata_port *ap = _data;
+	struct ata_queued_cmd *qc;
+	u8 status;
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	assert(qc != NULL);
+	assert(qc->flags & ATA_QCFLAG_ACTIVE);
+
+	/* sleep-wait for BSY to clear */
+	DPRINTK("busy wait\n");
+	if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
+		goto err_out_status;
+
+	/* make sure DRQ is set */
+	status = ata_chk_status(ap);
+	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
+		goto err_out;
+
+	/* send SCSI cdb */
+	DPRINTK("send cdb\n");
+	assert(ap->cdb_len >= 12);
+
+	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
+	    qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
+		unsigned long flags;
+
+		/* Once we're done issuing command and kicking bmdma,
+		 * irq handler takes over.  To not lose irq, we need
+		 * to clear NOINTR flag before sending cdb, but
+		 * interrupt handler shouldn't be invoked before we're
+		 * finished.  Hence, the following locking.
+		 */
+		spin_lock_irqsave(&ap->host_set->lock, flags);
+		ap->flags &= ~ATA_FLAG_NOINTR;
+		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
+		if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
+			ap->ops->bmdma_start(qc);	/* initiate bmdma */
+		spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	} else {
+		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
+
+		/* PIO commands are handled by polling */
+		ap->hsm_task_state = HSM_ST;
+		queue_work(ata_wq, &ap->pio_task);
+	}
+
+	return;
+
+err_out_status:
+	status = ata_chk_status(ap);
+err_out:
+	ata_poll_qc_complete(qc, __ac_err_mask(status));
+}
+
+
+/**
+ *	ata_port_start - Set port up for dma.
+ *	@ap: Port to initialize
+ *
+ *	Called just after data structures for each port are
+ *	initialized.  Allocates space for PRD table.
+ *
+ *	May be used as the port_start() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+int ata_port_start (struct ata_port *ap)
+{
+	struct device *dev = ap->host_set->dev;
+	int rc;
+
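+	/* the controller reads the PRD table directly, so it must
+	 * live in coherent DMA memory.
+	 */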
+	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
+	if (!ap->prd)
+		return -ENOMEM;
+
+	rc = ata_pad_alloc(ap, dev);
+	if (rc) {
+		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+		return rc;
+	}
+
+	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
+
+	return 0;
+}
+
+
+/**
+ *	ata_port_stop - Undo ata_port_start()
+ *	@ap: Port to shut down
+ *
+ *	Frees the PRD table.
+ *
+ *	May be used as the port_stop() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+void ata_port_stop (struct ata_port *ap)
+{
+	struct device *dev = ap->host_set->dev;
+
+	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+	ata_pad_free(ap, dev);
+}
+
+void ata_host_stop (struct ata_host_set *host_set)
+{
+	if (host_set->mmio_base)
+		iounmap(host_set->mmio_base);
+}
+
+
+/**
+ *	ata_host_remove - Unregister SCSI host structure with upper layers
+ *	@ap: Port to unregister
+ *	@do_unregister: 1 if we fully unregister, 0 to just stop the port
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
+{
+	struct Scsi_Host *sh = ap->host;
+
+	DPRINTK("ENTER\n");
+
+	if (do_unregister)
+		scsi_remove_host(sh);
+
+	ap->ops->port_stop(ap);
+}
+
+/**
+ *	ata_host_init - Initialize an ata_port structure
+ *	@ap: Structure to initialize
+ *	@host: associated SCSI mid-layer structure
+ *	@host_set: Collection of hosts to which @ap belongs
+ *	@ent: Probe information provided by low-level driver
+ *	@port_no: Port number associated with this ata_port
+ *
+ *	Initialize a new ata_port structure, and its associated
+ *	scsi_host.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
+			  struct ata_host_set *host_set,
+			  const struct ata_probe_ent *ent, unsigned int port_no)
+{
+	unsigned int i;
+
+	host->max_id = 16;
+	host->max_lun = 1;
+	host->max_channel = 1;
+	host->unique_id = ata_unique_id++;
+	host->max_cmd_len = 12;
+
+	ap->flags = ATA_FLAG_PORT_DISABLED;
+	ap->id = host->unique_id;
+	ap->host = host;
+	ap->ctl = ATA_DEVCTL_OBS;
+	ap->host_set = host_set;
+	ap->port_no = port_no;
+	ap->hard_port_no =
+		ent->legacy_mode ? ent->hard_port_no : port_no;
+	ap->pio_mask = ent->pio_mask;
+	ap->mwdma_mask = ent->mwdma_mask;
+	ap->udma_mask = ent->udma_mask;
+	ap->flags |= ent->host_flags;
+	ap->ops = ent->port_ops;
+	ap->cbl = ATA_CBL_NONE;
+	ap->active_tag = ATA_TAG_POISON;
+	ap->last_ctl = 0xFF;
+
+	INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
+	INIT_WORK(&ap->pio_task, ata_pio_task, ap);
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++)
+		ap->device[i].devno = i;
+
+#ifdef ATA_IRQ_TRAP
+	ap->stats.unhandled_irq = 1;
+	ap->stats.idle_irq = 1;
+#endif
+
+	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
+}
+
+/**
+ *	ata_host_add - Attach low-level ATA driver to system
+ *	@ent: Information provided by low-level driver
+ *	@host_set: Collections of ports to which we add
+ *	@port_no: Port number associated with this host
+ *
+ *	Attach low-level ATA driver to system.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	New ata_port on success, NULL on error.
+ */
+
+static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
+				      struct ata_host_set *host_set,
+				      unsigned int port_no)
+{
+	struct Scsi_Host *host;
+	struct ata_port *ap;
+	int rc;
+
+	DPRINTK("ENTER\n");
+	host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
+	if (!host)
+		return NULL;
+
+	ap = (struct ata_port *) &host->hostdata[0];
+
+	ata_host_init(ap, host, host_set, ent, port_no);
+
+	rc = ap->ops->port_start(ap);
+	if (rc)
+		goto err_out;
+
+	return ap;
+
+err_out:
+	scsi_host_put(host);
+	return NULL;
+}
+
+/**
+ *	ata_device_add - Register hardware device with ATA and SCSI layers
+ *	@ent: Probe information describing hardware device to be registered
+ *
+ *	This function processes the information provided in the probe
+ *	information struct @ent, allocates the necessary ATA and SCSI
+ *	host information structures, initializes them, and registers
+ *	everything with requisite kernel subsystems.
+ *
+ *	This function requests irqs, probes the ATA bus, and probes
+ *	the SCSI bus.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	Number of ports registered.  Zero on error (no ports registered).
+ */
+
+int ata_device_add(const struct ata_probe_ent *ent)
+{
+	unsigned int count = 0, i;
+	struct device *dev = ent->dev;
+	struct ata_host_set *host_set;
+
+	DPRINTK("ENTER\n");
+	/* alloc a container for our list of ATA ports (buses) */
+	host_set = kzalloc(sizeof(struct ata_host_set) +
+			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
+	if (!host_set)
+		return 0;
+	spin_lock_init(&host_set->lock);
+
+	host_set->dev = dev;
+	host_set->n_ports = ent->n_ports;
+	host_set->irq = ent->irq;
+	host_set->mmio_base = ent->mmio_base;
+	host_set->private_data = ent->private_data;
+	host_set->ops = ent->port_ops;
+
+	/* register each port bound to this device */
+	for (i = 0; i < ent->n_ports; i++) {
+		struct ata_port *ap;
+		unsigned long xfer_mode_mask;
+
+		ap = ata_host_add(ent, host_set, i);
+		if (!ap)
+			goto err_out;
+
+		host_set->ports[i] = ap;
+		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
+				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
+				(ap->pio_mask << ATA_SHIFT_PIO);
+
+		/* print per-port info to dmesg */
+		printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
+				 "bmdma 0x%lX irq %lu\n",
+			ap->id,
+			ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
+			ata_mode_string(xfer_mode_mask),
+	       		ap->ioaddr.cmd_addr,
+	       		ap->ioaddr.ctl_addr,
+	       		ap->ioaddr.bmdma_addr,
+	       		ent->irq);
+
+		ata_chk_status(ap);
+		host_set->ops->irq_clear(ap);
+		count++;
+	}
+
+	if (!count)
+		goto err_free_ret;
+
+	/* obtain irq, that is shared between channels */
+	if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
+			DRV_NAME, host_set))
+		goto err_out;
+
+	/* perform each probe synchronously */
+	DPRINTK("probe begin\n");
+	for (i = 0; i < count; i++) {
+		struct ata_port *ap;
+		int rc;
+
+		ap = host_set->ports[i];
+
+		DPRINTK("ata%u: probe begin\n", ap->id);
+		rc = ata_bus_probe(ap);
+		DPRINTK("ata%u: probe end\n", ap->id);
+
+		if (rc) {
+			/* FIXME: do something useful here?
+			 * Current libata behavior will
+			 * tear down everything when
+			 * the module is removed
+			 * or the h/w is unplugged.
+			 */
+		}
+
+		rc = scsi_add_host(ap->host, dev);
+		if (rc) {
+			printk(KERN_ERR "ata%u: scsi_add_host failed\n",
+			       ap->id);
+			/* FIXME: do something useful here */
+			/* FIXME: handle unconditional calls to
+			 * scsi_scan_host and ata_host_remove, below,
+			 * at the very least
+			 */
+		}
+	}
+
+	/* probes are done, now scan each port's disk(s) */
+	DPRINTK("probe begin\n");
+	for (i = 0; i < count; i++) {
+		struct ata_port *ap = host_set->ports[i];
+
+		ata_scsi_scan_host(ap);
+	}
+
+	dev_set_drvdata(dev, host_set);
+
+	VPRINTK("EXIT, returning %u\n", ent->n_ports);
+	return ent->n_ports; /* success */
+
+err_out:
+	for (i = 0; i < count; i++) {
+		ata_host_remove(host_set->ports[i], 1);
+		scsi_host_put(host_set->ports[i]->host);
+	}
+err_free_ret:
+	kfree(host_set);
+	VPRINTK("EXIT, returning 0\n");
+	return 0;
+}
+
+/**
+ *	ata_host_set_remove - PCI layer callback for device removal
+ *	@host_set: ATA host set that was removed
+ *
+ *	Unregister all objects associated with this host set. Free those
+ *	objects.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ */
+
+void ata_host_set_remove(struct ata_host_set *host_set)
+{
+	struct ata_port *ap;
+	unsigned int i;
+
+	for (i = 0; i < host_set->n_ports; i++) {
+		ap = host_set->ports[i];
+		scsi_remove_host(ap->host);
+	}
+
+	free_irq(host_set->irq, host_set);
+
+	for (i = 0; i < host_set->n_ports; i++) {
+		ap = host_set->ports[i];
+
+		ata_scsi_release(ap->host);
+
+		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
+			struct ata_ioports *ioaddr = &ap->ioaddr;
+
+			if (ioaddr->cmd_addr == 0x1f0)
+				release_region(0x1f0, 8);
+			else if (ioaddr->cmd_addr == 0x170)
+				release_region(0x170, 8);
+		}
+
+		scsi_host_put(ap->host);
+	}
+
+	if (host_set->ops->host_stop)
+		host_set->ops->host_stop(host_set);
+
+	kfree(host_set);
+}
+
+/**
+ *	ata_scsi_release - SCSI layer callback hook for host unload
+ *	@host: libata host to be unloaded
+ *
+ *	Performs all duties necessary to shut down a libata port...
+ *	Kill port kthread, disable port, and release resources.
+ *
+ *	LOCKING:
+ *	Inherited from SCSI layer.
+ *
+ *	RETURNS:
+ *	One.
+ */
+
+int ata_scsi_release(struct Scsi_Host *host)
+{
+	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
+
+	DPRINTK("ENTER\n");
+
+	ap->ops->port_disable(ap);
+	ata_host_remove(ap, 0);
+
+	DPRINTK("EXIT\n");
+	return 1;
+}
+
+/**
+ *	ata_std_ports - initialize ioaddr with standard port offsets.
+ *	@ioaddr: IO address structure to be initialized
+ *
+ *	Utility function which initializes data_addr, error_addr,
+ *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
+ *	device_addr, status_addr, and command_addr to standard offsets
+ *	relative to cmd_addr.
+ *
+ *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
+ */
+
+void ata_std_ports(struct ata_ioports *ioaddr)
+{
+	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
+	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
+	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
+	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
+	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
+	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
+	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
+	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
+	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
+	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
+}
+
+static struct ata_probe_ent *
+ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
+{
+	struct ata_probe_ent *probe_ent;
+
+	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
+	if (!probe_ent) {
+		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
+		       kobject_name(&(dev->kobj)));
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&probe_ent->node);
+	probe_ent->dev = dev;
+
+	probe_ent->sht = port->sht;
+	probe_ent->host_flags = port->host_flags;
+	probe_ent->pio_mask = port->pio_mask;
+	probe_ent->mwdma_mask = port->mwdma_mask;
+	probe_ent->udma_mask = port->udma_mask;
+	probe_ent->port_ops = port->port_ops;
+
+	return probe_ent;
+}
+
+
+
+#ifdef CONFIG_PCI
+
+void ata_pci_host_stop (struct ata_host_set *host_set)
+{
+	struct pci_dev *pdev = to_pci_dev(host_set->dev);
+
+	pci_iounmap(pdev, host_set->mmio_base);
+}
+
+/**
+ *	ata_pci_init_native_mode - Initialize native-mode driver
+ *	@pdev:  pci device to be initialized
+ *	@port:  array[2] of pointers to port info structures.
+ *	@ports: bitmap of ports present
+ *
+ *	Utility function which allocates and initializes an
+ *	ata_probe_ent structure for a standard dual-port
+ *	PIO-based IDE controller.  The returned ata_probe_ent
+ *	structure can be passed to ata_device_add().  The returned
+ *	ata_probe_ent structure should then be freed with kfree().
+ *
+ *	The caller need only pass the address of the primary port, the
+ *	secondary will be deduced automatically. If the device has
+ *	non-standard secondary port mappings this function can be
+ *	called twice, once for each interface.
+ */
+
+struct ata_probe_ent *
+ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
+{
+	struct ata_probe_ent *probe_ent =
+		ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
+	int p = 0;
+
+	if (!probe_ent)
+		return NULL;
+
+	probe_ent->irq = pdev->irq;
+	probe_ent->irq_flags = SA_SHIRQ;
+	probe_ent->private_data = port[0]->private_data;
+
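+	/* the device control register sits at a fixed offset
+	 * (ATA_PCI_CTL_OFS) inside the control block BAR.
+	 */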
+	if (ports & ATA_PORT_PRIMARY) {
+		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
+		probe_ent->port[p].altstatus_addr =
+		probe_ent->port[p].ctl_addr =
+			pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
+		probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
+		ata_std_ports(&probe_ent->port[p]);
+		p++;
+	}
+
+	if (ports & ATA_PORT_SECONDARY) {
+		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
+		probe_ent->port[p].altstatus_addr =
+		probe_ent->port[p].ctl_addr =
+			pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
+		probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
+		ata_std_ports(&probe_ent->port[p]);
+		p++;
+	}
+
+	probe_ent->n_ports = p;
+	return probe_ent;
+}
+
+static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num)
+{
+	struct ata_probe_ent *probe_ent;
+
+	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
+	if (!probe_ent)
+		return NULL;
+
+	probe_ent->legacy_mode = 1;
+	probe_ent->n_ports = 1;
+	probe_ent->hard_port_no = port_num;
+	probe_ent->private_data = port->private_data;
+
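+	/* legacy mode uses the fixed ISA-era port addresses and IRQs 14/15 */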
+	switch(port_num)
+	{
+		case 0:
+			probe_ent->irq = 14;
+			probe_ent->port[0].cmd_addr = 0x1f0;
+			probe_ent->port[0].altstatus_addr =
+			probe_ent->port[0].ctl_addr = 0x3f6;
+			break;
+		case 1:
+			probe_ent->irq = 15;
+			probe_ent->port[0].cmd_addr = 0x170;
+			probe_ent->port[0].altstatus_addr =
+			probe_ent->port[0].ctl_addr = 0x376;
+			break;
+	}
+	probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
+	ata_std_ports(&probe_ent->port[0]);
+	return probe_ent;
+}
+
+/**
+ *	ata_pci_init_one - Initialize/register PCI IDE host controller
+ *	@pdev: Controller to be initialized
+ *	@port_info: Information from low-level host driver
+ *	@n_ports: Number of ports attached to host controller
+ *
+ *	This is a helper function which can be called from a driver's
+ *	xxx_init_one() probe function if the hardware uses traditional
+ *	IDE taskfile registers.
+ *
+ *	This function calls pci_enable_device(), reserves its register
+ *	regions, sets the DMA mask, enables bus master mode, and calls
+ *	ata_device_add().
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, negative errno-based value on error.
+ */
+
+int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
+		      unsigned int n_ports)
+{
+	struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
+	struct ata_port_info *port[2];
+	u8 tmp8, mask;
+	unsigned int legacy_mode = 0;
+	int disable_dev_on_err = 1;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	port[0] = port_info[0];
+	if (n_ports > 1)
+		port[1] = port_info[1];
+	else
+		port[1] = port[0];
+
+	if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
+	    && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
+		/* TODO: What if one channel is in native mode ... */
+		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
+		mask = (1 << 2) | (1 << 0);
+		if ((tmp8 & mask) != mask)
+			legacy_mode = (1 << 3);
+	}
+
+	/* FIXME... */
+	if ((!legacy_mode) && (n_ports > 2)) {
+		printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
+		n_ports = 2;
+		/* For now */
+	}
+
+	/* FIXME: Really for ATA it isn't safe because the device may be
+	   multi-purpose and we want to leave it alone if it was already
+	   enabled.  Secondly, for shared use, as Arjan says, we want
+	   refcounting.
+
+	   Checking dev->is_enabled is insufficient as this is not set at
+	   boot for the primary video, which is BIOS enabled. */
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc) {
+		disable_dev_on_err = 0;
+		goto err_out;
+	}
+
+	/* FIXME: Should use platform specific mappers for legacy port ranges */
+	if (legacy_mode) {
+		if (!request_region(0x1f0, 8, "libata")) {
+			struct resource *conflict, res;
+			res.start = 0x1f0;
+			res.end = 0x1f0 + 8 - 1;
+			conflict = ____request_resource(&ioport_resource, &res);
+			if (!strcmp(conflict->name, "libata"))
+				legacy_mode |= (1 << 0);
+			else {
+				disable_dev_on_err = 0;
+				printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
+			}
+		} else
+			legacy_mode |= (1 << 0);
+
+		if (!request_region(0x170, 8, "libata")) {
+			struct resource *conflict, res;
+			res.start = 0x170;
+			res.end = 0x170 + 8 - 1;
+			conflict = ____request_resource(&ioport_resource, &res);
+			if (!strcmp(conflict->name, "libata"))
+				legacy_mode |= (1 << 1);
+			else {
+				disable_dev_on_err = 0;
+				printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
+			}
+		} else
+			legacy_mode |= (1 << 1);
+	}
+
+	/* we have legacy mode, but all ports are unavailable */
+	if (legacy_mode == (1 << 3)) {
+		rc = -EBUSY;
+		goto err_out_regions;
+	}
+
+	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		goto err_out_regions;
+	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		goto err_out_regions;
+
+	if (legacy_mode) {
+		if (legacy_mode & (1 << 0))
+			probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
+		if (legacy_mode & (1 << 1))
+			probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
+	} else {
+		if (n_ports == 2)
+			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
+		else
+			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
+	}
+	if (!probe_ent && !probe_ent2) {
+		rc = -ENOMEM;
+		goto err_out_regions;
+	}
+
+	pci_set_master(pdev);
+
+	/* FIXME: check ata_device_add return */
+	if (legacy_mode) {
+		if (legacy_mode & (1 << 0))
+			ata_device_add(probe_ent);
+		if (legacy_mode & (1 << 1))
+			ata_device_add(probe_ent2);
+	} else
+		ata_device_add(probe_ent);
+
+	kfree(probe_ent);
+	kfree(probe_ent2);
+
+	return 0;
+
+err_out_regions:
+	if (legacy_mode & (1 << 0))
+		release_region(0x1f0, 8);
+	if (legacy_mode & (1 << 1))
+		release_region(0x170, 8);
+	pci_release_regions(pdev);
+err_out:
+	if (disable_dev_on_err)
+		pci_disable_device(pdev);
+	return rc;
+}
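+
+/*
+ * Usage sketch (hypothetical "xxx" driver; all xxx_* names are
+ * placeholders, not in-tree symbols): with this helper a driver's PCI
+ * probe routine often reduces to
+ *
+ *	static int xxx_init_one(struct pci_dev *pdev,
+ *				const struct pci_device_id *ent)
+ *	{
+ *		static struct ata_port_info info = {
+ *			.sht		= &xxx_sht,
+ *			.host_flags	= ATA_FLAG_SLAVE_POSS,
+ *			.pio_mask	= 0x1f,
+ *			.port_ops	= &xxx_port_ops,
+ *		};
+ *		struct ata_port_info *port_info[2] = { &info, &info };
+ *
+ *		return ata_pci_init_one(pdev, port_info, 2);
+ *	}
+ *
+ * where xxx_sht and xxx_port_ops are the driver's scsi_host_template
+ * and ata_port_operations tables, and 0x1f requests PIO modes 0-4.
+ */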
+
+/**
+ *	ata_pci_remove_one - PCI layer callback for device removal
+ *	@pdev: PCI device that was removed
+ *
+ *	The PCI layer indicates to libata via this hook that a
+ *	hot-unplug or module unload event has occurred.  Handle this
+ *	by unregistering all objects associated with this PCI device,
+ *	freeing those objects, and finally releasing PCI resources
+ *	and disabling the device.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ */
+
+void ata_pci_remove_one (struct pci_dev *pdev)
+{
+	struct device *dev = pci_dev_to_dev(pdev);
+	struct ata_host_set *host_set = dev_get_drvdata(dev);
+
+	ata_host_set_remove(host_set);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	dev_set_drvdata(dev, NULL);
+}
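+
+/*
+ * This hook is typically wired straight into a driver's struct
+ * pci_driver (sketch; the xxx_* names are placeholders):
+ *
+ *	static struct pci_driver xxx_pci_driver = {
+ *		.name		= DRV_NAME,
+ *		.id_table	= xxx_pci_tbl,
+ *		.probe		= xxx_init_one,
+ *		.remove		= ata_pci_remove_one,
+ *	};
+ */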
+
+/* move to PCI subsystem */
+int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
+{
+	unsigned long tmp = 0;
+
+	switch (bits->width) {
+	case 1: {
+		u8 tmp8 = 0;
+		pci_read_config_byte(pdev, bits->reg, &tmp8);
+		tmp = tmp8;
+		break;
+	}
+	case 2: {
+		u16 tmp16 = 0;
+		pci_read_config_word(pdev, bits->reg, &tmp16);
+		tmp = tmp16;
+		break;
+	}
+	case 4: {
+		u32 tmp32 = 0;
+		pci_read_config_dword(pdev, bits->reg, &tmp32);
+		tmp = tmp32;
+		break;
+	}
+
+	default:
+		return -EINVAL;
+	}
+
+	tmp &= bits->mask;
+
+	return (tmp == bits->val) ? 1 : 0;
+}
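+
+/*
+ * Example (illustrative register/mask values, not from a real chip):
+ * a driver checking a one-byte channel-enable bit in its PCI config
+ * space might use
+ *
+ *	static const struct pci_bits xxx_enable_bits = {
+ *		0x41, 1, 0x80, 0x80,
+ *	};
+ *
+ *	if (!pci_test_config_bits(pdev, &xxx_enable_bits))
+ *		return -ENOENT;
+ *
+ * i.e. register 0x41, width one byte, mask 0x80, expected value 0x80.
+ * pci_test_config_bits() returns 1 when the masked value matches,
+ * 0 when it doesn't, and -EINVAL for an unsupported width.
+ */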
+#endif /* CONFIG_PCI */
+
+
+static int __init ata_init(void)
+{
+	ata_wq = create_workqueue("ata");
+	if (!ata_wq)
+		return -ENOMEM;
+
+	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
+	return 0;
+}
+
+static void __exit ata_exit(void)
+{
+	destroy_workqueue(ata_wq);
+}
+
+module_init(ata_init);
+module_exit(ata_exit);
+
+static unsigned long ratelimit_time;
+static DEFINE_SPINLOCK(ata_ratelimit_lock);
+
+int ata_ratelimit(void)
+{
+	int rc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ata_ratelimit_lock, flags);
+
+	if (time_after(jiffies, ratelimit_time)) {
+		rc = 1;
+		ratelimit_time = jiffies + (HZ/5);
+	} else
+		rc = 0;
+
+	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
+
+	return rc;
+}
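+
+/*
+ * ata_ratelimit() returns 1 at most once per HZ/5 interval, so noisy
+ * paths can throttle their output, e.g. (sketch):
+ *
+ *	if (ata_ratelimit())
+ *		printk(KERN_WARNING "ata: ignoring spurious interrupt\n");
+ */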
+
+/*
+ * libata is essentially a library of internal helper functions for
+ * low-level ATA host controller drivers.  As such, the API/ABI is
+ * likely to change as new drivers are added and updated.
+ * Do not depend on ABI/API stability.
+ */
+
+EXPORT_SYMBOL_GPL(ata_std_bios_param);
+EXPORT_SYMBOL_GPL(ata_std_ports);
+EXPORT_SYMBOL_GPL(ata_device_add);
+EXPORT_SYMBOL_GPL(ata_host_set_remove);
+EXPORT_SYMBOL_GPL(ata_sg_init);
+EXPORT_SYMBOL_GPL(ata_sg_init_one);
+EXPORT_SYMBOL_GPL(ata_qc_complete);
+EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
+EXPORT_SYMBOL_GPL(ata_eng_timeout);
+EXPORT_SYMBOL_GPL(ata_tf_load);
+EXPORT_SYMBOL_GPL(ata_tf_read);
+EXPORT_SYMBOL_GPL(ata_noop_dev_select);
+EXPORT_SYMBOL_GPL(ata_std_dev_select);
+EXPORT_SYMBOL_GPL(ata_tf_to_fis);
+EXPORT_SYMBOL_GPL(ata_tf_from_fis);
+EXPORT_SYMBOL_GPL(ata_check_status);
+EXPORT_SYMBOL_GPL(ata_altstatus);
+EXPORT_SYMBOL_GPL(ata_exec_command);
+EXPORT_SYMBOL_GPL(ata_port_start);
+EXPORT_SYMBOL_GPL(ata_port_stop);
+EXPORT_SYMBOL_GPL(ata_host_stop);
+EXPORT_SYMBOL_GPL(ata_interrupt);
+EXPORT_SYMBOL_GPL(ata_qc_prep);
+EXPORT_SYMBOL_GPL(ata_bmdma_setup);
+EXPORT_SYMBOL_GPL(ata_bmdma_start);
+EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
+EXPORT_SYMBOL_GPL(ata_bmdma_status);
+EXPORT_SYMBOL_GPL(ata_bmdma_stop);
+EXPORT_SYMBOL_GPL(ata_port_probe);
+EXPORT_SYMBOL_GPL(sata_phy_reset);
+EXPORT_SYMBOL_GPL(__sata_phy_reset);
+EXPORT_SYMBOL_GPL(ata_bus_reset);
+EXPORT_SYMBOL_GPL(ata_port_disable);
+EXPORT_SYMBOL_GPL(ata_ratelimit);
+EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
+EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
+EXPORT_SYMBOL_GPL(ata_scsi_error);
+EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
+EXPORT_SYMBOL_GPL(ata_scsi_release);
+EXPORT_SYMBOL_GPL(ata_host_intr);
+EXPORT_SYMBOL_GPL(ata_dev_classify);
+EXPORT_SYMBOL_GPL(ata_dev_id_string);
+EXPORT_SYMBOL_GPL(ata_dev_config);
+EXPORT_SYMBOL_GPL(ata_scsi_simulate);
+
+EXPORT_SYMBOL_GPL(ata_timing_compute);
+EXPORT_SYMBOL_GPL(ata_timing_merge);
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL_GPL(pci_test_config_bits);
+EXPORT_SYMBOL_GPL(ata_pci_host_stop);
+EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
+EXPORT_SYMBOL_GPL(ata_pci_init_one);
+EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+#endif /* CONFIG_PCI */
diff -urN oldtree/drivers/scsi/sr.c newtree/drivers/scsi/sr.c
--- oldtree/drivers/scsi/sr.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/scsi/sr.c	2006-01-29 11:18:20.609155728 +0000
@@ -66,7 +66,7 @@
 #define SR_CAPABILITIES \
 	(CDC_CLOSE_TRAY|CDC_OPEN_TRAY|CDC_LOCK|CDC_SELECT_SPEED| \
 	 CDC_SELECT_DISC|CDC_MULTI_SESSION|CDC_MCN|CDC_MEDIA_CHANGED| \
-	 CDC_PLAY_AUDIO|CDC_RESET|CDC_IOCTLS|CDC_DRIVE_STATUS| \
+	 CDC_PLAY_AUDIO|CDC_RESET|CDC_DRIVE_STATUS| \
 	 CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_DVD_RAM|CDC_GENERIC_PACKET| \
 	 CDC_MRW|CDC_MRW_W|CDC_RAM)
 
@@ -113,7 +113,6 @@
 	.get_mcn		= sr_get_mcn,
 	.reset			= sr_reset,
 	.audio_ioctl		= sr_audio_ioctl,
-	.dev_ioctl		= sr_dev_ioctl,
 	.capability		= SR_CAPABILITIES,
 	.generic_packet		= sr_packet,
 };
@@ -472,17 +471,33 @@
 {
 	struct scsi_cd *cd = scsi_cd(inode->i_bdev->bd_disk);
 	struct scsi_device *sdev = cd->device;
+	void __user *argp = (void __user *)arg;
+	int ret;
 
-        /*
-         * Send SCSI addressing ioctls directly to mid level, send other
-         * ioctls to cdrom/block level.
-         */
-        switch (cmd) {
-                case SCSI_IOCTL_GET_IDLUN:
-                case SCSI_IOCTL_GET_BUS_NUMBER:
-                        return scsi_ioctl(sdev, cmd, (void __user *)arg);
+	/*
+	 * Send SCSI addressing ioctls directly to mid level, send other
+	 * ioctls to cdrom/block level.
+	 */
+	switch (cmd) {
+	case SCSI_IOCTL_GET_IDLUN:
+	case SCSI_IOCTL_GET_BUS_NUMBER:
+		return scsi_ioctl(sdev, cmd, argp);
 	}
-	return cdrom_ioctl(file, &cd->cdi, inode, cmd, arg);
+
+	ret = cdrom_ioctl(file, &cd->cdi, inode, cmd, arg);
+	if (ret != -ENOSYS)
+		return ret;
+
+	/*
+	 * ENODEV means that we didn't recognise the ioctl, or that we
+	 * cannot execute it in the current device state.  In either
+	 * case fall through to scsi_ioctl, which will return ENODEV
+	 * again if it doesn't recognise the ioctl.
+	 */
+	ret = scsi_nonblockable_ioctl(sdev, cmd, argp, NULL);
+	if (ret != -ENODEV)
+		return ret;
+	return scsi_ioctl(sdev, cmd, argp);
 }
 
 static int sr_block_media_changed(struct gendisk *disk)
diff -urN oldtree/drivers/scsi/sr.h newtree/drivers/scsi/sr.h
--- oldtree/drivers/scsi/sr.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/scsi/sr.h	2006-01-29 11:18:20.610155576 +0000
@@ -55,7 +55,6 @@
 int sr_reset(struct cdrom_device_info *);
 int sr_select_speed(struct cdrom_device_info *cdi, int speed);
 int sr_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
-int sr_dev_ioctl(struct cdrom_device_info *, unsigned int, unsigned long);
 
 int sr_is_xa(Scsi_CD *);
 
diff -urN oldtree/drivers/scsi/sr_ioctl.c newtree/drivers/scsi/sr_ioctl.c
--- oldtree/drivers/scsi/sr_ioctl.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/drivers/scsi/sr_ioctl.c	2006-01-29 11:18:20.611155424 +0000
@@ -542,22 +542,3 @@
 #endif
 	return is_xa;
 }
-
-int sr_dev_ioctl(struct cdrom_device_info *cdi,
-		 unsigned int cmd, unsigned long arg)
-{
-	Scsi_CD *cd = cdi->handle;
-	int ret;
-	
-	ret = scsi_nonblockable_ioctl(cd->device, cmd,
-				      (void __user *)arg, NULL);
-	/*
-	 * ENODEV means that we didn't recognise the ioctl, or that we
-	 * cannot execute it in the current device state.  In either
-	 * case fall through to scsi_ioctl, which will return ENDOEV again
-	 * if it doesn't recognise the ioctl
-	 */
-	if (ret != -ENODEV)
-		return ret;
-	return scsi_ioctl(cd->device, cmd, (void __user *)arg);
-}
diff -urN oldtree/fs/bio.c newtree/fs/bio.c
--- oldtree/fs/bio.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/fs/bio.c	2006-01-29 11:18:10.832641984 +0000
@@ -1198,11 +1198,11 @@
 		scale = 4;
 
 	/*
-	 * scale number of entries
+	 * Limit number of entries reserved -- mempools are only used when
+	 * the system is completely unable to allocate memory, so we only
+	 * need enough to make progress.
 	 */
-	bvec_pool_entries = megabytes * 2;
-	if (bvec_pool_entries > 256)
-		bvec_pool_entries = 256;
+	bvec_pool_entries = 1 + scale;
 
 	fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
 	if (!fs_bio_set)
diff -urN oldtree/fs/ext3/balloc.c newtree/fs/ext3/balloc.c
--- oldtree/fs/ext3/balloc.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/fs/ext3/balloc.c	2006-01-29 11:18:10.833641832 +0000
@@ -654,9 +654,11 @@
  */
 static int
 ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
-	struct buffer_head *bitmap_bh, int goal, struct ext3_reserve_window *my_rsv)
+			struct buffer_head *bitmap_bh, int goal,
+			unsigned long *count, struct ext3_reserve_window *my_rsv)
 {
 	int group_first_block, start, end;
+	unsigned long num = 0;
 
 	/* we do allocation within the reservation window if we have a window */
 	if (my_rsv) {
@@ -714,8 +716,18 @@
 			goto fail_access;
 		goto repeat;
 	}
-	return goal;
+	num++;
+	goal++;
+	while (num < *count && goal < end
+		&& ext3_test_allocatable(goal, bitmap_bh)
+		&& claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) {
+		num++;
+		goal++;
+	}
+	*count = num;
+	return goal - num;
 fail_access:
+	*count = num;
 	return -1;
 }
 
@@ -1000,6 +1012,31 @@
 	goto retry;
 }
 
+static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
+			struct super_block *sb, int size)
+{
+	struct ext3_reserve_window_node *next_rsv;
+	struct rb_node *next;
+	spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
+
+	if (!spin_trylock(rsv_lock))
+		return;
+
+	next = rb_next(&my_rsv->rsv_node);
+
+	if (!next)
+		my_rsv->rsv_end += size;
+	else {
+		next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node);
+
+		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
+			my_rsv->rsv_end += size;
+		else
+			my_rsv->rsv_end = next_rsv->rsv_start - 1;
+	}
+	spin_unlock(rsv_lock);
+}
+
 /*
  * This is the main function used to allocate a new block and its reservation
  * window.
@@ -1025,11 +1062,12 @@
 ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
 			unsigned int group, struct buffer_head *bitmap_bh,
 			int goal, struct ext3_reserve_window_node * my_rsv,
-			int *errp)
+			unsigned long *count, int *errp)
 {
 	unsigned long group_first_block;
 	int ret = 0;
 	int fatal;
+	unsigned long num = *count;
 
 	*errp = 0;
 
@@ -1052,7 +1090,8 @@
 	 * or last attempt to allocate a block with reservation turned on failed
 	 */
 	if (my_rsv == NULL ) {
-		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, NULL);
+		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
+						goal, count, NULL);
 		goto out;
 	}
 	/*
@@ -1082,6 +1121,8 @@
 	while (1) {
 		if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
 			!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) {
+			if (my_rsv->rsv_goal_size < *count)
+				my_rsv->rsv_goal_size = *count;
 			ret = alloc_new_reservation(my_rsv, goal, sb,
 							group, bitmap_bh);
 			if (ret < 0)
@@ -1089,16 +1130,21 @@
 
 			if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb))
 				goal = -1;
-		}
+		} else if (goal > 0 && (my_rsv->rsv_end-goal+1) < *count)
+			try_to_extend_reservation(my_rsv, sb,
+					*count-my_rsv->rsv_end + goal - 1);
+
 		if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb))
 		    || (my_rsv->rsv_end < group_first_block))
 			BUG();
 		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal,
-					   &my_rsv->rsv_window);
+					   &num, &my_rsv->rsv_window);
 		if (ret >= 0) {
-			my_rsv->rsv_alloc_hit++;
+			my_rsv->rsv_alloc_hit += num;
+			*count = num;
 			break;				/* succeed */
 		}
+		num = *count;
 	}
 out:
 	if (ret >= 0) {
@@ -1155,8 +1201,8 @@
  * bitmap, and then for any free bit if that fails.
  * This function also updates quota and i_blocks field.
  */
-int ext3_new_block(handle_t *handle, struct inode *inode,
-			unsigned long goal, int *errp)
+int ext3_new_blocks(handle_t *handle, struct inode *inode,
+			unsigned long goal, unsigned long *count, int *errp)
 {
 	struct buffer_head *bitmap_bh = NULL;
 	struct buffer_head *gdp_bh;
@@ -1179,6 +1225,7 @@
 	static int goal_hits, goal_attempts;
 #endif
 	unsigned long ngroups;
+	unsigned long num = *count;
 
 	*errp = -ENOSPC;
 	sb = inode->i_sb;
@@ -1190,7 +1237,7 @@
 	/*
 	 * Check quota for allocation of this block.
 	 */
-	if (DQUOT_ALLOC_BLOCK(inode, 1)) {
+	if (DQUOT_ALLOC_BLOCK(inode, num)) {
 		*errp = -EDQUOT;
 		return 0;
 	}
@@ -1245,7 +1292,7 @@
 		if (!bitmap_bh)
 			goto io_error;
 		ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
-					bitmap_bh, ret_block, my_rsv, &fatal);
+					bitmap_bh, ret_block, my_rsv, &num, &fatal);
 		if (fatal)
 			goto out;
 		if (ret_block >= 0)
@@ -1282,7 +1329,7 @@
 		if (!bitmap_bh)
 			goto io_error;
 		ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
-					bitmap_bh, -1, my_rsv, &fatal);
+					bitmap_bh, -1, my_rsv, &num, &fatal);
 		if (fatal)
 			goto out;
 		if (ret_block >= 0) 
@@ -1317,13 +1364,15 @@
 	target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb)
 				+ le32_to_cpu(es->s_first_data_block);
 
-	if (target_block == le32_to_cpu(gdp->bg_block_bitmap) ||
-	    target_block == le32_to_cpu(gdp->bg_inode_bitmap) ||
+	if (in_range(le32_to_cpu(gdp->bg_block_bitmap), target_block, num) ||
+	    in_range(le32_to_cpu(gdp->bg_inode_bitmap), target_block, num) ||
 	    in_range(target_block, le32_to_cpu(gdp->bg_inode_table),
+		      EXT3_SB(sb)->s_itb_per_group) ||
+	    in_range(target_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
 		      EXT3_SB(sb)->s_itb_per_group))
 		ext3_error(sb, "ext3_new_block",
 			    "Allocating block in system zone - "
-			    "block = %u", target_block);
+			    "blocks from %u, length %lu", target_block, num);
 
 	performed_allocation = 1;
 
@@ -1342,10 +1391,14 @@
 	jbd_lock_bh_state(bitmap_bh);
 	spin_lock(sb_bgl_lock(sbi, group_no));
 	if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
-		if (ext3_test_bit(ret_block,
-				bh2jh(bitmap_bh)->b_committed_data)) {
-			printk("%s: block was unexpectedly set in "
-				"b_committed_data\n", __FUNCTION__);
+		int i;
+
+		for (i = 0; i < num; i++) {
+			if (ext3_test_bit(ret_block,
+					bh2jh(bitmap_bh)->b_committed_data)) {
+				printk("%s: block was unexpectedly set in "
+					"b_committed_data\n", __FUNCTION__);
+			}
 		}
 	}
 	ext3_debug("found bit %d\n", ret_block);
@@ -1356,7 +1409,7 @@
 	/* ret_block was blockgroup-relative.  Now it becomes fs-relative */
 	ret_block = target_block;
 
-	if (ret_block >= le32_to_cpu(es->s_blocks_count)) {
+	if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) {
 		ext3_error(sb, "ext3_new_block",
 			    "block(%d) >= blocks count(%d) - "
 			    "block_group = %d, es == %p ", ret_block,
@@ -1374,9 +1427,9 @@
 
 	spin_lock(sb_bgl_lock(sbi, group_no));
 	gdp->bg_free_blocks_count =
-			cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1);
+			cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - num);
 	spin_unlock(sb_bgl_lock(sbi, group_no));
-	percpu_counter_mod(&sbi->s_freeblocks_counter, -1);
+	percpu_counter_mod(&sbi->s_freeblocks_counter, -num);
 
 	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
 	err = ext3_journal_dirty_metadata(handle, gdp_bh);
@@ -1389,6 +1442,8 @@
 
 	*errp = 0;
 	brelse(bitmap_bh);
+	DQUOT_FREE_BLOCK(inode, *count-num);
+	*count = num;
 	return ret_block;
 
 io_error:
@@ -1402,11 +1457,19 @@
 	 * Undo the block allocation
 	 */
 	if (!performed_allocation)
-		DQUOT_FREE_BLOCK(inode, 1);
+		DQUOT_FREE_BLOCK(inode, *count);
 	brelse(bitmap_bh);
 	return 0;
 }
 
+int ext3_new_block(handle_t *handle, struct inode *inode,
+			unsigned long goal, int *errp)
+{
+	unsigned long count = 1;
+
+	return ext3_new_blocks(handle, inode, goal, &count, errp);
+}
+
 unsigned long ext3_count_free_blocks(struct super_block *sb)
 {
 	unsigned long desc_count;
diff -urN oldtree/fs/ext3/dir.c newtree/fs/ext3/dir.c
--- oldtree/fs/ext3/dir.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/fs/ext3/dir.c	2006-01-29 11:18:10.834641680 +0000
@@ -95,11 +95,10 @@
 			 void * dirent, filldir_t filldir)
 {
 	int error = 0;
-	unsigned long offset, blk;
-	int i, num, stored;
-	struct buffer_head * bh, * tmp, * bha[16];
-	struct ext3_dir_entry_2 * de;
-	struct super_block * sb;
+	unsigned long offset;
+	int i, stored;
+	struct ext3_dir_entry_2 *de;
+	struct super_block *sb;
 	int err;
 	struct inode *inode = filp->f_dentry->d_inode;
 	int ret = 0;
@@ -124,12 +123,30 @@
 	}
 #endif
 	stored = 0;
-	bh = NULL;
 	offset = filp->f_pos & (sb->s_blocksize - 1);
 
 	while (!error && !stored && filp->f_pos < inode->i_size) {
-		blk = (filp->f_pos) >> EXT3_BLOCK_SIZE_BITS(sb);
-		bh = ext3_bread(NULL, inode, blk, 0, &err);
+		unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb);
+		struct buffer_head map_bh;
+		struct buffer_head *bh = NULL;
+
+		map_bh.b_state = 0;
+		err = ext3_get_blocks_handle(NULL, inode, blk, 1,
+						&map_bh, 0, 0);
+		if (err > 0) {
+			page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
+				&filp->f_ra,
+				filp,
+				map_bh.b_blocknr >>
+					(PAGE_CACHE_SHIFT - inode->i_blkbits),
+				1);
+			bh = ext3_bread(NULL, inode, blk, 0, &err);
+		}
+
+		/*
+		 * We ignore I/O errors on directories so users have a chance
+		 * of recovering data when there's a bad sector
+		 */
 		if (!bh) {
 			ext3_error (sb, "ext3_readdir",
 				"directory #%lu contains a hole at offset %lu",
@@ -138,26 +155,6 @@
 			continue;
 		}
 
-		/*
-		 * Do the readahead
-		 */
-		if (!offset) {
-			for (i = 16 >> (EXT3_BLOCK_SIZE_BITS(sb) - 9), num = 0;
-			     i > 0; i--) {
-				tmp = ext3_getblk (NULL, inode, ++blk, 0, &err);
-				if (tmp && !buffer_uptodate(tmp) &&
-						!buffer_locked(tmp))
-					bha[num++] = tmp;
-				else
-					brelse (tmp);
-			}
-			if (num) {
-				ll_rw_block (READA, num, bha);
-				for (i = 0; i < num; i++)
-					brelse (bha[i]);
-			}
-		}
-
 revalidate:
 		/* If the dir block has changed since the last call to
 		 * readdir(2), then we might be pointing to an invalid
diff -urN oldtree/fs/ext3/inode.c newtree/fs/ext3/inode.c
--- oldtree/fs/ext3/inode.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/fs/ext3/inode.c	2006-01-29 11:18:10.836641376 +0000
@@ -235,16 +235,6 @@
 	clear_inode(inode);	/* We must guarantee clearing of inode... */
 }
 
-static int ext3_alloc_block (handle_t *handle,
-			struct inode * inode, unsigned long goal, int *err)
-{
-	unsigned long result;
-
-	result = ext3_new_block(handle, inode, goal, err);
-	return result;
-}
-
-
 typedef struct {
 	__le32	*p;
 	__le32	key;
@@ -330,7 +320,7 @@
 		ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
 	}
 	if (boundary)
-		*boundary = (i_block & (ptrs - 1)) == (final - 1);
+		*boundary = final - 1 - (i_block & (ptrs - 1));
 	return n;
 }
 
@@ -476,15 +466,115 @@
 
 	return ext3_find_near(inode, partial);
 }
+/**
+ *	ext3_blks_to_allocate: Look up the block map and count the number
+ *	of direct blocks that need to be allocated for the given branch.
+ *
+ *	@branch: chain of indirect blocks
+ *	@k: number of blocks needed for indirect blocks
+ *	@blks: number of data blocks to be mapped
+ *	@blocks_to_boundary: the offset in the indirect block
+ *
+ *	Returns the total number of blocks to be allocated, including the
+ *	direct and indirect blocks.
+ */
+static int
+ext3_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
+		int blocks_to_boundary)
+{
+	unsigned long count = 0;
+
+	/*
+	 * Simple case: the [t,d]indirect block(s) have not been allocated
+	 * yet, so clearly no blocks on that path have been allocated
+	 */
+	if (k > 0) {
+		/* right now we don't handle cross-boundary allocation */
+		if (blks < blocks_to_boundary + 1)
+			count += blks;
+		else
+			count += blocks_to_boundary + 1;
+		return count;
+	}
+
+	count++;
+	while (count < blks && count <= blocks_to_boundary
+		&& le32_to_cpu(*(branch[0].p + count)) == 0) {
+		count++;
+	}
+	return count;
+}
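+
+/*
+ * Worked example (hypothetical block map): with k == 0, blks == 8 and
+ * blocks_to_boundary == 5, if the three map entries following
+ * branch[0].p are still zero but the fourth is in use, the loop above
+ * stops at count == 4: the requested block plus three more free,
+ * in-boundary direct blocks.
+ */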
+/**
+ *	ext3_alloc_blocks: allocate multiple blocks needed for a branch
+ *	@indirect_blks: the number of blocks that need to be allocated for
+ *			indirect blocks
+ *
+ *	@new_blocks: on return it will store the new block numbers for
+ *	the indirect blocks (if needed) and the first direct block
+ *	@blks: on return it will store the total number of allocated
+ *		direct blocks
+ */
+static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
+			unsigned long goal, int indirect_blks, int blks,
+			unsigned long long new_blocks[4], int *err)
+{
+	int target;
+	unsigned long count;
+	int index, i;
+	unsigned long current_block = 0;
+	int ret = 0;
+
+	/*
+	 * Here we try to allocate the requested multiple blocks at once,
+	 * on a best-effort basis.
+	 * To build a branch, we need to allocate blocks for the indirect
+	 * blocks (if not allocated yet) and at least the first direct
+	 * block of this branch.  That's the minimum number of blocks
+	 * required.
+	 *
+	 */
+	target = blks + indirect_blks;
+	index = 0; count = 0;
+
+	while (1) {
+		count = target;
+		/* allocating blocks for indirect blocks and direct blocks */
+		current_block = ext3_new_blocks(handle, inode, goal, &count, err);
+		if (*err)
+			goto failed_out;
+
+		target -= count;
+		/* allocate blocks for indirect blocks */
+		while (index < indirect_blks && count) {
+			new_blocks[index++] = current_block++;
+			count--;
+		}
+
+		if (count > 0)
+			break;
+	}
+
+	/* save the new block number for the first direct block */
+	new_blocks[index] = current_block;
+	/* total number of blocks allocated for direct blocks */
+	ret = count;
+	*err = 0;
+	return ret;
+failed_out:
+	for (i = 0; i < index; i++)
+		ext3_free_blocks(handle, inode, new_blocks[i], 1);
+	return ret;
+}
 
 /**
  *	ext3_alloc_branch - allocate and set up a chain of blocks.
  *	@inode: owner
- *	@num: depth of the chain (number of blocks to allocate)
+ *	@indirect_blks: number of allocated indirect blocks
+ *	@blks: number of allocated direct blocks
  *	@offsets: offsets (in the blocks) to store the pointers to next.
  *	@branch: place to store the chain in.
  *
- *	This function allocates @num blocks, zeroes out all but the last one,
+ *	This function allocates blocks, zeroes out all but the last one,
  *	links them into chain and (if we are synchronous) writes them to disk.
  *	In other words, it prepares a branch that can be spliced onto the
  *	inode. It stores the information about that chain in the branch[], in
@@ -503,71 +593,79 @@
  */
 
 static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
-			     int num,
-			     unsigned long goal,
-			     int *offsets,
-			     Indirect *branch)
+			int indirect_blks, int *blks, unsigned long goal,
+			int *offsets, Indirect *branch)
 {
 	int blocksize = inode->i_sb->s_blocksize;
-	int n = 0, keys = 0;
+	int i, n = 0;
 	int err = 0;
-	int i;
-	int parent = ext3_alloc_block(handle, inode, goal, &err);
-
-	branch[0].key = cpu_to_le32(parent);
-	if (parent) {
-		for (n = 1; n < num; n++) {
-			struct buffer_head *bh;
-			/* Allocate the next block */
-			int nr = ext3_alloc_block(handle, inode, parent, &err);
-			if (!nr)
-				break;
-			branch[n].key = cpu_to_le32(nr);
+	struct buffer_head *bh;
+	int num;
+	unsigned long long new_blocks[4];
+	unsigned long long current_block;
 
-			/*
-			 * Get buffer_head for parent block, zero it out
-			 * and set the pointer to new one, then send
-			 * parent to disk.  
-			 */
-			bh = sb_getblk(inode->i_sb, parent);
-			if (!bh)
-				break;
-			keys = n+1;
-			branch[n].bh = bh;
-			lock_buffer(bh);
-			BUFFER_TRACE(bh, "call get_create_access");
-			err = ext3_journal_get_create_access(handle, bh);
-			if (err) {
-				unlock_buffer(bh);
-				brelse(bh);
-				break;
-			}
+	num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
+					*blks, new_blocks, &err);
+	if (err)
+		return err;
 
-			memset(bh->b_data, 0, blocksize);
-			branch[n].p = (__le32*) bh->b_data + offsets[n];
-			*branch[n].p = branch[n].key;
-			BUFFER_TRACE(bh, "marking uptodate");
-			set_buffer_uptodate(bh);
+	branch[0].key = cpu_to_le32(new_blocks[0]);
+	/*
+	 * metadata blocks and data blocks are allocated.
+	 */
+	for (n = 1; n <= indirect_blks;  n++) {
+		/*
+		 * Get buffer_head for parent block, zero it out
+		 * and set the pointer to new one, then send
+		 * parent to disk.
+		 */
+		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+		branch[n].bh = bh;
+		lock_buffer(bh);
+		BUFFER_TRACE(bh, "call get_create_access");
+		err = ext3_journal_get_create_access(handle, bh);
+		if (err) {
 			unlock_buffer(bh);
+			brelse(bh);
+			goto failed;
+		}
 
-			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			err = ext3_journal_dirty_metadata(handle, bh);
-			if (err)
-				break;
-
-			parent = nr;
+		memset(bh->b_data, 0, blocksize);
+		branch[n].p = (__le32*) bh->b_data + offsets[n];
+		branch[n].key = cpu_to_le32(new_blocks[n]);
+		*branch[n].p = branch[n].key;
+		if (n == indirect_blks) {
+			current_block = new_blocks[n];
+			/*
+			 * End of chain, update the last new metablock of
+			 * the chain to point to the new allocated
+			 * data blocks numbers
+			 */
+			for (i = 1; i < num; i++)
+				*(branch[n].p + i) = cpu_to_le32(++current_block);
 		}
-	}
-	if (n == num)
-		return 0;
+		BUFFER_TRACE(bh, "marking uptodate");
+		set_buffer_uptodate(bh);
+		unlock_buffer(bh);
 
+		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+		err = ext3_journal_dirty_metadata(handle, bh);
+		if (err)
+			goto failed;
+	}
+	*blks = num;
+	return err;
+failed:
 	/* Allocation failed, free what we already allocated */
-	for (i = 1; i < keys; i++) {
+	for (i = 1; i <= n; i++) {
 		BUFFER_TRACE(branch[i].bh, "call journal_forget");
 		ext3_journal_forget(handle, branch[i].bh);
 	}
-	for (i = 0; i < keys; i++)
-		ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
+	for (i = 0; i < indirect_blks; i++)
+		ext3_free_blocks(handle, inode, new_blocks[i], 1);
+
+	ext3_free_blocks(handle, inode, new_blocks[i], num);
+
 	return err;
 }
 
@@ -578,7 +676,8 @@
  *	@chain: chain of indirect blocks (with a missing link - see
  *		ext3_alloc_branch)
  *	@where: location of missing link
- *	@num:   number of blocks we are adding
+ *	@num:   number of indirect blocks we are adding
+ *	@blks:  number of direct blocks we are adding
  *
  *	This function fills the missing link and does all housekeeping needed in
  *	inode (->i_blocks, etc.). In case of success we end up with the full
@@ -586,12 +685,12 @@
  */
 
 static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
-			      Indirect chain[4], Indirect *where, int num)
+			      Indirect *where, int num, int blks)
 {
 	int i;
 	int err = 0;
 	struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;
-
+	unsigned long current_block;
 	/*
 	 * If we're splicing into a [td]indirect block (as opposed to the
 	 * inode) then we need to get write access to the [td]indirect block
@@ -606,6 +705,13 @@
 	/* That's it */
 
 	*where->p = where->key;
+	/* update the host buffer_head or inode to point to
+	 * the just-allocated direct blocks */
+	if (num == 0 && blks > 1) {
+		current_block = le32_to_cpu(where->key) + 1;
+		for (i = 1; i < blks; i++)
+			*(where->p + i) = cpu_to_le32(current_block++);
+	}
 
 	/*
 	 * update the most recently allocated logical & physical block
@@ -613,8 +719,8 @@
 	 * allocation
 	 */
 	if (block_i) {
-		block_i->last_alloc_logical_block = block;
-		block_i->last_alloc_physical_block = le32_to_cpu(where[num-1].key);
+		block_i->last_alloc_logical_block = block + blks - 1;
+		block_i->last_alloc_physical_block = le32_to_cpu(where[num].key) + blks - 1;
 	}
 
 	/* We are done with atomic stuff, now do the rest of housekeeping */
@@ -647,10 +753,13 @@
 	return err;
 
 err_out:
-	for (i = 1; i < num; i++) {
+	for (i = 1; i <= num; i++) {
 		BUFFER_TRACE(where[i].bh, "call journal_forget");
 		ext3_journal_forget(handle, where[i].bh);
+		ext3_free_blocks(handle, inode, le32_to_cpu(where[i-1].key), 1);
 	}
+	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
+
 	return err;
 }
 
@@ -669,23 +778,30 @@
  * akpm: `handle' can be NULL if create == 0.
  *
  * The BKL may not be held on entry here.  Be sure to take it early.
+ * return > 0, # of blocks mapped or allocated
+ * return < 0, error case
  */
 
-static int
-ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
-		struct buffer_head *bh_result, int create, int extend_disksize)
+int
+ext3_get_blocks_handle(handle_t *handle, struct inode *inode, sector_t iblock,
+		unsigned long maxblocks, struct buffer_head *bh_result,
+		int create, int extend_disksize)
 {
 	int err = -EIO;
 	int offsets[4];
 	Indirect chain[4];
 	Indirect *partial;
 	unsigned long goal;
-	int left;
-	int boundary = 0;
-	const int depth = ext3_block_to_path(inode, iblock, offsets, &boundary);
+	int indirect_blks;
+	int blocks_to_boundary = 0;
+	int depth;
 	struct ext3_inode_info *ei = EXT3_I(inode);
+	int count = 0;
+	unsigned long first_block = 0;
 	J_ASSERT(handle != NULL || create == 0);
+	depth = ext3_block_to_path(inode, iblock, offsets, &blocks_to_boundary);
 
 	if (depth == 0)
 		goto out;
@@ -694,7 +810,29 @@
 
 	/* Simplest case - block found, no allocation needed */
 	if (!partial) {
+		first_block = le32_to_cpu(chain[depth - 1].key);
 		clear_buffer_new(bh_result);
+		count++;
+		/* map more blocks */
+		while (count < maxblocks && count <= blocks_to_boundary) {
+			if (!verify_chain(chain, partial)) {
+				/*
+				 * Indirect block might be removed by
+				 * truncate while we were reading it.
+				 * Handling of that case: forget what we've
+				 * got now. Flag the err as EAGAIN, so it
+				 * will reread.
+				 */
+				err = -EAGAIN;
+				count = 0;
+				break;
+			}
+			if (le32_to_cpu(*(chain[depth-1].p + count)) ==
+					(first_block + count))
+				count++;
+			else
+				break;
+		}
 		goto got_it;
 	}
 
@@ -723,6 +861,7 @@
 		}
 		partial = ext3_get_branch(inode, depth, offsets, chain, &err);
 		if (!partial) {
+			count++;
 			up(&ei->truncate_sem);
 			if (err)
 				goto cleanup;
@@ -740,12 +879,19 @@
 
 	goal = ext3_find_goal(inode, iblock, chain, partial);
 
-	left = (chain + depth) - partial;
+	/* the number of blocks we need to allocate for [d,t]indirect blocks */
+	indirect_blks = (chain + depth) - partial - 1;
 
 	/*
+	 * Next, look up the indirect map to count the total number of
+	 * direct blocks to allocate for this branch.
+	 */
+	count = ext3_blks_to_allocate(partial, indirect_blks,
+					maxblocks, blocks_to_boundary);
+	/*
 	 * Block out ext3_truncate while we alter the tree
 	 */
-	err = ext3_alloc_branch(handle, inode, left, goal,
+	err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
 				offsets + (partial - chain), partial);
 
 	/*
@@ -756,8 +902,8 @@
 	 * may need to return -EAGAIN upwards in the worst case.  --sct
 	 */
 	if (!err)
-		err = ext3_splice_branch(handle, inode, iblock, chain,
-					 partial, left);
+		err = ext3_splice_branch(handle, inode, iblock,
+					partial, indirect_blks, count);
 	/*
 	 * i_disksize growing is protected by truncate_sem.  Don't forget to
 	 * protect it if you're about to implement concurrent
@@ -772,8 +918,9 @@
 	set_buffer_new(bh_result);
 got_it:
 	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
-	if (boundary)
+	if (blocks_to_boundary == 0)
 		set_buffer_boundary(bh_result);
+	err = count;
 	/* Clean up and exit */
 	partial = chain + depth - 1;	/* the whole chain */
 cleanup:
@@ -787,21 +934,6 @@
 	return err;
 }
 
-static int ext3_get_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create)
-{
-	handle_t *handle = NULL;
-	int ret;
-
-	if (create) {
-		handle = ext3_journal_current_handle();
-		J_ASSERT(handle != 0);
-	}
-	ret = ext3_get_block_handle(handle, inode, iblock,
-				bh_result, create, 1);
-	return ret;
-}
-
 #define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)
 
 static int
@@ -815,6 +947,9 @@
 	if (!handle)
 		goto get_block;		/* A read */
 
+	if (max_blocks == 1)
+		goto get_block;		/* A single block get */
+
 	if (handle->h_transaction->t_state == T_LOCKED) {
 		/*
 		 * Huge direct-io writes can hold off commits for long
@@ -841,13 +976,31 @@
 	}
 
 get_block:
-	if (ret == 0)
-		ret = ext3_get_block_handle(handle, inode, iblock,
-					bh_result, create, 0);
-	bh_result->b_size = (1 << inode->i_blkbits);
+	if (ret == 0) {
+		ret = ext3_get_blocks_handle(handle, inode, iblock,
+					max_blocks, bh_result, create, 0);
+		if (ret > 0) {
+			bh_result->b_size = (ret << inode->i_blkbits);
+			ret = 0;
+		}
+	}
 	return ret;
 }
 
+static int ext3_get_blocks(struct inode *inode, sector_t iblock,
+		unsigned long maxblocks, struct buffer_head *bh_result,
+		int create)
+{
+	return ext3_direct_io_get_blocks(inode, iblock, maxblocks,
+					bh_result, create);
+}
+
+static int ext3_get_block(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh_result, int create)
+{
+	return ext3_get_blocks(inode, iblock, 1, bh_result, create);
+}
+
 /*
  * `handle' can be NULL if create is zero
  */
@@ -862,8 +1015,8 @@
 	dummy.b_state = 0;
 	dummy.b_blocknr = -1000;
 	buffer_trace_init(&dummy.b_history);
-	*errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
-	if (!*errp && buffer_mapped(&dummy)) {
+	*errp = ext3_get_blocks_handle(handle, inode, block, 1, &dummy, create, 1);
+	if ((*errp == 1) && buffer_mapped(&dummy)) {
 		struct buffer_head *bh;
 		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
 		if (!bh) {
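
Note on the new calling convention: ext3_get_blocks_handle() now returns the
number of blocks mapped or allocated (> 0) rather than 0 on success, with
negative values still meaning errors. A minimal sketch of a caller, mirroring
the pattern already used in ext3_direct_io_get_blocks() above (the wrapper
name map_extent is hypothetical):

	static int map_extent(handle_t *handle, struct inode *inode,
			      sector_t iblock, unsigned long maxblocks,
			      struct buffer_head *bh)
	{
		int ret = ext3_get_blocks_handle(handle, inode, iblock,
						 maxblocks, bh, 1, 0);
		if (ret > 0) {
			/* ret blocks were mapped; report the extent in bytes */
			bh->b_size = (ret << inode->i_blkbits);
			return 0;
		}
		return ret;	/* < 0: error propagated from the allocator */
	}
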
diff -urN oldtree/fs/select.c newtree/fs/select.c
--- oldtree/fs/select.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/fs/select.c	2006-01-29 11:18:10.837641224 +0000
@@ -29,12 +29,6 @@
 #define ROUND_UP(x,y) (((x)+(y)-1)/(y))
 #define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
 
-struct poll_table_entry {
-	struct file * filp;
-	wait_queue_t wait;
-	wait_queue_head_t * wait_address;
-};
-
 struct poll_table_page {
 	struct poll_table_page * next;
 	struct poll_table_entry * entry;
@@ -64,13 +58,23 @@
 	init_poll_funcptr(&pwq->pt, __pollwait);
 	pwq->error = 0;
 	pwq->table = NULL;
+	pwq->inline_index = 0;
 }
 
 EXPORT_SYMBOL(poll_initwait);
 
+static void free_poll_entry(struct poll_table_entry *entry)
+{
+	remove_wait_queue(entry->wait_address, &entry->wait);
+	fput(entry->filp);
+}
+
 void poll_freewait(struct poll_wqueues *pwq)
 {
 	struct poll_table_page * p = pwq->table;
+	int i;
+
+	for (i = 0; i < pwq->inline_index; i++)
+		free_poll_entry(pwq->inline_entries + i);
 	while (p) {
 		struct poll_table_entry * entry;
 		struct poll_table_page *old;
@@ -78,8 +82,7 @@
 		entry = p->entry;
 		do {
 			entry--;
-			remove_wait_queue(entry->wait_address,&entry->wait);
-			fput(entry->filp);
+			free_poll_entry(entry);
 		} while (entry > p->entries);
 		old = p;
 		p = p->next;
@@ -89,12 +92,14 @@
 
 EXPORT_SYMBOL(poll_freewait);
 
-static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
-		       poll_table *_p)
+static struct poll_table_entry *poll_get_entry(poll_table *_p)
 {
 	struct poll_wqueues *p = container_of(_p, struct poll_wqueues, pt);
 	struct poll_table_page *table = p->table;
 
+	if (p->inline_index < N_INLINE_POLL_ENTRIES)
+		return p->inline_entries + p->inline_index++;
+
 	if (!table || POLL_TABLE_FULL(table)) {
 		struct poll_table_page *new_table;
 
@@ -102,7 +107,7 @@
 		if (!new_table) {
 			p->error = -ENOMEM;
 			__set_current_state(TASK_RUNNING);
-			return;
+			return NULL;
 		}
 		new_table->entry = new_table->entries;
 		new_table->next = table;
@@ -110,16 +115,21 @@
 		table = new_table;
 	}
 
-	/* Add a new entry */
-	{
-		struct poll_table_entry * entry = table->entry;
-		table->entry = entry+1;
-	 	get_file(filp);
-	 	entry->filp = filp;
-		entry->wait_address = wait_address;
-		init_waitqueue_entry(&entry->wait, current);
-		add_wait_queue(wait_address,&entry->wait);
-	}
+	return table->entry++;
+}
+
+/* Add a new entry */
+static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
+		       poll_table *p)
+{
+	struct poll_table_entry *entry = poll_get_entry(p);
+	if (!entry)
+		return;
+	get_file(filp);
+	entry->filp = filp;
+	entry->wait_address = wait_address;
+	init_waitqueue_entry(&entry->wait, current);
+	add_wait_queue(wait_address, &entry->wait);
 }
 
 #define FDS_IN(fds, n)		(fds->in + n)
@@ -274,16 +284,6 @@
 	return retval;
 }
 
-static void *select_bits_alloc(int size)
-{
-	return kmalloc(6 * size, GFP_KERNEL);
-}
-
-static void select_bits_free(void *bits, int size)
-{
-	kfree(bits);
-}
-
 /*
  * We can actually return ERESTARTSYS instead of EINTR, but I'd
  * like to be certain this leads to no problems. So I return
@@ -303,6 +303,8 @@
 	long timeout;
 	int ret, size, max_fdset;
 	struct fdtable *fdt;
+	/* Allocate small arguments on the stack to save memory and be faster */
+	char stack_fds[SELECT_STACK_ALLOC];
 
 	timeout = MAX_SCHEDULE_TIMEOUT;
 	if (tvp) {
@@ -344,7 +346,10 @@
 	 */
 	ret = -ENOMEM;
 	size = FDS_BYTES(n);
-	bits = select_bits_alloc(size);
+	if (6 * size < SELECT_STACK_ALLOC)
+		bits = stack_fds;
+	else
+		bits = kmalloc(6 * size, GFP_KERNEL);
 	if (!bits)
 		goto out_nofds;
 	fds.in      = (unsigned long *)  bits;
@@ -390,7 +395,8 @@
 		ret = -EFAULT;
 
 out:
-	select_bits_free(bits, size);
+	if (bits != stack_fds)
+		kfree(bits);
 out_nofds:
 	return ret;
 }
@@ -464,6 +470,8 @@
 	return count;
 }
 
+#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list)) / sizeof(struct pollfd))
+
 asmlinkage long sys_poll(struct pollfd __user * ufds, unsigned int nfds, long timeout)
 {
 	struct poll_wqueues table;
@@ -473,6 +481,9 @@
  	struct poll_list *walk;
 	struct fdtable *fdt;
 	int max_fdset;
+	/* Allocate small arguments on the stack to save memory and be faster */
+	char stack_pps[POLL_STACK_ALLOC];
+	struct poll_list *stack_pp = NULL;
 
 	/* Do a sanity check on nfds ... */
 	rcu_read_lock();
@@ -498,14 +509,23 @@
 	err = -ENOMEM;
 	while(i!=0) {
 		struct poll_list *pp;
-		pp = kmalloc(sizeof(struct poll_list)+
-				sizeof(struct pollfd)*
-				(i>POLLFD_PER_PAGE?POLLFD_PER_PAGE:i),
-					GFP_KERNEL);
-		if(pp==NULL)
-			goto out_fds;
+		int num, size;
+		if (stack_pp == NULL)
+			num = N_STACK_PPS;
+		else
+			num = POLLFD_PER_PAGE;
+		if (num > i)
+			num = i;
+		size = sizeof(struct poll_list) + sizeof(struct pollfd)*num;
+		if (!stack_pp)
+			stack_pp = pp = (struct poll_list *)stack_pps;
+		else {
+			pp = kmalloc(size, GFP_KERNEL);
+			if (!pp)
+				goto out_fds;
+		}
 		pp->next=NULL;
-		pp->len = (i>POLLFD_PER_PAGE?POLLFD_PER_PAGE:i);
+		pp->len = num;
 		if (head == NULL)
 			head = pp;
 		else
@@ -513,7 +533,7 @@
 
 		walk = pp;
 		if (copy_from_user(pp->entries, ufds + nfds-i, 
-				sizeof(struct pollfd)*pp->len)) {
+				sizeof(struct pollfd)*num)) {
 			err = -EFAULT;
 			goto out_fds;
 		}
@@ -541,7 +561,8 @@
 	walk = head;
 	while(walk!=NULL) {
 		struct poll_list *pp = walk->next;
-		kfree(walk);
+		if (walk != stack_pp)
+			kfree(walk);
 		walk = pp;
 	}
 	poll_freewait(&table);
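
The select() and poll() changes above share one pattern: keep small argument
sets in an on-stack buffer (SELECT_STACK_ALLOC / POLL_STACK_ALLOC bytes) and
fall back to kmalloc() only for large ones, then free only what came from the
heap. A minimal user-space sketch of the same pattern (the process() helper
and STACK_ALLOC constant are hypothetical):

	#include <stdlib.h>
	#include <string.h>

	#define STACK_ALLOC 256

	static int process(const char *data, size_t size)
	{
		char stack_buf[STACK_ALLOC];
		char *buf;

		if (size <= sizeof(stack_buf))
			buf = stack_buf;	/* fast path: no allocator call */
		else {
			buf = malloc(size);
			if (!buf)
				return -1;	/* -ENOMEM in the kernel code */
		}
		memcpy(buf, data, size);
		/* ... work on buf ... */
		if (buf != stack_buf)		/* free only heap memory */
			free(buf);
		return 0;
	}
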
diff -urN oldtree/fs/smbfs/dir.c newtree/fs/smbfs/dir.c
--- oldtree/fs/smbfs/dir.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/fs/smbfs/dir.c	2006-01-29 11:18:20.616154664 +0000
@@ -209,6 +209,8 @@
 	ctl.valid  = 1;
 read_really:
 	result = server->ops->readdir(filp, dirent, filldir, &ctl);
+	if (result == -ERESTARTSYS && page)
+		ClearPageUptodate(page);
 	if (ctl.idx == -1)
 		goto invalid_cache;	/* retry */
 	ctl.head.end = ctl.fpos - 1;
@@ -217,7 +219,8 @@
 	if (page) {
 		cache->head = ctl.head;
 		kunmap(page);
-		SetPageUptodate(page);
+		if (result != -ERESTARTSYS)
+			SetPageUptodate(page);
 		unlock_page(page);
 		page_cache_release(page);
 	}
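
The smbfs fix above enforces a simple invariant: a cache page may only be
marked uptodate when the readdir fill ran to completion, and an -ERESTARTSYS
interruption must leave the page invalid so the next lookup refills it. A
plain-C sketch of that rule (the structure and names are hypothetical, not
kernel API):

	#define MY_ERESTARTSYS 512	/* stand-in for the kernel's ERESTARTSYS */

	struct dir_cache_page {
		int uptodate;		/* are the cached entries valid? */
		/* ... cached directory entries ... */
	};

	static void finish_fill(struct dir_cache_page *pg, int result)
	{
		if (result == -MY_ERESTARTSYS)
			pg->uptodate = 0;	/* interrupted: refill next time */
		else
			pg->uptodate = 1;	/* fill completed: publish it */
	}
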
diff -urN oldtree/include/asm-i386/uaccess.h newtree/include/asm-i386/uaccess.h
--- oldtree/include/asm-i386/uaccess.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/include/asm-i386/uaccess.h	2006-01-29 11:18:20.621153904 +0000
@@ -389,6 +389,8 @@
 				const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll(void *to,
 				const void __user *from, unsigned long n);
+unsigned long __must_check __copy_from_user_ll_nocache(void *to,
+				const void __user *from, unsigned long n);
 
 /*
  * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
@@ -477,12 +479,43 @@
 	return __copy_from_user_ll(to, from, n);
 }
 
+#define ARCH_HAS_NOCACHE_UACCESS
+
+static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+				const void __user *from, unsigned long n)
+{
+	if (__builtin_constant_p(n)) {
+		unsigned long ret;
+
+		switch (n) {
+		case 1:
+			__get_user_size(*(u8 *)to, from, 1, ret, 1);
+			return ret;
+		case 2:
+			__get_user_size(*(u16 *)to, from, 2, ret, 2);
+			return ret;
+		case 4:
+			__get_user_size(*(u32 *)to, from, 4, ret, 4);
+			return ret;
+		}
+	}
+	return __copy_from_user_ll_nocache(to, from, n);
+}
+
 static inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
        might_sleep();
        return __copy_from_user_inatomic(to, from, n);
 }
+
+static inline unsigned long
+__copy_from_user_nocache(void *to, const void __user *from, unsigned long n)
+{
+       might_sleep();
+       return __copy_from_user_inatomic_nocache(to, from, n);
+}
+
 unsigned long __must_check copy_to_user(void __user *to,
 				const void *from, unsigned long n);
 unsigned long __must_check copy_from_user(void *to,
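
The nocache variant above reuses a standard trick: __builtin_constant_p(n)
lets gcc resolve small constant-size copies into single fixed-size moves at
compile time, with the out-of-line __copy_from_user_ll_nocache() handling
everything else. A self-contained sketch of the same dispatch (my_copy and
my_copy_generic are hypothetical, and plain memcpy stands in for the real
fault-handling paths):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	static unsigned long my_copy_generic(void *to, const void *from, size_t n)
	{
		memcpy(to, from, n);	/* stand-in for the generic path */
		return 0;		/* 0 == nothing left uncopied */
	}

	static inline unsigned long my_copy(void *to, const void *from, size_t n)
	{
		if (__builtin_constant_p(n)) {
			/* folds to one fixed-size move when n is 1, 2 or 4 */
			switch (n) {
			case 1: *(uint8_t *)to = *(const uint8_t *)from; return 0;
			case 2: *(uint16_t *)to = *(const uint16_t *)from; return 0;
			case 4: *(uint32_t *)to = *(const uint32_t *)from; return 0;
			}
		}
		return my_copy_generic(to, from, n);
	}

Unlike this sketch, the real helpers return the number of bytes left uncopied
and go through __get_user_size() so that faults are handled via the exception
tables.
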
diff -urN oldtree/include/linux/cdrom.h newtree/include/linux/cdrom.h
--- oldtree/include/linux/cdrom.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/include/linux/cdrom.h	2006-01-29 11:18:20.612155272 +0000
@@ -378,7 +378,6 @@
 #define CDC_MEDIA_CHANGED 	0x80    /* media changed */
 #define CDC_PLAY_AUDIO		0x100   /* audio functions */
 #define CDC_RESET               0x200   /* hard reset device */
-#define CDC_IOCTLS              0x400   /* driver has non-standard ioctls */
 #define CDC_DRIVE_STATUS        0x800   /* driver implements drive status */
 #define CDC_GENERIC_PACKET	0x1000	/* driver implements generic packets */
 #define CDC_CD_R		0x2000	/* drive is a CD-R */
@@ -974,9 +973,7 @@
 	int (*reset) (struct cdrom_device_info *);
 	/* play stuff */
 	int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *);
-	/* dev-specific */
- 	int (*dev_ioctl) (struct cdrom_device_info *,
-			  unsigned int, unsigned long);
+
 /* driver specifications */
 	const int capability;   /* capability flags */
 	int n_minors;           /* number of active minor devices */
diff -urN oldtree/include/linux/ext3_fs.h newtree/include/linux/ext3_fs.h
--- oldtree/include/linux/ext3_fs.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/include/linux/ext3_fs.h	2006-01-29 11:18:10.838641072 +0000
@@ -36,7 +36,8 @@
  * Define EXT3_RESERVATION to reserve data blocks for expanding files
  */
 #define EXT3_DEFAULT_RESERVE_BLOCKS     8
-#define EXT3_MAX_RESERVE_BLOCKS         1024
+/* max window size: 1024 (direct blocks) + 3 ([t,d]indirect blocks) */
+#define EXT3_MAX_RESERVE_BLOCKS         1027
 #define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0
 /*
  * Always enable hashed directories
@@ -732,6 +733,8 @@
 extern int ext3_bg_has_super(struct super_block *sb, int group);
 extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
 extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *);
+extern int ext3_new_blocks (handle_t *, struct inode *, unsigned long,
+			unsigned long *, int *);
 extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long,
 			      unsigned long);
 extern void ext3_free_blocks_sb (handle_t *, struct super_block *,
@@ -772,9 +775,12 @@
 
 
 /* inode.c */
-extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
-extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
-extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
+int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
+struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
+struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
+int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
+	sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
+	int create, int extend_disksize);
 
 extern void ext3_read_inode (struct inode *);
 extern int  ext3_write_inode (struct inode *, int);
diff -urN oldtree/include/linux/init.h newtree/include/linux/init.h
--- oldtree/include/linux/init.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/include/linux/init.h	2006-01-29 11:18:20.626153144 +0000
@@ -241,6 +241,18 @@
 #define __cpuexitdata	__exitdata
 #endif
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+#define __meminit
+#define __meminitdata
+#define __memexit
+#define __memexitdata
+#else
+#define __meminit	__init
+#define __meminitdata	__initdata
+#define __memexit	__exit
+#define __memexitdata	__exitdata
+#endif
+
 /* Functions marked as __devexit may be discarded at kernel link time, depending
    on config options.  Newer versions of binutils detect references from
    retained sections to discarded sections and flag an error.  Pointers to
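
The intent of the new markers: with CONFIG_MEMORY_HOTPLUG disabled they
degrade to __init/__initdata and the code is discarded after boot, while with
hotplug enabled they expand to nothing so the code stays resident for later
hot-add events. A hypothetical usage example (init_new_memory_range is not an
existing kernel function):

	#include <linux/init.h>

	static unsigned long __meminitdata default_span = 1UL << 20;

	static int __meminit init_new_memory_range(unsigned long start_pfn,
						   unsigned long nr_pages)
	{
		/* runs at boot; with hotplug it may run again on hot-add */
		return 0;
	}
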
diff -urN oldtree/include/linux/io.h newtree/include/linux/io.h
--- oldtree/include/linux/io.h	1970-01-01 00:00:00.000000000 +0000
+++ newtree/include/linux/io.h	2006-01-29 11:18:20.569161808 +0000
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2006 PathScale, Inc.  All Rights Reserved.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_IO_H
+#define _LINUX_IO_H
+
+#include <asm/io.h>
+
+void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
+
+#endif /* _LINUX_IO_H */
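
A short usage sketch for the new helper: __iowrite32_copy() copies from a
kernel buffer into MMIO space in 32-bit units (count is in 32-bit words),
which some devices require when posting command descriptors. The descriptor
layout and function names below are hypothetical:

	#include <linux/io.h>
	#include <linux/types.h>

	struct cmd_desc {
		u32 opcode;
		u32 dma_addr;
		u32 len;
		u32 flags;
	};

	static void post_command(void __iomem *slot, const struct cmd_desc *desc)
	{
		/* count is in 32-bit words, hence sizeof() / 4 */
		__iowrite32_copy(slot, desc, sizeof(*desc) / 4);
	}
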
diff -urN oldtree/include/linux/pci_ids.h newtree/include/linux/pci_ids.h
--- oldtree/include/linux/pci_ids.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/include/linux/pci_ids.h	2006-01-29 11:18:10.840640768 +0000
@@ -623,6 +623,7 @@
 #define PCI_DEVICE_ID_SI_965		0x0965
 #define PCI_DEVICE_ID_SI_5511		0x5511
 #define PCI_DEVICE_ID_SI_5513		0x5513
+#define PCI_DEVICE_ID_SI_5517		0x5517
 #define PCI_DEVICE_ID_SI_5518		0x5518
 #define PCI_DEVICE_ID_SI_5571		0x5571
 #define PCI_DEVICE_ID_SI_5581		0x5581
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_8X 0x0181
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440SE_8X 0x0182
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420_8X 0x0183
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_448_GO    0x0186
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_488_GO    0x0187
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_580_XGL    0x0188
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_MAC    0x0189
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_280_NVS    0x018A
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_380_XGL    0x018B
+#define PCI_DEVICE_ID_NVIDIA_IGEFORCE2		0x01a0
+#define PCI_DEVICE_ID_NVIDIA_NFORCE		0x01a4
+#define PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO		0x01b1
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS	0x01b4
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE		0x01bc
+#define PCI_DEVICE_ID_NVIDIA_MCP1_MODEM		0x01c1
+#define PCI_DEVICE_ID_NVIDIA_NVENET_1		0x01c3
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2		0x01e0
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE3		0x0200
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1		0x0201
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_2		0x0202
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_DDC		0x0203
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B      0x0211
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_LE   0x0212
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_GT   0x0215
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4600	0x0250
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4400	0x0251
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4200	0x0253
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_900XGL	0x0258
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_750XGL	0x0259
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_700XGL	0x025B
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE	0x0265
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA	0x0266
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2	0x0267
+#define PCI_DEVICE_ID_NVIDIA_NVENET_12		0x0268
+#define PCI_DEVICE_ID_NVIDIA_NVENET_13		0x0269
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE	0x036E
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA	0x037E
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2	0x037F
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800	0x0280
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X    0x0281
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE     0x0282
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_4200_GO       0x0286
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_980_XGL        0x0288
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_780_XGL        0x0289
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_700_GOGL       0x028C
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800_ULTRA  0x0301
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800        0x0302
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_2000         0x0308
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1000         0x0309
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600_ULTRA  0x0311
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600        0x0312
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600SE      0x0314
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5600      0x031A
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5650      0x031B
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO700        0x031C
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200        0x0320
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_ULTRA  0x0321
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_1      0x0322
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200SE      0x0323
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5200      0x0324
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250      0x0325
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5500        0x0326
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5100        0x0327
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250_32   0x0328
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO_5200	    0x0329
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_NVS_280_PCI     0x032A
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_500          0x032B
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5300      0x032C
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5100      0x032D
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900_ULTRA  0x0330
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900        0x0331
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900XT      0x0332
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5950_ULTRA  0x0333
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900ZT      0x0334
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_3000         0x0338
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_700          0x033F
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700_ULTRA  0x0341
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700        0x0342
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700LE      0x0343
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700VE      0x0344
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_1    0x0347
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2    0x0348
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000       0x034C
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100         0x034E
+#define PCI_DEVICE_ID_NVIDIA_NVENET_14              0x0372
+#define PCI_DEVICE_ID_NVIDIA_NVENET_15              0x0373
+
+#define PCI_VENDOR_ID_IMS		0x10e0
+#define PCI_DEVICE_ID_IMS_TT128		0x9128
+#define PCI_DEVICE_ID_IMS_TT3D		0x9135
+
+
+
+
+#define PCI_VENDOR_ID_INTERG		0x10ea
+#define PCI_DEVICE_ID_INTERG_1682	0x1682
+#define PCI_DEVICE_ID_INTERG_2000	0x2000
+#define PCI_DEVICE_ID_INTERG_2010	0x2010
+#define PCI_DEVICE_ID_INTERG_5000	0x5000
+#define PCI_DEVICE_ID_INTERG_5050	0x5050
+
+#define PCI_VENDOR_ID_REALTEK		0x10ec
+#define PCI_DEVICE_ID_REALTEK_8139	0x8139
+
+#define PCI_VENDOR_ID_XILINX		0x10ee
+#define PCI_DEVICE_ID_RME_DIGI96	0x3fc0
+#define PCI_DEVICE_ID_RME_DIGI96_8	0x3fc1
+#define PCI_DEVICE_ID_RME_DIGI96_8_PRO	0x3fc2
+#define PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST 0x3fc3
+#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP 0x3fc5
+#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP_MADI 0x3fc6
+
+
+#define PCI_VENDOR_ID_INIT		0x1101
+
+#define PCI_VENDOR_ID_CREATIVE		0x1102 /* duplicate: ECTIVA */
+#define PCI_DEVICE_ID_CREATIVE_EMU10K1	0x0002
+
+#define PCI_VENDOR_ID_ECTIVA		0x1102 /* duplicate: CREATIVE */
+#define PCI_DEVICE_ID_ECTIVA_EV1938	0x8938
+
+#define PCI_VENDOR_ID_TTI		0x1103
+#define PCI_DEVICE_ID_TTI_HPT343	0x0003
+#define PCI_DEVICE_ID_TTI_HPT366	0x0004
+#define PCI_DEVICE_ID_TTI_HPT372	0x0005
+#define PCI_DEVICE_ID_TTI_HPT302	0x0006
+#define PCI_DEVICE_ID_TTI_HPT371	0x0007
+#define PCI_DEVICE_ID_TTI_HPT374	0x0008
+#define PCI_DEVICE_ID_TTI_HPT372N	0x0009	/* apparently a 372N variant? */
+
+#define PCI_VENDOR_ID_VIA		0x1106
+#define PCI_DEVICE_ID_VIA_8763_0	0x0198
+#define PCI_DEVICE_ID_VIA_8380_0	0x0204
+#define PCI_DEVICE_ID_VIA_3238_0	0x0238
+#define PCI_DEVICE_ID_VIA_PT880		0x0258
+#define PCI_DEVICE_ID_VIA_PX8X0_0	0x0259
+#define PCI_DEVICE_ID_VIA_3269_0	0x0269
+#define PCI_DEVICE_ID_VIA_K8T800PRO_0	0x0282
+#define PCI_DEVICE_ID_VIA_3296_0	0x0296
+#define PCI_DEVICE_ID_VIA_8363_0	0x0305
+#define PCI_DEVICE_ID_VIA_P4M800CE	0x0314
+#define PCI_DEVICE_ID_VIA_8371_0	0x0391
+#define PCI_DEVICE_ID_VIA_8501_0	0x0501
+#define PCI_DEVICE_ID_VIA_82C561	0x0561
+#define PCI_DEVICE_ID_VIA_82C586_1	0x0571
+#define PCI_DEVICE_ID_VIA_82C576	0x0576
+#define PCI_DEVICE_ID_VIA_82C586_0	0x0586
+#define PCI_DEVICE_ID_VIA_82C596	0x0596
+#define PCI_DEVICE_ID_VIA_82C597_0	0x0597
+#define PCI_DEVICE_ID_VIA_82C598_0	0x0598
+#define PCI_DEVICE_ID_VIA_8601_0	0x0601
+#define PCI_DEVICE_ID_VIA_8605_0	0x0605
+#define PCI_DEVICE_ID_VIA_82C686	0x0686
+#define PCI_DEVICE_ID_VIA_82C691_0	0x0691
+#define PCI_DEVICE_ID_VIA_82C576_1	0x1571
+#define PCI_DEVICE_ID_VIA_82C586_2	0x3038
+#define PCI_DEVICE_ID_VIA_82C586_3	0x3040
+#define PCI_DEVICE_ID_VIA_82C596_3	0x3050
+#define PCI_DEVICE_ID_VIA_82C596B_3	0x3051
+#define PCI_DEVICE_ID_VIA_82C686_4	0x3057
+#define PCI_DEVICE_ID_VIA_82C686_5	0x3058
+#define PCI_DEVICE_ID_VIA_8233_5	0x3059
+#define PCI_DEVICE_ID_VIA_8233_0	0x3074
+#define PCI_DEVICE_ID_VIA_8633_0	0x3091
+#define PCI_DEVICE_ID_VIA_8367_0	0x3099
+#define PCI_DEVICE_ID_VIA_8653_0	0x3101
+#define PCI_DEVICE_ID_VIA_8622		0x3102
+#define PCI_DEVICE_ID_VIA_8233C_0	0x3109
+#define PCI_DEVICE_ID_VIA_8361		0x3112
+#define PCI_DEVICE_ID_VIA_XM266		0x3116
+#define PCI_DEVICE_ID_VIA_612X		0x3119
+#define PCI_DEVICE_ID_VIA_862X_0	0x3123
+#define PCI_DEVICE_ID_VIA_8753_0	0x3128
+#define PCI_DEVICE_ID_VIA_8233A		0x3147
+#define PCI_DEVICE_ID_VIA_8703_51_0	0x3148
+#define PCI_DEVICE_ID_VIA_8237_SATA	0x3149
+#define PCI_DEVICE_ID_VIA_XN266		0x3156
+#define PCI_DEVICE_ID_VIA_6410		0x3164
+#define PCI_DEVICE_ID_VIA_8754C_0	0x3168
+#define PCI_DEVICE_ID_VIA_8235		0x3177
+#define PCI_DEVICE_ID_VIA_8385_0	0x3188
+#define PCI_DEVICE_ID_VIA_8377_0	0x3189
+#define PCI_DEVICE_ID_VIA_8378_0	0x3205
+#define PCI_DEVICE_ID_VIA_8783_0	0x3208
+#define PCI_DEVICE_ID_VIA_8237		0x3227
+#define PCI_DEVICE_ID_VIA_8251		0x3287
+#define PCI_DEVICE_ID_VIA_8231		0x8231
+#define PCI_DEVICE_ID_VIA_8231_4	0x8235
+#define PCI_DEVICE_ID_VIA_8365_1	0x8305
+#define PCI_DEVICE_ID_VIA_8371_1	0x8391
+#define PCI_DEVICE_ID_VIA_82C598_1	0x8598
+#define PCI_DEVICE_ID_VIA_838X_1	0xB188
+#define PCI_DEVICE_ID_VIA_83_87XX_1	0xB198
+
+#define PCI_VENDOR_ID_SIEMENS           0x110A
+#define PCI_DEVICE_ID_SIEMENS_DSCC4     0x2102
+
+
+#define PCI_VENDOR_ID_VORTEX		0x1119
+#define PCI_DEVICE_ID_VORTEX_GDT60x0	0x0000
+#define PCI_DEVICE_ID_VORTEX_GDT6000B	0x0001
+#define PCI_DEVICE_ID_VORTEX_GDT6x10	0x0002
+#define PCI_DEVICE_ID_VORTEX_GDT6x20	0x0003
+#define PCI_DEVICE_ID_VORTEX_GDT6530	0x0004
+#define PCI_DEVICE_ID_VORTEX_GDT6550	0x0005
+#define PCI_DEVICE_ID_VORTEX_GDT6x17	0x0006
+#define PCI_DEVICE_ID_VORTEX_GDT6x27	0x0007
+#define PCI_DEVICE_ID_VORTEX_GDT6537	0x0008
+#define PCI_DEVICE_ID_VORTEX_GDT6557	0x0009
+#define PCI_DEVICE_ID_VORTEX_GDT6x15	0x000a
+#define PCI_DEVICE_ID_VORTEX_GDT6x25	0x000b
+#define PCI_DEVICE_ID_VORTEX_GDT6535	0x000c
+#define PCI_DEVICE_ID_VORTEX_GDT6555	0x000d
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP	0x0100
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP	0x0101
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP	0x0102
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP	0x0103
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP	0x0104
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP	0x0105
+
+#define PCI_VENDOR_ID_EF		0x111a
+#define PCI_DEVICE_ID_EF_ATM_FPGA	0x0000
+#define PCI_DEVICE_ID_EF_ATM_ASIC	0x0002
+#define PCI_VENDOR_ID_EF_ATM_LANAI2	0x0003
+#define PCI_VENDOR_ID_EF_ATM_LANAIHB	0x0005
+
+#define PCI_VENDOR_ID_IDT		0x111d
+#define PCI_DEVICE_ID_IDT_IDT77201	0x0001
+
+#define PCI_VENDOR_ID_FORE		0x1127
+#define PCI_DEVICE_ID_FORE_PCA200E	0x0300
+
+
+#define PCI_VENDOR_ID_PHILIPS		0x1131
+#define PCI_DEVICE_ID_PHILIPS_SAA7146	0x7146
+#define PCI_DEVICE_ID_PHILIPS_SAA9730	0x9730
+
+#define PCI_VENDOR_ID_EICON		0x1133
+#define PCI_DEVICE_ID_EICON_DIVA20	0xe002
+#define PCI_DEVICE_ID_EICON_DIVA20_U	0xe004
+#define PCI_DEVICE_ID_EICON_DIVA201	0xe005
+#define PCI_DEVICE_ID_EICON_DIVA202	0xe00b
+#define PCI_DEVICE_ID_EICON_MAESTRA	0xe010
+#define PCI_DEVICE_ID_EICON_MAESTRAQ	0xe012
+#define PCI_DEVICE_ID_EICON_MAESTRAQ_U	0xe013
+#define PCI_DEVICE_ID_EICON_MAESTRAP	0xe014
+
+#define PCI_VENDOR_ID_ZIATECH		0x1138
+#define PCI_DEVICE_ID_ZIATECH_5550_HC	0x5550
+
+
+
+#define PCI_VENDOR_ID_SYSKONNECT	0x1148
+#define PCI_DEVICE_ID_SYSKONNECT_TR	0x4200
+#define PCI_DEVICE_ID_SYSKONNECT_GE	0x4300
+#define PCI_DEVICE_ID_SYSKONNECT_YU	0x4320
+#define PCI_DEVICE_ID_SYSKONNECT_9DXX	0x4400
+#define PCI_DEVICE_ID_SYSKONNECT_9MXX	0x4500
+
+
+#define PCI_VENDOR_ID_DIGI		0x114f
+#define PCI_DEVICE_ID_DIGI_DF_M_IOM2_E	0x0070
+#define PCI_DEVICE_ID_DIGI_DF_M_E	0x0071
+#define PCI_DEVICE_ID_DIGI_DF_M_IOM2_A	0x0072
+#define PCI_DEVICE_ID_DIGI_DF_M_A	0x0073
+#define PCI_DEVICE_ID_NEO_2DB9          0x00C8
+#define PCI_DEVICE_ID_NEO_2DB9PRI       0x00C9
+#define PCI_DEVICE_ID_NEO_2RJ45         0x00CA
+#define PCI_DEVICE_ID_NEO_2RJ45PRI      0x00CB
+
+
+#define PCI_VENDOR_ID_XIRCOM		0x115d
+#define PCI_DEVICE_ID_XIRCOM_RBM56G	0x0101
+#define PCI_DEVICE_ID_XIRCOM_X3201_MDM	0x0103
+
+
+#define PCI_VENDOR_ID_SERVERWORKS	  0x1166
+#define PCI_DEVICE_ID_SERVERWORKS_HE	  0x0008
+#define PCI_DEVICE_ID_SERVERWORKS_LE	  0x0009
+#define PCI_DEVICE_ID_SERVERWORKS_GCNB_LE 0x0017
+#define PCI_DEVICE_ID_SERVERWORKS_OSB4	  0x0200
+#define PCI_DEVICE_ID_SERVERWORKS_CSB5	  0x0201
+#define PCI_DEVICE_ID_SERVERWORKS_CSB6    0x0203
+#define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211
+#define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212
+#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213
+#define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214
+#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217
+#define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227
+
+#define PCI_VENDOR_ID_SBE		0x1176
+#define PCI_DEVICE_ID_SBE_WANXL100	0x0301
+#define PCI_DEVICE_ID_SBE_WANXL200	0x0302
+#define PCI_DEVICE_ID_SBE_WANXL400	0x0104
+
+#define PCI_VENDOR_ID_TOSHIBA		0x1179
+#define PCI_DEVICE_ID_TOSHIBA_PICCOLO	0x0102
+#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_1	0x0103
+#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_2	0x0105
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC95	0x060a
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC97	0x060f
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC100	0x0617
+
+#define PCI_VENDOR_ID_TOSHIBA_2		0x102f
+#define PCI_DEVICE_ID_TOSHIBA_TC35815CF	0x0030
+#define PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC	0x0108
+#define PCI_DEVICE_ID_TOSHIBA_SPIDER_NET 0x01b3
+
+#define PCI_VENDOR_ID_RICOH		0x1180
+#define PCI_DEVICE_ID_RICOH_RL5C465	0x0465
+#define PCI_DEVICE_ID_RICOH_RL5C466	0x0466
+#define PCI_DEVICE_ID_RICOH_RL5C475	0x0475
+#define PCI_DEVICE_ID_RICOH_RL5C476	0x0476
+#define PCI_DEVICE_ID_RICOH_RL5C478	0x0478
+
+#define PCI_VENDOR_ID_DLINK		0x1186
+#define PCI_DEVICE_ID_DLINK_DGE510T	0x4c00
+
+#define PCI_VENDOR_ID_ARTOP		0x1191
+#define PCI_DEVICE_ID_ARTOP_ATP850UF	0x0005
+#define PCI_DEVICE_ID_ARTOP_ATP860	0x0006
+#define PCI_DEVICE_ID_ARTOP_ATP860R	0x0007
+#define PCI_DEVICE_ID_ARTOP_ATP865	0x0008
+#define PCI_DEVICE_ID_ARTOP_ATP865R	0x0009
+#define PCI_DEVICE_ID_ARTOP_AEC7610	0x8002
+#define PCI_DEVICE_ID_ARTOP_AEC7612UW	0x8010
+#define PCI_DEVICE_ID_ARTOP_AEC7612U	0x8020
+#define PCI_DEVICE_ID_ARTOP_AEC7612S	0x8030
+#define PCI_DEVICE_ID_ARTOP_AEC7612D	0x8040
+#define PCI_DEVICE_ID_ARTOP_AEC7612SUW	0x8050
+#define PCI_DEVICE_ID_ARTOP_8060	0x8060
+
+#define PCI_VENDOR_ID_ZEITNET		0x1193
+#define PCI_DEVICE_ID_ZEITNET_1221	0x0001
+#define PCI_DEVICE_ID_ZEITNET_1225	0x0002
+
+
+#define PCI_VENDOR_ID_FUJITSU_ME	0x119e
+#define PCI_DEVICE_ID_FUJITSU_FS155	0x0001
+#define PCI_DEVICE_ID_FUJITSU_FS50	0x0003
+
+#define PCI_SUBVENDOR_ID_KEYSPAN	0x11a9
+#define PCI_SUBDEVICE_ID_KEYSPAN_SX2	0x5334
+
+#define PCI_VENDOR_ID_MARVELL		0x11ab
+#define PCI_DEVICE_ID_MARVELL_GT64111	0x4146
+#define PCI_DEVICE_ID_MARVELL_GT64260	0x6430
+#define PCI_DEVICE_ID_MARVELL_MV64360	0x6460
+#define PCI_DEVICE_ID_MARVELL_MV64460	0x6480
+#define PCI_DEVICE_ID_MARVELL_GT96100	0x9652
+#define PCI_DEVICE_ID_MARVELL_GT96100A	0x9653
+
+
+#define PCI_VENDOR_ID_V3		0x11b0
+#define PCI_DEVICE_ID_V3_V960		0x0001
+#define PCI_DEVICE_ID_V3_V351		0x0002
+
+
+#define PCI_VENDOR_ID_ATT		0x11c1
+#define PCI_DEVICE_ID_ATT_VENUS_MODEM	0x480
+
+
+#define PCI_VENDOR_ID_SPECIALIX		0x11cb
+#define PCI_DEVICE_ID_SPECIALIX_IO8	0x2000
+#define PCI_DEVICE_ID_SPECIALIX_RIO	0x8000
+#define PCI_SUBDEVICE_ID_SPECIALIX_SPEED4 0xa004
+
+
+#define PCI_VENDOR_ID_ANALOG_DEVICES	0x11d4
+#define PCI_DEVICE_ID_AD1889JS		0x1889
+
+
+#define PCI_DEVICE_ID_SEGA_BBA		0x1234
+
+#define PCI_VENDOR_ID_ZORAN		0x11de
+#define PCI_DEVICE_ID_ZORAN_36057	0x6057
+#define PCI_DEVICE_ID_ZORAN_36120	0x6120
+
+
+#define PCI_VENDOR_ID_COMPEX		0x11f6
+#define PCI_DEVICE_ID_COMPEX_ENET100VG4	0x0112
+
+#define PCI_VENDOR_ID_RP		0x11fe
+#define PCI_DEVICE_ID_RP32INTF		0x0001
+#define PCI_DEVICE_ID_RP8INTF		0x0002
+#define PCI_DEVICE_ID_RP16INTF		0x0003
+#define PCI_DEVICE_ID_RP4QUAD		0x0004
+#define PCI_DEVICE_ID_RP8OCTA		0x0005
+#define PCI_DEVICE_ID_RP8J		0x0006
+#define PCI_DEVICE_ID_RP4J		0x0007
+#define PCI_DEVICE_ID_RP8SNI		0x0008
+#define PCI_DEVICE_ID_RP16SNI		0x0009
+#define PCI_DEVICE_ID_RPP4		0x000A
+#define PCI_DEVICE_ID_RPP8		0x000B
+#define PCI_DEVICE_ID_RP4M		0x000D
+#define PCI_DEVICE_ID_RP2_232		0x000E
+#define PCI_DEVICE_ID_RP2_422		0x000F
+#define PCI_DEVICE_ID_URP32INTF		0x0801
+#define PCI_DEVICE_ID_URP8INTF		0x0802
+#define PCI_DEVICE_ID_URP16INTF		0x0803
+#define PCI_DEVICE_ID_URP8OCTA		0x0805
+#define PCI_DEVICE_ID_UPCI_RM3_8PORT	0x080C
+#define PCI_DEVICE_ID_UPCI_RM3_4PORT	0x080D
+#define PCI_DEVICE_ID_CRP16INTF		0x0903
+
+#define PCI_VENDOR_ID_CYCLADES		0x120e
+#define PCI_DEVICE_ID_CYCLOM_Y_Lo	0x0100
+#define PCI_DEVICE_ID_CYCLOM_Y_Hi	0x0101
+#define PCI_DEVICE_ID_CYCLOM_4Y_Lo	0x0102
+#define PCI_DEVICE_ID_CYCLOM_4Y_Hi	0x0103
+#define PCI_DEVICE_ID_CYCLOM_8Y_Lo	0x0104
+#define PCI_DEVICE_ID_CYCLOM_8Y_Hi	0x0105
+#define PCI_DEVICE_ID_CYCLOM_Z_Lo	0x0200
+#define PCI_DEVICE_ID_CYCLOM_Z_Hi	0x0201
+#define PCI_DEVICE_ID_PC300_RX_2	0x0300
+#define PCI_DEVICE_ID_PC300_RX_1	0x0301
+#define PCI_DEVICE_ID_PC300_TE_2	0x0310
+#define PCI_DEVICE_ID_PC300_TE_1	0x0311
+#define PCI_DEVICE_ID_PC300_TE_M_2	0x0320
+#define PCI_DEVICE_ID_PC300_TE_M_1	0x0321
+
+#define PCI_VENDOR_ID_ESSENTIAL		0x120f
+#define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER	0x0001
+
+#define PCI_VENDOR_ID_O2		0x1217
+#define PCI_DEVICE_ID_O2_6729		0x6729
+#define PCI_DEVICE_ID_O2_6730		0x673a
+#define PCI_DEVICE_ID_O2_6832		0x6832
+#define PCI_DEVICE_ID_O2_6836		0x6836
+
+#define PCI_VENDOR_ID_3DFX		0x121a
+#define PCI_DEVICE_ID_3DFX_VOODOO	0x0001
+#define PCI_DEVICE_ID_3DFX_VOODOO2	0x0002
+#define PCI_DEVICE_ID_3DFX_BANSHEE	0x0003
+#define PCI_DEVICE_ID_3DFX_VOODOO3	0x0005
+#define PCI_DEVICE_ID_3DFX_VOODOO5	0x0009
+
+
+
+#define PCI_VENDOR_ID_AVM		0x1244
+#define PCI_DEVICE_ID_AVM_B1		0x0700
+#define PCI_DEVICE_ID_AVM_C4		0x0800
+#define PCI_DEVICE_ID_AVM_A1		0x0a00
+#define PCI_DEVICE_ID_AVM_A1_V2		0x0e00
+#define PCI_DEVICE_ID_AVM_C2		0x1100
+#define PCI_DEVICE_ID_AVM_T1		0x1200
+
+
+#define PCI_VENDOR_ID_STALLION		0x124d
+
+/* Allied Telesyn */
+#define PCI_VENDOR_ID_AT    		0x1259
+#define PCI_SUBDEVICE_ID_AT_2700FX	0x2701
+#define PCI_SUBDEVICE_ID_AT_2701FX	0x2703
+
+#define PCI_VENDOR_ID_ESS		0x125d
+#define PCI_DEVICE_ID_ESS_ESS1968	0x1968
+#define PCI_DEVICE_ID_ESS_ESS1978	0x1978
+#define PCI_DEVICE_ID_ESS_ALLEGRO_1	0x1988
+#define PCI_DEVICE_ID_ESS_ALLEGRO	0x1989
+#define PCI_DEVICE_ID_ESS_CANYON3D_2LE	0x1990
+#define PCI_DEVICE_ID_ESS_CANYON3D_2	0x1992
+#define PCI_DEVICE_ID_ESS_MAESTRO3	0x1998
+#define PCI_DEVICE_ID_ESS_MAESTRO3_1	0x1999
+#define PCI_DEVICE_ID_ESS_MAESTRO3_HW	0x199a
+#define PCI_DEVICE_ID_ESS_MAESTRO3_2	0x199b
+
+#define PCI_VENDOR_ID_SATSAGEM		0x1267
+#define PCI_DEVICE_ID_SATSAGEM_NICCY	0x1016
+
+
+#define PCI_VENDOR_ID_ENSONIQ		0x1274
+#define PCI_DEVICE_ID_ENSONIQ_CT5880	0x5880
+#define PCI_DEVICE_ID_ENSONIQ_ES1370	0x5000
+#define PCI_DEVICE_ID_ENSONIQ_ES1371	0x1371
+
+#define PCI_VENDOR_ID_TRANSMETA		0x1279
+#define PCI_DEVICE_ID_EFFICEON		0x0060
+
+#define PCI_VENDOR_ID_ROCKWELL		0x127A
+
+#define PCI_VENDOR_ID_ITE		0x1283
+#define PCI_DEVICE_ID_ITE_IT8172G	0x8172
+#define PCI_DEVICE_ID_ITE_IT8172G_AUDIO 0x0801
+#define PCI_DEVICE_ID_ITE_8211		0x8211
+#define PCI_DEVICE_ID_ITE_8212		0x8212
+#define PCI_DEVICE_ID_ITE_8872		0x8872
+#define PCI_DEVICE_ID_ITE_IT8330G_0	0xe886
+
+/* formerly Platform Tech */
+#define PCI_DEVICE_ID_ESS_ESS0100	0x0100
+
+#define PCI_VENDOR_ID_ALTEON		0x12ae
+
+
+#define PCI_SUBVENDOR_ID_CONNECT_TECH			0x12c4
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232		0x0001
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_232		0x0002
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_232		0x0003
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485		0x0004
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_4_4	0x0005
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485		0x0006
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485_2_2	0x0007
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_485		0x0008
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_2_6	0x0009
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH081101V1	0x000A
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH041101V1	0x000B
+
+
+#define PCI_VENDOR_ID_NVIDIA_SGS	0x12d2
+#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018
+
+#define PCI_SUBVENDOR_ID_CHASE_PCIFAST		0x12E0
+#define PCI_SUBDEVICE_ID_CHASE_PCIFAST4		0x0031
+#define PCI_SUBDEVICE_ID_CHASE_PCIFAST8		0x0021
+#define PCI_SUBDEVICE_ID_CHASE_PCIFAST16	0x0011
+#define PCI_SUBDEVICE_ID_CHASE_PCIFAST16FMC	0x0041
+#define PCI_SUBVENDOR_ID_CHASE_PCIRAS		0x124D
+#define PCI_SUBDEVICE_ID_CHASE_PCIRAS4		0xF001
+#define PCI_SUBDEVICE_ID_CHASE_PCIRAS8		0xF010
+
+#define PCI_VENDOR_ID_AUREAL		0x12eb
+#define PCI_DEVICE_ID_AUREAL_VORTEX_1	0x0001
+#define PCI_DEVICE_ID_AUREAL_VORTEX_2	0x0002
+#define PCI_DEVICE_ID_AUREAL_ADVANTAGE	0x0003
+
+#define PCI_VENDOR_ID_ELECTRONICDESIGNGMBH 0x12f8
+#define PCI_DEVICE_ID_LML_33R10		0x8a02
+
+
+#define PCI_VENDOR_ID_SIIG		0x131f
+#define PCI_SUBVENDOR_ID_SIIG		0x131f
+#define PCI_DEVICE_ID_SIIG_1S_10x_550	0x1000
+#define PCI_DEVICE_ID_SIIG_1S_10x_650	0x1001
+#define PCI_DEVICE_ID_SIIG_1S_10x_850	0x1002
+#define PCI_DEVICE_ID_SIIG_1S1P_10x_550	0x1010
+#define PCI_DEVICE_ID_SIIG_1S1P_10x_650	0x1011
+#define PCI_DEVICE_ID_SIIG_1S1P_10x_850	0x1012
+#define PCI_DEVICE_ID_SIIG_1P_10x	0x1020
+#define PCI_DEVICE_ID_SIIG_2P_10x	0x1021
+#define PCI_DEVICE_ID_SIIG_2S_10x_550	0x1030
+#define PCI_DEVICE_ID_SIIG_2S_10x_650	0x1031
+#define PCI_DEVICE_ID_SIIG_2S_10x_850	0x1032
+#define PCI_DEVICE_ID_SIIG_2S1P_10x_550	0x1034
+#define PCI_DEVICE_ID_SIIG_2S1P_10x_650	0x1035
+#define PCI_DEVICE_ID_SIIG_2S1P_10x_850	0x1036
+#define PCI_DEVICE_ID_SIIG_4S_10x_550	0x1050
+#define PCI_DEVICE_ID_SIIG_4S_10x_650	0x1051
+#define PCI_DEVICE_ID_SIIG_4S_10x_850	0x1052
+#define PCI_DEVICE_ID_SIIG_1S_20x_550	0x2000
+#define PCI_DEVICE_ID_SIIG_1S_20x_650	0x2001
+#define PCI_DEVICE_ID_SIIG_1S_20x_850	0x2002
+#define PCI_DEVICE_ID_SIIG_1P_20x	0x2020
+#define PCI_DEVICE_ID_SIIG_2P_20x	0x2021
+#define PCI_DEVICE_ID_SIIG_2S_20x_550	0x2030
+#define PCI_DEVICE_ID_SIIG_2S_20x_650	0x2031
+#define PCI_DEVICE_ID_SIIG_2S_20x_850	0x2032
+#define PCI_DEVICE_ID_SIIG_2P1S_20x_550	0x2040
+#define PCI_DEVICE_ID_SIIG_2P1S_20x_650	0x2041
+#define PCI_DEVICE_ID_SIIG_2P1S_20x_850	0x2042
+#define PCI_DEVICE_ID_SIIG_1S1P_20x_550	0x2010
+#define PCI_DEVICE_ID_SIIG_1S1P_20x_650	0x2011
+#define PCI_DEVICE_ID_SIIG_1S1P_20x_850	0x2012
+#define PCI_DEVICE_ID_SIIG_4S_20x_550	0x2050
+#define PCI_DEVICE_ID_SIIG_4S_20x_650	0x2051
+#define PCI_DEVICE_ID_SIIG_4S_20x_850	0x2052
+#define PCI_DEVICE_ID_SIIG_2S1P_20x_550	0x2060
+#define PCI_DEVICE_ID_SIIG_2S1P_20x_650	0x2061
+#define PCI_DEVICE_ID_SIIG_2S1P_20x_850	0x2062
+#define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL	0x2050
+
+#define PCI_VENDOR_ID_RADISYS		0x1331
+
+#define PCI_VENDOR_ID_DOMEX		0x134a
+#define PCI_DEVICE_ID_DOMEX_DMX3191D	0x0001
+
+#define PCI_VENDOR_ID_QUATECH		0x135C
+#define PCI_DEVICE_ID_QUATECH_QSC100	0x0010
+#define PCI_DEVICE_ID_QUATECH_DSC100	0x0020
+#define PCI_DEVICE_ID_QUATECH_ESC100D	0x0050
+#define PCI_DEVICE_ID_QUATECH_ESC100M	0x0060
+
+#define PCI_VENDOR_ID_SEALEVEL		0x135e
+#define PCI_DEVICE_ID_SEALEVEL_U530	0x7101
+#define PCI_DEVICE_ID_SEALEVEL_UCOMM2	0x7201
+#define PCI_DEVICE_ID_SEALEVEL_UCOMM422	0x7402
+#define PCI_DEVICE_ID_SEALEVEL_UCOMM232	0x7202
+#define PCI_DEVICE_ID_SEALEVEL_COMM4	0x7401
+#define PCI_DEVICE_ID_SEALEVEL_COMM8	0x7801
+#define PCI_DEVICE_ID_SEALEVEL_UCOMM8	0x7804
+
+#define PCI_VENDOR_ID_HYPERCOPE		0x1365
+#define PCI_DEVICE_ID_HYPERCOPE_PLX	0x9050
+#define PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO	0x0104
+#define PCI_SUBDEVICE_ID_HYPERCOPE_ERGO		0x0106
+#define PCI_SUBDEVICE_ID_HYPERCOPE_METRO	0x0107
+#define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2	0x0108
+
+#define PCI_VENDOR_ID_KAWASAKI		0x136b
+#define PCI_DEVICE_ID_MCHIP_KL5A72002	0xff01
+
+#define PCI_VENDOR_ID_CNET		0x1371
+#define PCI_DEVICE_ID_CNET_GIGACARD	0x434e
+
+#define PCI_VENDOR_ID_LMC		0x1376
+#define PCI_DEVICE_ID_LMC_HSSI		0x0003
+#define PCI_DEVICE_ID_LMC_DS3		0x0004
+#define PCI_DEVICE_ID_LMC_SSI		0x0005
+#define PCI_DEVICE_ID_LMC_T1		0x0006
+
+
+#define PCI_VENDOR_ID_NETGEAR		0x1385
+#define PCI_DEVICE_ID_NETGEAR_GA620	0x620a
+
+#define PCI_VENDOR_ID_APPLICOM		0x1389
+#define PCI_DEVICE_ID_APPLICOM_PCIGENERIC 0x0001
+#define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN 0x0002
+#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003
+
+#define PCI_VENDOR_ID_MOXA		0x1393
+#define PCI_DEVICE_ID_MOXA_RC7000	0x0001
+#define PCI_DEVICE_ID_MOXA_CP102	0x1020
+#define PCI_DEVICE_ID_MOXA_CP102UL	0x1021
+#define PCI_DEVICE_ID_MOXA_CP102U	0x1022
+#define PCI_DEVICE_ID_MOXA_C104		0x1040
+#define PCI_DEVICE_ID_MOXA_CP104U	0x1041
+#define PCI_DEVICE_ID_MOXA_CP104JU	0x1042
+#define PCI_DEVICE_ID_MOXA_CT114	0x1140
+#define PCI_DEVICE_ID_MOXA_CP114	0x1141
+#define PCI_DEVICE_ID_MOXA_CP118U	0x1180
+#define PCI_DEVICE_ID_MOXA_CP132	0x1320
+#define PCI_DEVICE_ID_MOXA_CP132U	0x1321
+#define PCI_DEVICE_ID_MOXA_CP134U	0x1340
+#define PCI_DEVICE_ID_MOXA_C168		0x1680
+#define PCI_DEVICE_ID_MOXA_CP168U	0x1681
+
+#define PCI_VENDOR_ID_CCD		0x1397
+#define PCI_DEVICE_ID_CCD_2BD0		0x2bd0
+#define PCI_DEVICE_ID_CCD_B000		0xb000
+#define PCI_DEVICE_ID_CCD_B006		0xb006
+#define PCI_DEVICE_ID_CCD_B007		0xb007
+#define PCI_DEVICE_ID_CCD_B008		0xb008
+#define PCI_DEVICE_ID_CCD_B009		0xb009
+#define PCI_DEVICE_ID_CCD_B00A		0xb00a
+#define PCI_DEVICE_ID_CCD_B00B		0xb00b
+#define PCI_DEVICE_ID_CCD_B00C		0xb00c
+#define PCI_DEVICE_ID_CCD_B100		0xb100
+
+#define PCI_VENDOR_ID_EXAR		0x13a8
+#define PCI_DEVICE_ID_EXAR_XR17C152	0x0152
+#define PCI_DEVICE_ID_EXAR_XR17C154	0x0154
+#define PCI_DEVICE_ID_EXAR_XR17C158	0x0158
+
+#define PCI_VENDOR_ID_MICROGATE		0x13c0
+#define PCI_DEVICE_ID_MICROGATE_USC	0x0010
+#define PCI_DEVICE_ID_MICROGATE_SCA	0x0030
+
+#define PCI_VENDOR_ID_3WARE		0x13C1
+#define PCI_DEVICE_ID_3WARE_1000	0x1000
+#define PCI_DEVICE_ID_3WARE_7000	0x1001
+#define PCI_DEVICE_ID_3WARE_9000	0x1002
+
+#define PCI_VENDOR_ID_IOMEGA		0x13ca
+#define PCI_DEVICE_ID_IOMEGA_BUZ	0x4231
+
+#define PCI_VENDOR_ID_ABOCOM		0x13D1
+#define PCI_DEVICE_ID_ABOCOM_2BD1       0x2BD1
+
+#define PCI_VENDOR_ID_CMEDIA		0x13f6
+#define PCI_DEVICE_ID_CMEDIA_CM8338A	0x0100
+#define PCI_DEVICE_ID_CMEDIA_CM8338B	0x0101
+#define PCI_DEVICE_ID_CMEDIA_CM8738	0x0111
+#define PCI_DEVICE_ID_CMEDIA_CM8738B	0x0112
+
+#define PCI_VENDOR_ID_LAVA		0x1407
+#define PCI_DEVICE_ID_LAVA_DSERIAL	0x0100 /* 2x 16550 */
+#define PCI_DEVICE_ID_LAVA_QUATRO_A	0x0101 /* 2x 16550, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_QUATRO_B	0x0102 /* 2x 16550, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_OCTO_A	0x0180 /* 4x 16550A, half of 8 port */
+#define PCI_DEVICE_ID_LAVA_OCTO_B	0x0181 /* 4x 16550A, half of 8 port */
+#define PCI_DEVICE_ID_LAVA_PORT_PLUS	0x0200 /* 2x 16650 */
+#define PCI_DEVICE_ID_LAVA_QUAD_A	0x0201 /* 2x 16650, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_QUAD_B	0x0202 /* 2x 16650, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_SSERIAL	0x0500 /* 1x 16550 */
+#define PCI_DEVICE_ID_LAVA_PORT_650	0x0600 /* 1x 16650 */
+#define PCI_DEVICE_ID_LAVA_PARALLEL	0x8000
+#define PCI_DEVICE_ID_LAVA_DUAL_PAR_A	0x8002 /* The Lava Dual Parallel is */
+#define PCI_DEVICE_ID_LAVA_DUAL_PAR_B	0x8003 /* two PCI devices on a card */
+#define PCI_DEVICE_ID_LAVA_BOCA_IOPPAR	0x8800
+
+#define PCI_VENDOR_ID_TIMEDIA		0x1409
+#define PCI_DEVICE_ID_TIMEDIA_1889	0x7168
+
+#define PCI_VENDOR_ID_ICE		0x1412
+#define PCI_DEVICE_ID_ICE_1712		0x1712
+#define PCI_DEVICE_ID_VT1724		0x1724
+
+#define PCI_VENDOR_ID_OXSEMI		0x1415
+#define PCI_DEVICE_ID_OXSEMI_12PCI840	0x8403
+#define PCI_DEVICE_ID_OXSEMI_16PCI954	0x9501
+#define PCI_DEVICE_ID_OXSEMI_16PCI95N	0x9511
+#define PCI_DEVICE_ID_OXSEMI_16PCI954PP	0x9513
+#define PCI_DEVICE_ID_OXSEMI_16PCI952	0x9521
+
+#define PCI_VENDOR_ID_SAMSUNG		0x144d
+
+
+#define PCI_VENDOR_ID_TITAN		0x14D2
+#define PCI_DEVICE_ID_TITAN_010L	0x8001
+#define PCI_DEVICE_ID_TITAN_100L	0x8010
+#define PCI_DEVICE_ID_TITAN_110L	0x8011
+#define PCI_DEVICE_ID_TITAN_200L	0x8020
+#define PCI_DEVICE_ID_TITAN_210L	0x8021
+#define PCI_DEVICE_ID_TITAN_400L	0x8040
+#define PCI_DEVICE_ID_TITAN_800L	0x8080
+#define PCI_DEVICE_ID_TITAN_100		0xA001
+#define PCI_DEVICE_ID_TITAN_200		0xA005
+#define PCI_DEVICE_ID_TITAN_400		0xA003
+#define PCI_DEVICE_ID_TITAN_800B	0xA004
+
+#define PCI_VENDOR_ID_PANACOM		0x14d4
+#define PCI_DEVICE_ID_PANACOM_QUADMODEM	0x0400
+#define PCI_DEVICE_ID_PANACOM_DUALMODEM	0x0402
+
+
+#define PCI_VENDOR_ID_AFAVLAB		0x14db
+#define PCI_DEVICE_ID_AFAVLAB_P028	0x2180
+#define PCI_DEVICE_ID_AFAVLAB_P030	0x2182
+
+#define PCI_VENDOR_ID_BROADCOM		0x14e4
+#define PCI_DEVICE_ID_TIGON3_5752	0x1600
+#define PCI_DEVICE_ID_TIGON3_5752M	0x1601
+#define PCI_DEVICE_ID_TIGON3_5700	0x1644
+#define PCI_DEVICE_ID_TIGON3_5701	0x1645
+#define PCI_DEVICE_ID_TIGON3_5702	0x1646
+#define PCI_DEVICE_ID_TIGON3_5703	0x1647
+#define PCI_DEVICE_ID_TIGON3_5704	0x1648
+#define PCI_DEVICE_ID_TIGON3_5704S_2	0x1649
+#define PCI_DEVICE_ID_NX2_5706		0x164a
+#define PCI_DEVICE_ID_NX2_5708		0x164c
+#define PCI_DEVICE_ID_TIGON3_5702FE	0x164d
+#define PCI_DEVICE_ID_TIGON3_5705	0x1653
+#define PCI_DEVICE_ID_TIGON3_5705_2	0x1654
+#define PCI_DEVICE_ID_TIGON3_5720	0x1658
+#define PCI_DEVICE_ID_TIGON3_5721	0x1659
+#define PCI_DEVICE_ID_TIGON3_5705M	0x165d
+#define PCI_DEVICE_ID_TIGON3_5705M_2	0x165e
+#define PCI_DEVICE_ID_TIGON3_5714	0x1668
+#define PCI_DEVICE_ID_TIGON3_5780	0x166a
+#define PCI_DEVICE_ID_TIGON3_5780S	0x166b
+#define PCI_DEVICE_ID_TIGON3_5705F	0x166e
+#define PCI_DEVICE_ID_TIGON3_5750	0x1676
+#define PCI_DEVICE_ID_TIGON3_5751	0x1677
+#define PCI_DEVICE_ID_TIGON3_5715	0x1678
+#define PCI_DEVICE_ID_TIGON3_5750M	0x167c
+#define PCI_DEVICE_ID_TIGON3_5751M	0x167d
+#define PCI_DEVICE_ID_TIGON3_5751F	0x167e
+#define PCI_DEVICE_ID_TIGON3_5782	0x1696
+#define PCI_DEVICE_ID_TIGON3_5788	0x169c
+#define PCI_DEVICE_ID_TIGON3_5789	0x169d
+#define PCI_DEVICE_ID_TIGON3_5702X	0x16a6
+#define PCI_DEVICE_ID_TIGON3_5703X	0x16a7
+#define PCI_DEVICE_ID_TIGON3_5704S	0x16a8
+#define PCI_DEVICE_ID_NX2_5706S		0x16aa
+#define PCI_DEVICE_ID_NX2_5708S		0x16ac
+#define PCI_DEVICE_ID_TIGON3_5702A3	0x16c6
+#define PCI_DEVICE_ID_TIGON3_5703A3	0x16c7
+#define PCI_DEVICE_ID_TIGON3_5781	0x16dd
+#define PCI_DEVICE_ID_TIGON3_5753	0x16f7
+#define PCI_DEVICE_ID_TIGON3_5753M	0x16fd
+#define PCI_DEVICE_ID_TIGON3_5753F	0x16fe
+#define PCI_DEVICE_ID_BCM4401B1		0x170c
+#define PCI_DEVICE_ID_TIGON3_5901	0x170d
+#define PCI_DEVICE_ID_TIGON3_5901_2	0x170e
+#define PCI_DEVICE_ID_BCM4401		0x4401
+#define PCI_DEVICE_ID_BCM4401B0		0x4402
+
+#define PCI_VENDOR_ID_TOPIC		0x151f
+#define PCI_DEVICE_ID_TOPIC_TP560	0x0000
+
+#define PCI_VENDOR_ID_ENE		0x1524
+#define PCI_DEVICE_ID_ENE_1211		0x1211
+#define PCI_DEVICE_ID_ENE_1225		0x1225
+#define PCI_DEVICE_ID_ENE_1410		0x1410
+#define PCI_DEVICE_ID_ENE_710		0x1411
+#define PCI_DEVICE_ID_ENE_712		0x1412
+#define PCI_DEVICE_ID_ENE_1420		0x1420
+#define PCI_DEVICE_ID_ENE_720		0x1421
+#define PCI_DEVICE_ID_ENE_722		0x1422
+
+#define PCI_VENDOR_ID_CHELSIO		0x1425
+
+
+#define PCI_VENDOR_ID_SYBA		0x1592
+#define PCI_DEVICE_ID_SYBA_2P_EPP	0x0782
+#define PCI_DEVICE_ID_SYBA_1P_ECP	0x0783
+
+#define PCI_VENDOR_ID_MORETON		0x15aa
+#define PCI_DEVICE_ID_RASTEL_2PORT	0x2000
+
+#define PCI_VENDOR_ID_ZOLTRIX		0x15b0
+#define PCI_DEVICE_ID_ZOLTRIX_2BD0	0x2bd0
+
+#define PCI_VENDOR_ID_MELLANOX		0x15b3
+#define PCI_DEVICE_ID_MELLANOX_TAVOR	0x5a44
+#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278
+#define PCI_DEVICE_ID_MELLANOX_ARBEL	0x6282
+#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c
+#define PCI_DEVICE_ID_MELLANOX_SINAI	0x6274
+
+#define PCI_VENDOR_ID_PDC		0x15e9
+
+
+#define PCI_VENDOR_ID_FARSITE           0x1619
+#define PCI_DEVICE_ID_FARSITE_T2P       0x0400
+#define PCI_DEVICE_ID_FARSITE_T4P       0x0440
+#define PCI_DEVICE_ID_FARSITE_T1U       0x0610
+#define PCI_DEVICE_ID_FARSITE_T2U       0x0620
+#define PCI_DEVICE_ID_FARSITE_T4U       0x0640
+#define PCI_DEVICE_ID_FARSITE_TE1       0x1610
+#define PCI_DEVICE_ID_FARSITE_TE1C      0x1612
+
+#define PCI_VENDOR_ID_SIBYTE		0x166d
+#define PCI_DEVICE_ID_BCM1250_HT	0x0002
+
+#define PCI_VENDOR_ID_NETCELL		0x169c
+#define PCI_DEVICE_ID_REVOLUTION	0x0044
+
+#define PCI_VENDOR_ID_LINKSYS		0x1737
+#define PCI_DEVICE_ID_LINKSYS_EG1064	0x1064
+
+#define PCI_VENDOR_ID_ALTIMA		0x173b
+#define PCI_DEVICE_ID_ALTIMA_AC1000	0x03e8
+#define PCI_DEVICE_ID_ALTIMA_AC1001	0x03e9
+#define PCI_DEVICE_ID_ALTIMA_AC9100	0x03ea
+#define PCI_DEVICE_ID_ALTIMA_AC1003	0x03eb
+
+#define PCI_VENDOR_ID_S2IO		0x17d5
+#define PCI_DEVICE_ID_S2IO_WIN		0x5731
+#define PCI_DEVICE_ID_S2IO_UNI		0x5831
+#define PCI_DEVICE_ID_HERC_WIN		0x5732
+#define PCI_DEVICE_ID_HERC_UNI		0x5832
+
+
+#define PCI_VENDOR_ID_SITECOM		0x182d
+#define PCI_DEVICE_ID_SITECOM_DC105V2	0x3069
+
+#define PCI_VENDOR_ID_TOPSPIN		0x1867
+
+#define PCI_VENDOR_ID_TDI               0x192E
+#define PCI_DEVICE_ID_TDI_EHCI          0x0101
+
+
+#define PCI_VENDOR_ID_TEKRAM		0x1de1
+#define PCI_DEVICE_ID_TEKRAM_DC290	0xdc29
+
+#define PCI_VENDOR_ID_HINT             0x3388
+#define PCI_DEVICE_ID_HINT_VXPROII_IDE 0x8013
+
+#define PCI_VENDOR_ID_3DLABS		0x3d3d
+#define PCI_DEVICE_ID_3DLABS_PERMEDIA2	0x0007
+#define PCI_DEVICE_ID_3DLABS_PERMEDIA2V	0x0009
+
+
+#define PCI_VENDOR_ID_AKS		0x416c
+#define PCI_DEVICE_ID_AKS_ALADDINCARD	0x0100
+
+
+
+#define PCI_VENDOR_ID_S3		0x5333
+#define PCI_DEVICE_ID_S3_TRIO		0x8811
+#define PCI_DEVICE_ID_S3_868		0x8880
+#define PCI_DEVICE_ID_S3_968		0x88f0
+#define PCI_DEVICE_ID_S3_SAVAGE4	0x8a25
+#define PCI_DEVICE_ID_S3_PROSAVAGE8	0x8d04
+#define PCI_DEVICE_ID_S3_SONICVIBES	0xca00
+
+#define PCI_VENDOR_ID_DUNORD		0x5544
+#define PCI_DEVICE_ID_DUNORD_I3000	0x0001
+
+
+#define PCI_VENDOR_ID_DCI		0x6666
+#define PCI_DEVICE_ID_DCI_PCCOM4	0x0001
+#define PCI_DEVICE_ID_DCI_PCCOM8	0x0002
+
+#define PCI_VENDOR_ID_INTEL		0x8086
+#define PCI_DEVICE_ID_INTEL_EESSC	0x0008
+#define PCI_DEVICE_ID_INTEL_PXHD_0	0x0320
+#define PCI_DEVICE_ID_INTEL_PXHD_1	0x0321
+#define PCI_DEVICE_ID_INTEL_PXH_0	0x0329
+#define PCI_DEVICE_ID_INTEL_PXH_1	0x032A
+#define PCI_DEVICE_ID_INTEL_PXHV	0x032C
+#define PCI_DEVICE_ID_INTEL_82375	0x0482
+#define PCI_DEVICE_ID_INTEL_82424	0x0483
+#define PCI_DEVICE_ID_INTEL_82378	0x0484
+#define PCI_DEVICE_ID_INTEL_I960	0x0960
+#define PCI_DEVICE_ID_INTEL_I960RM	0x0962
+#define PCI_DEVICE_ID_INTEL_82815_MC	0x1130
+#define PCI_DEVICE_ID_INTEL_82815_CGC	0x1132
+#define PCI_DEVICE_ID_INTEL_82092AA_0	0x1221
+#define PCI_DEVICE_ID_INTEL_7505_0	0x2550
+#define PCI_DEVICE_ID_INTEL_7205_0	0x255d
+#define PCI_DEVICE_ID_INTEL_82437	0x122d
+#define PCI_DEVICE_ID_INTEL_82371FB_0	0x122e
+#define PCI_DEVICE_ID_INTEL_82371FB_1	0x1230
+#define PCI_DEVICE_ID_INTEL_82371MX	0x1234
+#define PCI_DEVICE_ID_INTEL_82441	0x1237
+#define PCI_DEVICE_ID_INTEL_82380FB	0x124b
+#define PCI_DEVICE_ID_INTEL_82439	0x1250
+#define PCI_DEVICE_ID_INTEL_80960_RP	0x1960
+#define PCI_DEVICE_ID_INTEL_82840_HB	0x1a21
+#define PCI_DEVICE_ID_INTEL_82845_HB	0x1a30
+#define PCI_DEVICE_ID_INTEL_82801AA_0	0x2410
+#define PCI_DEVICE_ID_INTEL_82801AA_1	0x2411
+#define PCI_DEVICE_ID_INTEL_82801AA_3	0x2413
+#define PCI_DEVICE_ID_INTEL_82801AA_5	0x2415
+#define PCI_DEVICE_ID_INTEL_82801AA_6	0x2416
+#define PCI_DEVICE_ID_INTEL_82801AA_8	0x2418
+#define PCI_DEVICE_ID_INTEL_82801AB_0	0x2420
+#define PCI_DEVICE_ID_INTEL_82801AB_1	0x2421
+#define PCI_DEVICE_ID_INTEL_82801AB_3	0x2423
+#define PCI_DEVICE_ID_INTEL_82801AB_5	0x2425
+#define PCI_DEVICE_ID_INTEL_82801AB_6	0x2426
+#define PCI_DEVICE_ID_INTEL_82801AB_8	0x2428
+#define PCI_DEVICE_ID_INTEL_82801BA_0	0x2440
+#define PCI_DEVICE_ID_INTEL_82801BA_2	0x2443
+#define PCI_DEVICE_ID_INTEL_82801BA_4	0x2445
+#define PCI_DEVICE_ID_INTEL_82801BA_6	0x2448
+#define PCI_DEVICE_ID_INTEL_82801BA_8	0x244a
+#define PCI_DEVICE_ID_INTEL_82801BA_9	0x244b
+#define PCI_DEVICE_ID_INTEL_82801BA_10	0x244c
+#define PCI_DEVICE_ID_INTEL_82801BA_11	0x244e
+#define PCI_DEVICE_ID_INTEL_82801E_0	0x2450
+#define PCI_DEVICE_ID_INTEL_82801E_11	0x245b
+#define PCI_DEVICE_ID_INTEL_82801CA_0	0x2480
+#define PCI_DEVICE_ID_INTEL_82801CA_3	0x2483
+#define PCI_DEVICE_ID_INTEL_82801CA_5	0x2485
+#define PCI_DEVICE_ID_INTEL_82801CA_6	0x2486
+#define PCI_DEVICE_ID_INTEL_82801CA_10	0x248a
+#define PCI_DEVICE_ID_INTEL_82801CA_11	0x248b
+#define PCI_DEVICE_ID_INTEL_82801CA_12	0x248c
+#define PCI_DEVICE_ID_INTEL_82801DB_0	0x24c0
+#define PCI_DEVICE_ID_INTEL_82801DB_1	0x24c1
+#define PCI_DEVICE_ID_INTEL_82801DB_3	0x24c3
+#define PCI_DEVICE_ID_INTEL_82801DB_5	0x24c5
+#define PCI_DEVICE_ID_INTEL_82801DB_6	0x24c6
+#define PCI_DEVICE_ID_INTEL_82801DB_9	0x24c9
+#define PCI_DEVICE_ID_INTEL_82801DB_10	0x24ca
+#define PCI_DEVICE_ID_INTEL_82801DB_11	0x24cb
+#define PCI_DEVICE_ID_INTEL_82801DB_12	0x24cc
+#define PCI_DEVICE_ID_INTEL_82801EB_0	0x24d0
+#define PCI_DEVICE_ID_INTEL_82801EB_1	0x24d1
+#define PCI_DEVICE_ID_INTEL_82801EB_3	0x24d3
+#define PCI_DEVICE_ID_INTEL_82801EB_5	0x24d5
+#define PCI_DEVICE_ID_INTEL_82801EB_6	0x24d6
+#define PCI_DEVICE_ID_INTEL_82801EB_11	0x24db
+#define PCI_DEVICE_ID_INTEL_ESB_1	0x25a1
+#define PCI_DEVICE_ID_INTEL_ESB_2	0x25a2
+#define PCI_DEVICE_ID_INTEL_ESB_4	0x25a4
+#define PCI_DEVICE_ID_INTEL_ESB_5	0x25a6
+#define PCI_DEVICE_ID_INTEL_ESB_9	0x25ab
+#define PCI_DEVICE_ID_INTEL_82820_HB	0x2500
+#define PCI_DEVICE_ID_INTEL_82820_UP_HB	0x2501
+#define PCI_DEVICE_ID_INTEL_82850_HB	0x2530
+#define PCI_DEVICE_ID_INTEL_82860_HB	0x2531
+#define PCI_DEVICE_ID_INTEL_82845G_HB	0x2560
+#define PCI_DEVICE_ID_INTEL_82845G_IG	0x2562
+#define PCI_DEVICE_ID_INTEL_82865_HB	0x2570
+#define PCI_DEVICE_ID_INTEL_82865_IG	0x2572
+#define PCI_DEVICE_ID_INTEL_82875_HB	0x2578
+#define PCI_DEVICE_ID_INTEL_82915G_HB	0x2580
+#define PCI_DEVICE_ID_INTEL_82915G_IG	0x2582
+#define PCI_DEVICE_ID_INTEL_82915GM_HB	0x2590
+#define PCI_DEVICE_ID_INTEL_82915GM_IG	0x2592
+#define PCI_DEVICE_ID_INTEL_82945G_HB	0x2770
+#define PCI_DEVICE_ID_INTEL_82945G_IG	0x2772
+#define PCI_DEVICE_ID_INTEL_ICH6_0	0x2640
+#define PCI_DEVICE_ID_INTEL_ICH6_1	0x2641
+#define PCI_DEVICE_ID_INTEL_ICH6_2	0x2642
+#define PCI_DEVICE_ID_INTEL_ICH6_16	0x266a
+#define PCI_DEVICE_ID_INTEL_ICH6_17	0x266d
+#define PCI_DEVICE_ID_INTEL_ICH6_18	0x266e
+#define PCI_DEVICE_ID_INTEL_ICH6_19	0x266f
+#define PCI_DEVICE_ID_INTEL_ESB2_0	0x2670
+#define PCI_DEVICE_ID_INTEL_ESB2_14	0x2698
+#define PCI_DEVICE_ID_INTEL_ESB2_17	0x269b
+#define PCI_DEVICE_ID_INTEL_ESB2_18	0x269e
+#define PCI_DEVICE_ID_INTEL_ICH7_0	0x27b8
+#define PCI_DEVICE_ID_INTEL_ICH7_1	0x27b9
+#define PCI_DEVICE_ID_INTEL_ICH7_30	0x27b0
+#define PCI_DEVICE_ID_INTEL_ICH7_31	0x27bd
+#define PCI_DEVICE_ID_INTEL_ICH7_17	0x27da
+#define PCI_DEVICE_ID_INTEL_ICH7_19	0x27dd
+#define PCI_DEVICE_ID_INTEL_ICH7_20	0x27de
+#define PCI_DEVICE_ID_INTEL_ICH7_21	0x27df
+#define PCI_DEVICE_ID_INTEL_82855PM_HB	0x3340
+#define PCI_DEVICE_ID_INTEL_82830_HB	0x3575
+#define PCI_DEVICE_ID_INTEL_82830_CGC	0x3577
+#define PCI_DEVICE_ID_INTEL_82855GM_HB	0x3580
+#define PCI_DEVICE_ID_INTEL_82855GM_IG	0x3582
+#define PCI_DEVICE_ID_INTEL_E7520_MCH	0x3590
+#define PCI_DEVICE_ID_INTEL_E7320_MCH	0x3592
+#define PCI_DEVICE_ID_INTEL_MCH_PA	0x3595
+#define PCI_DEVICE_ID_INTEL_MCH_PA1	0x3596
+#define PCI_DEVICE_ID_INTEL_MCH_PB	0x3597
+#define PCI_DEVICE_ID_INTEL_MCH_PB1	0x3598
+#define PCI_DEVICE_ID_INTEL_MCH_PC	0x3599
+#define PCI_DEVICE_ID_INTEL_MCH_PC1	0x359a
+#define PCI_DEVICE_ID_INTEL_E7525_MCH	0x359e
+#define PCI_DEVICE_ID_INTEL_82371SB_0	0x7000
+#define PCI_DEVICE_ID_INTEL_82371SB_1	0x7010
+#define PCI_DEVICE_ID_INTEL_82371SB_2	0x7020
+#define PCI_DEVICE_ID_INTEL_82437VX	0x7030
+#define PCI_DEVICE_ID_INTEL_82439TX	0x7100
+#define PCI_DEVICE_ID_INTEL_82371AB_0	0x7110
+#define PCI_DEVICE_ID_INTEL_82371AB	0x7111
+#define PCI_DEVICE_ID_INTEL_82371AB_2	0x7112
+#define PCI_DEVICE_ID_INTEL_82371AB_3	0x7113
+#define PCI_DEVICE_ID_INTEL_82810_MC1	0x7120
+#define PCI_DEVICE_ID_INTEL_82810_IG1	0x7121
+#define PCI_DEVICE_ID_INTEL_82810_MC3	0x7122
+#define PCI_DEVICE_ID_INTEL_82810_IG3	0x7123
+#define PCI_DEVICE_ID_INTEL_82810E_MC	0x7124
+#define PCI_DEVICE_ID_INTEL_82810E_IG	0x7125
+#define PCI_DEVICE_ID_INTEL_82443LX_0	0x7180
+#define PCI_DEVICE_ID_INTEL_82443LX_1	0x7181
+#define PCI_DEVICE_ID_INTEL_82443BX_0	0x7190
+#define PCI_DEVICE_ID_INTEL_82443BX_1	0x7191
+#define PCI_DEVICE_ID_INTEL_82443BX_2	0x7192
+#define PCI_DEVICE_ID_INTEL_440MX	0x7195
+#define PCI_DEVICE_ID_INTEL_440MX_6	0x7196
+#define PCI_DEVICE_ID_INTEL_82443MX_0	0x7198
+#define PCI_DEVICE_ID_INTEL_82443MX_1	0x7199
+#define PCI_DEVICE_ID_INTEL_82443MX_3	0x719b
+#define PCI_DEVICE_ID_INTEL_82443GX_0	0x71a0
+#define PCI_DEVICE_ID_INTEL_82443GX_2	0x71a2
+#define PCI_DEVICE_ID_INTEL_82372FB_1	0x7601
+#define PCI_DEVICE_ID_INTEL_82454GX	0x84c4
+#define PCI_DEVICE_ID_INTEL_82451NX	0x84ca
+#define PCI_DEVICE_ID_INTEL_82454NX     0x84cb
+#define PCI_DEVICE_ID_INTEL_84460GX	0x84ea
+#define PCI_DEVICE_ID_INTEL_IXP4XX	0x8500
+#define PCI_DEVICE_ID_INTEL_IXP2800	0x9004
+#define PCI_DEVICE_ID_INTEL_S21152BB	0xb152
+
+#define PCI_VENDOR_ID_COMPUTONE		0x8e0e
+#define PCI_DEVICE_ID_COMPUTONE_IP2EX	0x0291
+#define PCI_DEVICE_ID_COMPUTONE_PG	0x0302
+#define PCI_SUBVENDOR_ID_COMPUTONE	0x8e0e
+#define PCI_SUBDEVICE_ID_COMPUTONE_PG4	0x0001
+#define PCI_SUBDEVICE_ID_COMPUTONE_PG8	0x0002
+#define PCI_SUBDEVICE_ID_COMPUTONE_PG6	0x0003
+
+#define PCI_VENDOR_ID_KTI		0x8e2e
+
+#define PCI_VENDOR_ID_ADAPTEC		0x9004
+#define PCI_DEVICE_ID_ADAPTEC_7810	0x1078
+#define PCI_DEVICE_ID_ADAPTEC_7821	0x2178
+#define PCI_DEVICE_ID_ADAPTEC_38602	0x3860
+#define PCI_DEVICE_ID_ADAPTEC_7850	0x5078
+#define PCI_DEVICE_ID_ADAPTEC_7855	0x5578
+#define PCI_DEVICE_ID_ADAPTEC_3860	0x6038
+#define PCI_DEVICE_ID_ADAPTEC_1480A	0x6075
+#define PCI_DEVICE_ID_ADAPTEC_7860	0x6078
+#define PCI_DEVICE_ID_ADAPTEC_7861	0x6178
+#define PCI_DEVICE_ID_ADAPTEC_7870	0x7078
+#define PCI_DEVICE_ID_ADAPTEC_7871	0x7178
+#define PCI_DEVICE_ID_ADAPTEC_7872	0x7278
+#define PCI_DEVICE_ID_ADAPTEC_7873	0x7378
+#define PCI_DEVICE_ID_ADAPTEC_7874	0x7478
+#define PCI_DEVICE_ID_ADAPTEC_7895	0x7895
+#define PCI_DEVICE_ID_ADAPTEC_7880	0x8078
+#define PCI_DEVICE_ID_ADAPTEC_7881	0x8178
+#define PCI_DEVICE_ID_ADAPTEC_7882	0x8278
+#define PCI_DEVICE_ID_ADAPTEC_7883	0x8378
+#define PCI_DEVICE_ID_ADAPTEC_7884	0x8478
+#define PCI_DEVICE_ID_ADAPTEC_7885	0x8578
+#define PCI_DEVICE_ID_ADAPTEC_7886	0x8678
+#define PCI_DEVICE_ID_ADAPTEC_7887	0x8778
+#define PCI_DEVICE_ID_ADAPTEC_7888	0x8878
+
+#define PCI_VENDOR_ID_ADAPTEC2		0x9005
+#define PCI_DEVICE_ID_ADAPTEC2_2940U2	0x0010
+#define PCI_DEVICE_ID_ADAPTEC2_2930U2	0x0011
+#define PCI_DEVICE_ID_ADAPTEC2_7890B	0x0013
+#define PCI_DEVICE_ID_ADAPTEC2_7890	0x001f
+#define PCI_DEVICE_ID_ADAPTEC2_3940U2	0x0050
+#define PCI_DEVICE_ID_ADAPTEC2_3950U2D	0x0051
+#define PCI_DEVICE_ID_ADAPTEC2_7896	0x005f
+#define PCI_DEVICE_ID_ADAPTEC2_7892A	0x0080
+#define PCI_DEVICE_ID_ADAPTEC2_7892B	0x0081
+#define PCI_DEVICE_ID_ADAPTEC2_7892D	0x0083
+#define PCI_DEVICE_ID_ADAPTEC2_7892P	0x008f
+#define PCI_DEVICE_ID_ADAPTEC2_7899A	0x00c0
+#define PCI_DEVICE_ID_ADAPTEC2_7899B	0x00c1
+#define PCI_DEVICE_ID_ADAPTEC2_7899D	0x00c3
+#define PCI_DEVICE_ID_ADAPTEC2_7899P	0x00cf
+#define PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN   0x0500
+#define PCI_DEVICE_ID_ADAPTEC2_SCAMP	0x0503
+
+
+#define PCI_VENDOR_ID_HOLTEK		0x9412
+#define PCI_DEVICE_ID_HOLTEK_6565	0x6565
+
+#define PCI_VENDOR_ID_NETMOS		0x9710
+#define PCI_DEVICE_ID_NETMOS_9705	0x9705
+#define PCI_DEVICE_ID_NETMOS_9715	0x9715
+#define PCI_DEVICE_ID_NETMOS_9735	0x9735
+#define PCI_DEVICE_ID_NETMOS_9745	0x9745
+#define PCI_DEVICE_ID_NETMOS_9755	0x9755
+#define PCI_DEVICE_ID_NETMOS_9805	0x9805
+#define PCI_DEVICE_ID_NETMOS_9815	0x9815
+#define PCI_DEVICE_ID_NETMOS_9835	0x9835
+#define PCI_DEVICE_ID_NETMOS_9845	0x9845
+#define PCI_DEVICE_ID_NETMOS_9855	0x9855
+
+#define PCI_SUBVENDOR_ID_EXSYS		0xd84d
+#define PCI_SUBDEVICE_ID_EXSYS_4014	0x4014
+#define PCI_SUBDEVICE_ID_EXSYS_4055	0x4055
+
+#define PCI_VENDOR_ID_TIGERJET		0xe159
+#define PCI_DEVICE_ID_TIGERJET_300	0x0001
+#define PCI_DEVICE_ID_TIGERJET_100	0x0002
+
+#define PCI_VENDOR_ID_TTTECH		0x0357
+#define PCI_DEVICE_ID_TTTECH_MC322	0x000A
+
+#define PCI_VENDOR_ID_XILINX_RME	0xea60
+#define PCI_DEVICE_ID_RME_DIGI32	0x9896
+#define PCI_DEVICE_ID_RME_DIGI32_PRO	0x9897
+#define PCI_DEVICE_ID_RME_DIGI32_8	0x9898
+
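
The constants above are consumed by driver probe tables.  A minimal
sketch of typical use, matching the Realtek 8139 IDs defined earlier
(the table and variable names are hypothetical, not part of this patch):

	#include <linux/module.h>
	#include <linux/pci.h>

	/* Hypothetical match table: claim the Realtek 8139 NIC. */
	static struct pci_device_id example_pci_tbl[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,
			     PCI_DEVICE_ID_REALTEK_8139) },
		{ 0, }
	};
	MODULE_DEVICE_TABLE(pci, example_pci_tbl);

At probe time the PCI core compares each entry against the vendor and
device words read from the device's configuration space.
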
diff -urN oldtree/include/linux/poll.h newtree/include/linux/poll.h
--- oldtree/include/linux/poll.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/include/linux/poll.h	2006-01-29 11:18:10.842640464 +0000
@@ -11,6 +11,15 @@
 #include <linux/mm.h>
 #include <asm/uaccess.h>
 
+/* At most ~832 bytes of stack space are used in sys_select/sys_poll
+   before additional memory is allocated. */
+#define MAX_STACK_ALLOC 832
+#define FRONTEND_STACK_ALLOC  256
+#define SELECT_STACK_ALLOC    FRONTEND_STACK_ALLOC
+#define POLL_STACK_ALLOC      FRONTEND_STACK_ALLOC
+#define WQUEUES_STACK_ALLOC   (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
+#define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))
+
 struct poll_table_struct;
 
 /* 
@@ -33,6 +42,12 @@
 	pt->qproc = qproc;
 }
 
+struct poll_table_entry {
+	struct file *filp;
+	wait_queue_t wait;
+	wait_queue_head_t *wait_address;
+};
+
 /*
 * Structures and helpers for sys_select/sys_poll
  */
@@ -40,6 +55,8 @@
 	poll_table pt;
 	struct poll_table_page * table;
 	int error;
+	int inline_index;
+	struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
 };
 
 extern void poll_initwait(struct poll_wqueues *pwq);
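
The new constants size an array of poll_table_entry structures embedded
directly in struct poll_wqueues, so a select() or poll() over a handful
of descriptors can register its waitqueue entries on the stack instead
of allocating a poll_table_page.  A minimal sketch of the intended fast
path, assuming the real consumer lives in fs/select.c (the helper name
here is illustrative):

	/* Hand out embedded entries while they last; a NULL return
	 * tells the caller to fall back to the page allocator as
	 * before.
	 */
	static struct poll_table_entry *get_entry(struct poll_wqueues *p)
	{
		if (p->inline_index < N_INLINE_POLL_ENTRIES)
			return p->inline_entries + p->inline_index++;
		return NULL;
	}
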
diff -urN oldtree/include/linux/sched.h newtree/include/linux/sched.h
--- oldtree/include/linux/sched.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/include/linux/sched.h	2006-01-29 11:18:10.843640312 +0000
@@ -38,6 +38,7 @@
 #include <linux/auxvec.h>	/* For AT_VECTOR_SIZE */
 
 struct exec_domain;
+struct bio;
 
 /*
  * cloning flags:
@@ -823,6 +824,9 @@
 /* journalling filesystem info */
 	void *journal_info;
 
+/* stacked block device info */
+	struct bio *bio_list, **bio_tail;
+
 /* VM state */
 	struct reclaim_state *reclaim_state;
 
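
The bio_list/bio_tail pair gives every task a singly linked list of
pending bios, which lets stacked block drivers (md, dm) turn what would
be recursive bio submission into iteration and so bound stack usage.  A
simplified sketch of the queuing side, assuming the submission path
tests bio_tail to detect re-entry (not the exact block-layer code):

	/* Submission already active on this task: append the bio to
	 * the per-task list instead of recursing deeper.
	 */
	if (current->bio_tail) {
		bio->bi_next = NULL;
		*current->bio_tail = bio;
		current->bio_tail = &bio->bi_next;
		return;
	}
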
diff -urN oldtree/include/linux/uaccess.h newtree/include/linux/uaccess.h
--- oldtree/include/linux/uaccess.h	1970-01-01 00:00:00.000000000 +0000
+++ newtree/include/linux/uaccess.h	2006-01-29 11:18:20.625153296 +0000
@@ -0,0 +1,22 @@
+#ifndef __LINUX_UACCESS_H__
+#define __LINUX_UACCESS_H__
+
+#include <asm/uaccess.h>
+
+#ifndef ARCH_HAS_NOCACHE_UACCESS
+
+static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
+				const void __user *from, unsigned long n)
+{
+	return __copy_from_user_inatomic(to, from, n);
+}
+
+static inline unsigned long __copy_from_user_nocache(void *to,
+				const void __user *from, unsigned long n)
+{
+	return __copy_from_user(to, from, n);
+}
+
+#endif		/* ARCH_HAS_NOCACHE_UACCESS */
+
+#endif		/* __LINUX_UACCESS_H__ */
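
Architectures that define ARCH_HAS_NOCACHE_UACCESS can override these
helpers with copies that use non-temporal stores; everywhere else they
degrade to the ordinary copies above, so callers never need an #ifdef.
A hedged usage sketch (the wrapper below is hypothetical):

	#include <linux/uaccess.h>

	/* Copy user data that will be written out and not re-read
	 * soon, so dragging it through the CPU cache is wasted work.
	 */
	static inline unsigned long
	copy_payload(void *dst, const void __user *src, unsigned long len)
	{
		return __copy_from_user_nocache(dst, src, len);
	}
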
diff -urN oldtree/lib/Kconfig.debug newtree/lib/Kconfig.debug
--- oldtree/lib/Kconfig.debug	2006-01-28 19:08:42.000000000 +0000
+++ newtree/lib/Kconfig.debug	2006-01-29 11:18:20.613155120 +0000
@@ -179,7 +179,7 @@
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
 	depends on DEBUG_KERNEL && (X86 || CRIS || M68K || M68KNOMMU || FRV || UML)
-	default y if DEBUG_INFO && UML
+	default y
 	help
 	  If you say Y here the resulting kernel image will be slightly larger
 	  and slower, but it might give very useful debugging information on
--- oldtree/lib/Makefile	2006-01-28 19:08:42.000000000 +0000
+++ newtree/lib/Makefile	2006-01-29 11:18:20.570161656 +0000
@@ -9,7 +9,7 @@
 
 lib-y	+= kobject.o kref.o kobject_uevent.o klist.o
 
-obj-y += sort.o parser.o halfmd4.o
+obj-y += sort.o parser.o halfmd4.o iomap_copy.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
diff -urN oldtree/lib/iomap_copy.c newtree/lib/iomap_copy.c
--- oldtree/lib/iomap_copy.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/lib/iomap_copy.c	2006-01-29 11:18:20.569161808 +0000
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2006 PathScale, Inc.  All Rights Reserved.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+
+/**
+ * __iowrite32_copy - copy data to MMIO space, in 32-bit units
+ * @to: destination, in MMIO space (must be 32-bit aligned)
+ * @from: source (must be 32-bit aligned)
+ * @count: number of 32-bit quantities to copy
+ *
+ * Copy data from kernel space to MMIO space, in units of 32 bits at a
+ * time.  Order of access is not guaranteed, nor is a memory barrier
+ * performed afterwards.
+ */
+void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
+					    const void *from,
+					    size_t count)
+{
+	u32 __iomem *dst = to;
+	const u32 *src = from;
+	const u32 *end = src + count;
+
+	while (src < end)
+		__raw_writel(*src++, dst++);
+}
+EXPORT_SYMBOL_GPL(__iowrite32_copy);
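+
+/*
+ * Usage sketch (illustrative, not from a real driver): posting a small
+ * command block to a device's MMIO command buffer.  CMD_WORDS and the
+ * register offsets are hypothetical, and the wmb() is the caller's
+ * responsibility, since __iowrite32_copy() performs no barrier itself:
+ *
+ *	#define CMD_WORDS	16
+ *
+ *	static void example_post_cmd(void __iomem *mmio, const u32 *cmd)
+ *	{
+ *		__iowrite32_copy(mmio + EXAMPLE_CMD_OFF, cmd, CMD_WORDS);
+ *		wmb();
+ *		writel(1, mmio + EXAMPLE_DOORBELL_OFF);
+ *	}
+ */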
diff -urN oldtree/mm/filemap.c newtree/mm/filemap.c
--- oldtree/mm/filemap.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/mm/filemap.c	2006-01-29 11:18:20.624153448 +0000
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/compiler.h>
 #include <linux/fs.h>
+#include <linux/uaccess.h>
 #include <linux/aio.h>
 #include <linux/kernel_stat.h>
 #include <linux/mm.h>
@@ -34,7 +35,6 @@
  */
 #include <linux/buffer_head.h> /* for generic_osync_inode */
 
-#include <asm/uaccess.h>
 #include <asm/mman.h>
 
 static ssize_t
@@ -1736,7 +1736,7 @@
 		int copy = min(bytes, iov->iov_len - base);
 
 		base = 0;
-		left = __copy_from_user_inatomic(vaddr, buf, copy);
+		left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
 		copied += copy;
 		bytes -= copy;
 		vaddr += copy;
diff -urN oldtree/mm/filemap.c.orig newtree/mm/filemap.c.orig
--- oldtree/mm/filemap.c.orig	1970-01-01 00:00:00.000000000 +0000
+++ newtree/mm/filemap.c.orig	2006-01-28 19:08:42.000000000 +0000
@@ -0,0 +1,2270 @@
+/*
+ *	linux/mm/filemap.c
+ *
+ * Copyright (C) 1994-1999  Linus Torvalds
+ */
+
+/*
+ * This file handles the generic file mmap semantics used by
+ * most "normal" filesystems (but you don't /have/ to use this:
+ * the NFS filesystem used to do this differently, for example)
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/compiler.h>
+#include <linux/fs.h>
+#include <linux/aio.h>
+#include <linux/kernel_stat.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include <linux/file.h>
+#include <linux/uio.h>
+#include <linux/hash.h>
+#include <linux/writeback.h>
+#include <linux/pagevec.h>
+#include <linux/blkdev.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include "filemap.h"
+/*
+ * FIXME: remove all knowledge of the buffer layer from the core VM
+ */
+#include <linux/buffer_head.h> /* for generic_osync_inode */
+
+#include <asm/uaccess.h>
+#include <asm/mman.h>
+
+static ssize_t
+generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+	loff_t offset, unsigned long nr_segs);
+
+/*
+ * Shared mappings implemented 30.11.1994. It's not fully working yet,
+ * though.
+ *
+ * Shared mappings now work. 15.8.1995  Bruno.
+ *
+ * finished 'unifying' the page and buffer cache and SMP-threaded the
+ * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
+ *
+ * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
+ */
+
+/*
+ * Lock ordering:
+ *
+ *  ->i_mmap_lock		(vmtruncate)
+ *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
+ *      ->swap_lock		(exclusive_swap_page, others)
+ *        ->mapping->tree_lock
+ *
+ *  ->i_sem
+ *    ->i_mmap_lock		(truncate->unmap_mapping_range)
+ *
+ *  ->mmap_sem
+ *    ->i_mmap_lock
+ *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
+ *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
+ *
+ *  ->mmap_sem
+ *    ->lock_page		(access_process_vm)
+ *
+ *  ->mmap_sem
+ *    ->i_sem			(msync)
+ *
+ *  ->i_sem
+ *    ->i_alloc_sem             (various)
+ *
+ *  ->inode_lock
+ *    ->sb_lock			(fs/fs-writeback.c)
+ *    ->mapping->tree_lock	(__sync_single_inode)
+ *
+ *  ->i_mmap_lock
+ *    ->anon_vma.lock		(vma_adjust)
+ *
+ *  ->anon_vma.lock
+ *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
+ *
+ *  ->page_table_lock or pte_lock
+ *    ->swap_lock		(try_to_unmap_one)
+ *    ->private_lock		(try_to_unmap_one)
+ *    ->tree_lock		(try_to_unmap_one)
+ *    ->zone.lru_lock		(follow_page->mark_page_accessed)
+ *    ->private_lock		(page_remove_rmap->set_page_dirty)
+ *    ->tree_lock		(page_remove_rmap->set_page_dirty)
+ *    ->inode_lock		(page_remove_rmap->set_page_dirty)
+ *    ->inode_lock		(zap_pte_range->set_page_dirty)
+ *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
+ *
+ *  ->task->proc_lock
+ *    ->dcache_lock		(proc_pid_lookup)
+ */
+
+/*
+ * Remove a page from the page cache and free it. Caller has to make
+ * sure the page is locked and that nobody else uses it - or that usage
+ * is safe.  The caller must hold a write_lock on the mapping's tree_lock.
+ */
+void __remove_from_page_cache(struct page *page)
+{
+	struct address_space *mapping = page->mapping;
+
+	radix_tree_delete(&mapping->page_tree, page->index);
+	page->mapping = NULL;
+	mapping->nrpages--;
+	pagecache_acct(-1);
+}
+
+void remove_from_page_cache(struct page *page)
+{
+	struct address_space *mapping = page->mapping;
+
+	BUG_ON(!PageLocked(page));
+
+	write_lock_irq(&mapping->tree_lock);
+	__remove_from_page_cache(page);
+	write_unlock_irq(&mapping->tree_lock);
+}
+
+static int sync_page(void *word)
+{
+	struct address_space *mapping;
+	struct page *page;
+
+	page = container_of((unsigned long *)word, struct page, flags);
+
+	/*
+	 * page_mapping() is being called without PG_locked held.
+	 * Some knowledge of the state and use of the page is used to
+	 * reduce the requirements down to a memory barrier.
+	 * The danger here is of a stale page_mapping() return value
+	 * indicating a struct address_space different from the one it's
+	 * associated with when it is associated with one.
+	 * After smp_mb(), it's either the correct page_mapping() for
+	 * the page, or an old page_mapping() and the page's own
+	 * page_mapping() has gone NULL.
+	 * The ->sync_page() address_space operation must tolerate
+	 * page_mapping() going NULL. By an amazing coincidence,
+	 * this comes about because none of the users of the page
+	 * in the ->sync_page() methods make essential use of the
+	 * page_mapping(), merely passing the page down to the backing
+	 * device's unplug functions when it's non-NULL, which in turn
+	 * ignore it for all cases but swap, where only page_private(page) is
+	 * of interest. When page_mapping() does go NULL, the entire
+	 * call stack gracefully ignores the page and returns.
+	 * -- wli
+	 */
+	smp_mb();
+	mapping = page_mapping(page);
+	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
+		mapping->a_ops->sync_page(page);
+	io_schedule();
+	return 0;
+}
+
+/**
+ * __filemap_fdatawrite_range - start writeback against all of a mapping's
+ * dirty pages that lie within the byte offsets <start, end>
+ * @mapping:	address space structure to write
+ * @start:	offset in bytes where the range starts
+ * @end:	offset in bytes where the range ends
+ * @sync_mode:	WB_SYNC_ALL or WB_SYNC_NONE; see below
+ *
+ * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
+ * opposed to a regular memory cleansing writeback.  The difference between
+ * these two operations is that if a dirty page/buffer is encountered, it must
+ * be waited upon, and not just skipped over.
+ */
+static int __filemap_fdatawrite_range(struct address_space *mapping,
+	loff_t start, loff_t end, int sync_mode)
+{
+	int ret;
+	struct writeback_control wbc = {
+		.sync_mode = sync_mode,
+		.nr_to_write = mapping->nrpages * 2,
+		.start = start,
+		.end = end,
+	};
+
+	if (!mapping_cap_writeback_dirty(mapping))
+		return 0;
+
+	ret = do_writepages(mapping, &wbc);
+	return ret;
+}
+
+static inline int __filemap_fdatawrite(struct address_space *mapping,
+	int sync_mode)
+{
+	return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
+}
+
+int filemap_fdatawrite(struct address_space *mapping)
+{
+	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
+}
+EXPORT_SYMBOL(filemap_fdatawrite);
+
+static int filemap_fdatawrite_range(struct address_space *mapping,
+	loff_t start, loff_t end)
+{
+	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
+}
+
+/*
+ * This is a mostly non-blocking flush.  Not suitable for data-integrity
+ * purposes - I/O may not be started against all dirty pages.
+ */
+int filemap_flush(struct address_space *mapping)
+{
+	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
+}
+EXPORT_SYMBOL(filemap_flush);
+
+/*
+ * Wait for writeback to complete against pages indexed by start->end
+ * inclusive
+ */
+static int wait_on_page_writeback_range(struct address_space *mapping,
+				pgoff_t start, pgoff_t end)
+{
+	struct pagevec pvec;
+	int nr_pages;
+	int ret = 0;
+	pgoff_t index;
+
+	if (end < start)
+		return 0;
+
+	pagevec_init(&pvec, 0);
+	index = start;
+	while ((index <= end) &&
+			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+			PAGECACHE_TAG_WRITEBACK,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
+		unsigned i;
+
+		for (i = 0; i < nr_pages; i++) {
+			struct page *page = pvec.pages[i];
+
+			/* until radix tree lookup accepts end_index */
+			if (page->index > end)
+				continue;
+
+			wait_on_page_writeback(page);
+			if (PageError(page))
+				ret = -EIO;
+		}
+		pagevec_release(&pvec);
+		cond_resched();
+	}
+
+	/* Check for outstanding write errors */
+	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
+		ret = -ENOSPC;
+	if (test_and_clear_bit(AS_EIO, &mapping->flags))
+		ret = -EIO;
+
+	return ret;
+}
+
+/*
+ * Write and wait upon all the pages in the passed range.  This is a "data
+ * integrity" operation.  It waits upon in-flight writeout before starting and
+ * waiting upon new writeout.  If there was an IO error, return it.
+ *
+ * We need to re-take i_sem during the generic_osync_inode list walk because
+ * it is otherwise livelockable.
+ */
+int sync_page_range(struct inode *inode, struct address_space *mapping,
+			loff_t pos, size_t count)
+{
+	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+	int ret;
+
+	if (!mapping_cap_writeback_dirty(mapping) || !count)
+		return 0;
+	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
+	if (ret == 0) {
+		down(&inode->i_sem);
+		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
+		up(&inode->i_sem);
+	}
+	if (ret == 0)
+		ret = wait_on_page_writeback_range(mapping, start, end);
+	return ret;
+}
+EXPORT_SYMBOL(sync_page_range);
+
+/*
+ * Note: Holding i_sem across sync_page_range_nolock is not a good idea
+ * as it forces O_SYNC writers to different parts of the same file
+ * to be serialised right until I/O completion.
+ */
+static int sync_page_range_nolock(struct inode *inode,
+				  struct address_space *mapping,
+				  loff_t pos, size_t count)
+{
+	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
+	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
+	int ret;
+
+	if (!mapping_cap_writeback_dirty(mapping) || !count)
+		return 0;
+	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
+	if (ret == 0)
+		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
+	if (ret == 0)
+		ret = wait_on_page_writeback_range(mapping, start, end);
+	return ret;
+}
+
+/**
+ * filemap_fdatawait - walk the list of under-writeback pages of the given
+ *     address space and wait for all of them.
+ *
+ * @mapping: address space structure to wait for
+ */
+int filemap_fdatawait(struct address_space *mapping)
+{
+	loff_t i_size = i_size_read(mapping->host);
+
+	if (i_size == 0)
+		return 0;
+
+	return wait_on_page_writeback_range(mapping, 0,
+				(i_size - 1) >> PAGE_CACHE_SHIFT);
+}
+EXPORT_SYMBOL(filemap_fdatawait);
+
+int filemap_write_and_wait(struct address_space *mapping)
+{
+	int retval = 0;
+
+	if (mapping->nrpages) {
+		retval = filemap_fdatawrite(mapping);
+		if (retval == 0)
+			retval = filemap_fdatawait(mapping);
+	}
+	return retval;
+}
+
+int filemap_write_and_wait_range(struct address_space *mapping,
+				 loff_t lstart, loff_t lend)
+{
+	int retval = 0;
+
+	if (mapping->nrpages) {
+		retval = __filemap_fdatawrite_range(mapping, lstart, lend,
+						    WB_SYNC_ALL);
+		if (retval == 0)
+			retval = wait_on_page_writeback_range(mapping,
+						    lstart >> PAGE_CACHE_SHIFT,
+						    lend >> PAGE_CACHE_SHIFT);
+	}
+	return retval;
+}
+
+/*
+ * This function is used to add newly allocated pagecache pages:
+ * the page is new, so we can just run SetPageLocked() against it.
+ * The other page state flags were set by rmqueue().
+ *
+ * This function does not add the page to the LRU.  The caller must do that.
+ */
+int add_to_page_cache(struct page *page, struct address_space *mapping,
+		pgoff_t offset, gfp_t gfp_mask)
+{
+	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+
+	if (error == 0) {
+		write_lock_irq(&mapping->tree_lock);
+		error = radix_tree_insert(&mapping->page_tree, offset, page);
+		if (!error) {
+			page_cache_get(page);
+			SetPageLocked(page);
+			page->mapping = mapping;
+			page->index = offset;
+			mapping->nrpages++;
+			pagecache_acct(1);
+		}
+		write_unlock_irq(&mapping->tree_lock);
+		radix_tree_preload_end();
+	}
+	return error;
+}
+
+EXPORT_SYMBOL(add_to_page_cache);
+
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+				pgoff_t offset, gfp_t gfp_mask)
+{
+	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
+	if (ret == 0)
+		lru_cache_add(page);
+	return ret;
+}
+
+/*
+ * In order to wait for pages to become available there must be
+ * waitqueues associated with pages.  Rather than one queue per page,
+ * we use a hash table of waitqueues: all waiters for pages in the
+ * same bucket share one queue, all of them are woken when any page
+ * in the bucket becomes available, and each woken context re-checks
+ * that the page it actually cares about became available.  This
+ * saves space at the cost of "thundering herd" phenomena during
+ * rare hash collisions.
+ */
+static wait_queue_head_t *page_waitqueue(struct page *page)
+{
+	const struct zone *zone = page_zone(page);
+
+	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
+}
+
+static inline void wake_up_page(struct page *page, int bit)
+{
+	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
+}
+
+void fastcall wait_on_page_bit(struct page *page, int bit_nr)
+{
+	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
+
+	if (test_bit(bit_nr, &page->flags))
+		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
+							TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(wait_on_page_bit);
+
+/**
+ * unlock_page() - unlock a locked page
+ *
+ * @page: the page
+ *
+ * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
+ * Also wakes sleepers in wait_on_page_writeback() because the wakeup
+ * mechanism between PageLocked pages and PageWriteback pages is shared.
+ * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
+ *
+ * The first mb is necessary to safely close the critical section opened by the
+ * TestSetPageLocked(), the second mb is necessary to enforce ordering between
+ * the clear_bit and the read of the waitqueue (to avoid SMP races with a
+ * parallel wait_on_page_locked()).
+ */
+void fastcall unlock_page(struct page *page)
+{
+	smp_mb__before_clear_bit();
+	if (!TestClearPageLocked(page))
+		BUG();
+	smp_mb__after_clear_bit(); 
+	wake_up_page(page, PG_locked);
+}
+EXPORT_SYMBOL(unlock_page);
+
+/*
+ * End writeback against a page.
+ */
+void end_page_writeback(struct page *page)
+{
+	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
+		if (!test_clear_page_writeback(page))
+			BUG();
+	}
+	smp_mb__after_clear_bit();
+	wake_up_page(page, PG_writeback);
+}
+EXPORT_SYMBOL(end_page_writeback);
+
+/*
+ * Get a lock on the page, assuming we need to sleep to get it.
+ *
+ * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
+ * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
+ * chances are that on the second loop, the block layer's plug list is empty,
+ * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
+ */
+void fastcall __lock_page(struct page *page)
+{
+	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
+
+	__wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
+							TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(__lock_page);
+
+/*
+ * a rather lightweight function, finding and getting a reference to a
+ * hashed page atomically.
+ */
+struct page * find_get_page(struct address_space *mapping, unsigned long offset)
+{
+	struct page *page;
+
+	read_lock_irq(&mapping->tree_lock);
+	page = radix_tree_lookup(&mapping->page_tree, offset);
+	if (page)
+		page_cache_get(page);
+	read_unlock_irq(&mapping->tree_lock);
+	return page;
+}
+
+EXPORT_SYMBOL(find_get_page);
+
+/*
+ * Same as above, but trylock it instead of incrementing the count.
+ */
+struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
+{
+	struct page *page;
+
+	read_lock_irq(&mapping->tree_lock);
+	page = radix_tree_lookup(&mapping->page_tree, offset);
+	if (page && TestSetPageLocked(page))
+		page = NULL;
+	read_unlock_irq(&mapping->tree_lock);
+	return page;
+}
+
+EXPORT_SYMBOL(find_trylock_page);
+
+/**
+ * find_lock_page - locate, pin and lock a pagecache page
+ *
+ * @mapping: the address_space to search
+ * @offset: the page index
+ *
+ * Locates the desired pagecache page, locks it, increments its reference
+ * count and returns its address.
+ *
+ * Returns NULL if the page was not present. find_lock_page() may sleep.
+ */
+struct page *find_lock_page(struct address_space *mapping,
+				unsigned long offset)
+{
+	struct page *page;
+
+	read_lock_irq(&mapping->tree_lock);
+repeat:
+	page = radix_tree_lookup(&mapping->page_tree, offset);
+	if (page) {
+		page_cache_get(page);
+		if (TestSetPageLocked(page)) {
+			read_unlock_irq(&mapping->tree_lock);
+			lock_page(page);
+			read_lock_irq(&mapping->tree_lock);
+
+			/* Has the page been truncated while we slept? */
+			if (page->mapping != mapping || page->index != offset) {
+				unlock_page(page);
+				page_cache_release(page);
+				goto repeat;
+			}
+		}
+	}
+	read_unlock_irq(&mapping->tree_lock);
+	return page;
+}
+
+EXPORT_SYMBOL(find_lock_page);
+
+/**
+ * find_or_create_page - locate or add a pagecache page
+ *
+ * @mapping: the page's address_space
+ * @index: the page's index into the mapping
+ * @gfp_mask: page allocation mode
+ *
+ * Locates a page in the pagecache.  If the page is not present, a new page
+ * is allocated using @gfp_mask and is added to the pagecache and to the VM's
+ * LRU list.  The returned page is locked and has its reference count
+ * incremented.
+ *
+ * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
+ * allocation!
+ *
+ * find_or_create_page() returns the desired page's address, or NULL on
+ * memory exhaustion.
+ */
+struct page *find_or_create_page(struct address_space *mapping,
+		unsigned long index, gfp_t gfp_mask)
+{
+	struct page *page, *cached_page = NULL;
+	int err;
+repeat:
+	page = find_lock_page(mapping, index);
+	if (!page) {
+		if (!cached_page) {
+			cached_page = alloc_page(gfp_mask);
+			if (!cached_page)
+				return NULL;
+		}
+		err = add_to_page_cache_lru(cached_page, mapping,
+					index, gfp_mask);
+		if (!err) {
+			page = cached_page;
+			cached_page = NULL;
+		} else if (err == -EEXIST)
+			goto repeat;
+	}
+	if (cached_page)
+		page_cache_release(cached_page);
+	return page;
+}
+
+EXPORT_SYMBOL(find_or_create_page);
+
+/**
+ * find_get_pages - gang pagecache lookup
+ * @mapping:	The address_space to search
+ * @start:	The starting page index
+ * @nr_pages:	The maximum number of pages
+ * @pages:	Where the resulting pages are placed
+ *
+ * find_get_pages() will search for and return a group of up to
+ * @nr_pages pages in the mapping.  The pages are placed at @pages.
+ * find_get_pages() takes a reference against the returned pages.
+ *
+ * The search returns a group of mapping-contiguous pages with ascending
+ * indexes.  There may be holes in the indices due to not-present pages.
+ *
+ * find_get_pages() returns the number of pages which were found.
+ */
+unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
+			    unsigned int nr_pages, struct page **pages)
+{
+	unsigned int i;
+	unsigned int ret;
+
+	read_lock_irq(&mapping->tree_lock);
+	ret = radix_tree_gang_lookup(&mapping->page_tree,
+				(void **)pages, start, nr_pages);
+	for (i = 0; i < ret; i++)
+		page_cache_get(pages[i]);
+	read_unlock_irq(&mapping->tree_lock);
+	return ret;
+}
+
+/*
+ * Like find_get_pages, except we only return pages which are tagged with
+ * `tag'.   We update *index to index the next page for the traversal.
+ */
+unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
+			int tag, unsigned int nr_pages, struct page **pages)
+{
+	unsigned int i;
+	unsigned int ret;
+
+	read_lock_irq(&mapping->tree_lock);
+	ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
+				(void **)pages, *index, nr_pages, tag);
+	for (i = 0; i < ret; i++)
+		page_cache_get(pages[i]);
+	if (ret)
+		*index = pages[ret - 1]->index + 1;
+	read_unlock_irq(&mapping->tree_lock);
+	return ret;
+}
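+
+/*
+ * Usage sketch (illustrative): the gang-lookup calling convention.
+ * Every returned page carries an extra reference, so caller loops
+ * look roughly like this (what is done per page is elided):
+ *
+ *	struct page *pages[16];
+ *	pgoff_t next = 0;
+ *	unsigned nr, i;
+ *
+ *	while ((nr = find_get_pages(mapping, next, 16, pages)) != 0) {
+ *		for (i = 0; i < nr; i++) {
+ *			next = pages[i]->index + 1;
+ *			/* ... inspect pages[i] ... */
+ *			page_cache_release(pages[i]);
+ *		}
+ *	}
+ */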
+
+/*
+ * Same as grab_cache_page, but do not wait if the page is unavailable.
+ * This is intended for speculative data generators, where the data can
+ * be regenerated if the page couldn't be grabbed.  This routine should
+ * be safe to call while holding the lock for another page.
+ *
+ * Clear __GFP_FS when allocating the page to avoid recursion into the fs
+ * and deadlock against the caller's locked page.
+ */
+struct page *
+grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
+{
+	struct page *page = find_get_page(mapping, index);
+	gfp_t gfp_mask;
+
+	if (page) {
+		if (!TestSetPageLocked(page))
+			return page;
+		page_cache_release(page);
+		return NULL;
+	}
+	gfp_mask = mapping_gfp_mask(mapping) & ~__GFP_FS;
+	page = alloc_pages(gfp_mask, 0);
+	if (page && add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
+		page_cache_release(page);
+		page = NULL;
+	}
+	return page;
+}
+
+EXPORT_SYMBOL(grab_cache_page_nowait);
+
+/*
+ * This is a generic file read routine, and uses the
+ * mapping->a_ops->readpage() function for the actual low-level
+ * stuff.
+ *
+ * This is really ugly. But the goto's actually try to clarify some
+ * of the logic when it comes to error handling etc.
+ *
+ * Note the struct file* is only passed for the use of readpage.  It may be
+ * NULL.
+ */
+void do_generic_mapping_read(struct address_space *mapping,
+			     struct file_ra_state *_ra,
+			     struct file *filp,
+			     loff_t *ppos,
+			     read_descriptor_t *desc,
+			     read_actor_t actor)
+{
+	struct inode *inode = mapping->host;
+	unsigned long index;
+	unsigned long end_index;
+	unsigned long offset;
+	unsigned long last_index;
+	unsigned long next_index;
+	unsigned long prev_index;
+	loff_t isize;
+	struct page *cached_page;
+	int error;
+	struct file_ra_state ra = *_ra;
+
+	cached_page = NULL;
+	index = *ppos >> PAGE_CACHE_SHIFT;
+	next_index = index;
+	prev_index = ra.prev_page;
+	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
+	offset = *ppos & ~PAGE_CACHE_MASK;
+
+	isize = i_size_read(inode);
+	if (!isize)
+		goto out;
+
+	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+	for (;;) {
+		struct page *page;
+		unsigned long nr, ret;
+
+		/* nr is the maximum number of bytes to copy from this page */
+		nr = PAGE_CACHE_SIZE;
+		if (index >= end_index) {
+			if (index > end_index)
+				goto out;
+			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+			if (nr <= offset) {
+				goto out;
+			}
+		}
+		nr = nr - offset;
+
+		cond_resched();
+		if (index == next_index)
+			next_index = page_cache_readahead(mapping, &ra, filp,
+					index, last_index - index);
+
+find_page:
+		page = find_get_page(mapping, index);
+		if (unlikely(page == NULL)) {
+			handle_ra_miss(mapping, &ra, index);
+			goto no_cached_page;
+		}
+		if (!PageUptodate(page))
+			goto page_not_up_to_date;
+page_ok:
+
+		/* If users can be writing to this page using arbitrary
+		 * virtual addresses, take care about potential aliasing
+		 * before reading the page on the kernel side.
+		 */
+		if (mapping_writably_mapped(mapping))
+			flush_dcache_page(page);
+
+		/*
+		 * When (part of) the same page is read multiple times
+		 * in succession, only mark it as accessed the first time.
+		 */
+		if (prev_index != index)
+			mark_page_accessed(page);
+		prev_index = index;
+
+		/*
+		 * Ok, we have the page, and it's up-to-date, so
+		 * now we can copy it to user space...
+		 *
+		 * The actor routine returns how many bytes were actually used.
+		 * NOTE! This may not be the same as how much of a user buffer
+		 * we filled up (we may be padding etc), so we can only update
+		 * "pos" here (the actor routine has to update the user buffer
+		 * pointers and the remaining count).
+		 */
+		ret = actor(desc, page, offset, nr);
+		offset += ret;
+		index += offset >> PAGE_CACHE_SHIFT;
+		offset &= ~PAGE_CACHE_MASK;
+
+		page_cache_release(page);
+		if (ret == nr && desc->count)
+			continue;
+		goto out;
+
+page_not_up_to_date:
+		/* Get exclusive access to the page ... */
+		lock_page(page);
+
+		/* Did it get unhashed before we got the lock? */
+		if (!page->mapping) {
+			unlock_page(page);
+			page_cache_release(page);
+			continue;
+		}
+
+		/* Did somebody else fill it already? */
+		if (PageUptodate(page)) {
+			unlock_page(page);
+			goto page_ok;
+		}
+
+readpage:
+		/* Start the actual read. The read will unlock the page. */
+		error = mapping->a_ops->readpage(filp, page);
+
+		if (unlikely(error))
+			goto readpage_error;
+
+		if (!PageUptodate(page)) {
+			lock_page(page);
+			if (!PageUptodate(page)) {
+				if (page->mapping == NULL) {
+					/*
+					 * invalidate_inode_pages got it
+					 */
+					unlock_page(page);
+					page_cache_release(page);
+					goto find_page;
+				}
+				unlock_page(page);
+				error = -EIO;
+				goto readpage_error;
+			}
+			unlock_page(page);
+		}
+
+		/*
+		 * i_size must be checked after we have done ->readpage.
+		 *
+		 * Checking i_size after the readpage allows us to calculate
+		 * the correct value for "nr", which means the zero-filled
+		 * part of the page is not copied back to userspace (unless
+		 * another truncate extends the file - this is desired though).
+		 */
+		isize = i_size_read(inode);
+		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
+		if (unlikely(!isize || index > end_index)) {
+			page_cache_release(page);
+			goto out;
+		}
+
+		/* nr is the maximum number of bytes to copy from this page */
+		nr = PAGE_CACHE_SIZE;
+		if (index == end_index) {
+			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
+			if (nr <= offset) {
+				page_cache_release(page);
+				goto out;
+			}
+		}
+		nr = nr - offset;
+		goto page_ok;
+
+readpage_error:
+		/* UHHUH! A synchronous read error occurred. Report it */
+		desc->error = error;
+		page_cache_release(page);
+		goto out;
+
+no_cached_page:
+		/*
+		 * Ok, it wasn't cached, so we need to create a new
+		 * page..
+		 */
+		if (!cached_page) {
+			cached_page = page_cache_alloc_cold(mapping);
+			if (!cached_page) {
+				desc->error = -ENOMEM;
+				goto out;
+			}
+		}
+		error = add_to_page_cache_lru(cached_page, mapping,
+						index, GFP_KERNEL);
+		if (error) {
+			if (error == -EEXIST)
+				goto find_page;
+			desc->error = error;
+			goto out;
+		}
+		page = cached_page;
+		cached_page = NULL;
+		goto readpage;
+	}
+
+out:
+	*_ra = ra;
+
+	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
+	if (cached_page)
+		page_cache_release(cached_page);
+	if (filp)
+		file_accessed(filp);
+}
+
+EXPORT_SYMBOL(do_generic_mapping_read);
+
+int file_read_actor(read_descriptor_t *desc, struct page *page,
+			unsigned long offset, unsigned long size)
+{
+	char *kaddr;
+	unsigned long left, count = desc->count;
+
+	if (size > count)
+		size = count;
+
+	/*
+	 * Faults on the destination of a read are common, so do it before
+	 * taking the kmap.
+	 */
+	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
+		kaddr = kmap_atomic(page, KM_USER0);
+		left = __copy_to_user_inatomic(desc->arg.buf,
+						kaddr + offset, size);
+		kunmap_atomic(kaddr, KM_USER0);
+		if (left == 0)
+			goto success;
+	}
+
+	/* Do it the slow way */
+	kaddr = kmap(page);
+	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
+	kunmap(page);
+
+	if (left) {
+		size -= left;
+		desc->error = -EFAULT;
+	}
+success:
+	desc->count = count - size;
+	desc->written += size;
+	desc->arg.buf += size;
+	return size;
+}
+
+/*
+ * This is the "read()" routine for all filesystems
+ * that can use the page cache directly.
+ */
+ssize_t
+__generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
+		unsigned long nr_segs, loff_t *ppos)
+{
+	struct file *filp = iocb->ki_filp;
+	ssize_t retval;
+	unsigned long seg;
+	size_t count;
+
+	count = 0;
+	for (seg = 0; seg < nr_segs; seg++) {
+		const struct iovec *iv = &iov[seg];
+
+		/*
+		 * If any segment has a negative length, or the cumulative
+		 * length ever wraps negative then return -EINVAL.
+		 */
+		count += iv->iov_len;
+		if (unlikely((ssize_t)(count|iv->iov_len) < 0))
+			return -EINVAL;
+		if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
+			continue;
+		if (seg == 0)
+			return -EFAULT;
+		nr_segs = seg;
+		count -= iv->iov_len;	/* This segment is no good */
+		break;
+	}
+
+	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
+	if (filp->f_flags & O_DIRECT) {
+		loff_t pos = *ppos, size;
+		struct address_space *mapping;
+		struct inode *inode;
+
+		mapping = filp->f_mapping;
+		inode = mapping->host;
+		retval = 0;
+		if (!count)
+			goto out; /* skip atime */
+		size = i_size_read(inode);
+		if (pos < size) {
+			retval = generic_file_direct_IO(READ, iocb,
+						iov, pos, nr_segs);
+			if (retval > 0 && !is_sync_kiocb(iocb))
+				retval = -EIOCBQUEUED;
+			if (retval > 0)
+				*ppos = pos + retval;
+		}
+		file_accessed(filp);
+		goto out;
+	}
+
+	retval = 0;
+	if (count) {
+		for (seg = 0; seg < nr_segs; seg++) {
+			read_descriptor_t desc;
+
+			desc.written = 0;
+			desc.arg.buf = iov[seg].iov_base;
+			desc.count = iov[seg].iov_len;
+			if (desc.count == 0)
+				continue;
+			desc.error = 0;
+			do_generic_file_read(filp,ppos,&desc,file_read_actor);
+			retval += desc.written;
+			if (desc.error) {
+				retval = retval ?: desc.error;
+				break;
+			}
+		}
+	}
+out:
+	return retval;
+}
+
+EXPORT_SYMBOL(__generic_file_aio_read);
+
+ssize_t
+generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
+{
+	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
+
+	BUG_ON(iocb->ki_pos != pos);
+	return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
+}
+
+EXPORT_SYMBOL(generic_file_aio_read);
+
+ssize_t
+generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
+{
+	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
+	struct kiocb kiocb;
+	ssize_t ret;
+
+	init_sync_kiocb(&kiocb, filp);
+	ret = __generic_file_aio_read(&kiocb, &local_iov, 1, ppos);
+	if (-EIOCBQUEUED == ret)
+		ret = wait_on_sync_kiocb(&kiocb);
+	return ret;
+}
+
+EXPORT_SYMBOL(generic_file_read);
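+
+/*
+ * Wiring sketch (illustrative): a filesystem that reads through the
+ * page cache just points its file_operations at the generic helpers.
+ * "examplefs" is hypothetical:
+ *
+ *	static struct file_operations examplefs_file_ops = {
+ *		.read		= generic_file_read,
+ *		.aio_read	= generic_file_aio_read,
+ *		.mmap		= generic_file_mmap,
+ *		.sendfile	= generic_file_sendfile,
+ *	};
+ */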
+
+int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
+{
+	ssize_t written;
+	unsigned long count = desc->count;
+	struct file *file = desc->arg.data;
+
+	if (size > count)
+		size = count;
+
+	written = file->f_op->sendpage(file, page, offset,
+				       size, &file->f_pos, size<count);
+	if (written < 0) {
+		desc->error = written;
+		written = 0;
+	}
+	desc->count = count - written;
+	desc->written += written;
+	return written;
+}
+
+ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos,
+			 size_t count, read_actor_t actor, void *target)
+{
+	read_descriptor_t desc;
+
+	if (!count)
+		return 0;
+
+	desc.written = 0;
+	desc.count = count;
+	desc.arg.data = target;
+	desc.error = 0;
+
+	do_generic_file_read(in_file, ppos, &desc, actor);
+	if (desc.written)
+		return desc.written;
+	return desc.error;
+}
+
+EXPORT_SYMBOL(generic_file_sendfile);
+
+static ssize_t
+do_readahead(struct address_space *mapping, struct file *filp,
+	     unsigned long index, unsigned long nr)
+{
+	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
+		return -EINVAL;
+
+	force_page_cache_readahead(mapping, filp, index,
+					max_sane_readahead(nr));
+	return 0;
+}
+
+asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
+{
+	ssize_t ret;
+	struct file *file;
+
+	ret = -EBADF;
+	file = fget(fd);
+	if (file) {
+		if (file->f_mode & FMODE_READ) {
+			struct address_space *mapping = file->f_mapping;
+			unsigned long start = offset >> PAGE_CACHE_SHIFT;
+			unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
+			unsigned long len = end - start + 1;
+			ret = do_readahead(mapping, file, start, len);
+		}
+		fput(file);
+	}
+	return ret;
+}
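+
+/*
+ * Userspace view (sketch): the syscall takes a byte range and is
+ * purely advisory, e.g. preloading the first megabyte of a file:
+ *
+ *	readahead(fd, 0, 1024 * 1024);
+ */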
+
+#ifdef CONFIG_MMU
+/*
+ * This adds the requested page to the page cache if it isn't already there,
+ * and schedules an I/O to read in its contents from disk.
+ */
+static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
+static int fastcall page_cache_read(struct file * file, unsigned long offset)
+{
+	struct address_space *mapping = file->f_mapping;
+	struct page *page; 
+	int error;
+
+	page = page_cache_alloc_cold(mapping);
+	if (!page)
+		return -ENOMEM;
+
+	error = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
+	if (!error) {
+		error = mapping->a_ops->readpage(file, page);
+		page_cache_release(page);
+		return error;
+	}
+
+	/*
+	 * We arrive here in the unlikely event that someone 
+	 * raced with us and added our page to the cache first
+	 * or we are out of memory for radix-tree nodes.
+	 */
+	page_cache_release(page);
+	return error == -EEXIST ? 0 : error;
+}
+
+#define MMAP_LOTSAMISS  (100)
+
+/*
+ * filemap_nopage() is invoked via the vma operations vector for a
+ * mapped memory region to read in file data during a page fault.
+ *
+ * The goto's are kind of ugly, but this streamlines the normal case of having
+ * it in the page cache, and handles the special cases reasonably without
+ * having a lot of duplicated code.
+ */
+struct page *filemap_nopage(struct vm_area_struct *area,
+				unsigned long address, int *type)
+{
+	int error;
+	struct file *file = area->vm_file;
+	struct address_space *mapping = file->f_mapping;
+	struct file_ra_state *ra = &file->f_ra;
+	struct inode *inode = mapping->host;
+	struct page *page;
+	unsigned long size, pgoff;
+	int did_readaround = 0, majmin = VM_FAULT_MINOR;
+
+	pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
+
+retry_all:
+	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	if (pgoff >= size)
+		goto outside_data_content;
+
+	/* If we don't want any read-ahead, don't bother */
+	if (VM_RandomReadHint(area))
+		goto no_cached_page;
+
+	/*
+	 * The readahead code wants to be told about each and every page
+	 * so it can build and shrink its windows appropriately
+	 *
+	 * For sequential accesses, we use the generic readahead logic.
+	 */
+	if (VM_SequentialReadHint(area))
+		page_cache_readahead(mapping, ra, file, pgoff, 1);
+
+	/*
+	 * Do we have something in the page cache already?
+	 */
+retry_find:
+	page = find_get_page(mapping, pgoff);
+	if (!page) {
+		unsigned long ra_pages;
+
+		if (VM_SequentialReadHint(area)) {
+			handle_ra_miss(mapping, ra, pgoff);
+			goto no_cached_page;
+		}
+		ra->mmap_miss++;
+
+		/*
+		 * Do we miss much more than hit in this file? If so,
+		 * stop bothering with read-ahead. It will only hurt.
+		 */
+		if (ra->mmap_miss > ra->mmap_hit + MMAP_LOTSAMISS)
+			goto no_cached_page;
+
+		/*
+		 * To keep the pgmajfault counter straight, we need to
+		 * check did_readaround, as this is an inner loop.
+		 */
+		if (!did_readaround) {
+			majmin = VM_FAULT_MAJOR;
+			inc_page_state(pgmajfault);
+		}
+		did_readaround = 1;
+		ra_pages = max_sane_readahead(file->f_ra.ra_pages);
+		if (ra_pages) {
+			pgoff_t start = 0;
+
+			if (pgoff > ra_pages / 2)
+				start = pgoff - ra_pages / 2;
+			do_page_cache_readahead(mapping, file, start, ra_pages);
+		}
+		page = find_get_page(mapping, pgoff);
+		if (!page)
+			goto no_cached_page;
+	}
+
+	if (!did_readaround)
+		ra->mmap_hit++;
+
+	/*
+	 * Ok, found a page in the page cache, now we need to check
+	 * that it's up-to-date.
+	 */
+	if (!PageUptodate(page))
+		goto page_not_uptodate;
+
+success:
+	/*
+	 * Found the page and have a reference on it.
+	 */
+	mark_page_accessed(page);
+	if (type)
+		*type = majmin;
+	return page;
+
+outside_data_content:
+	/*
+	 * An external ptracer can access pages that normally aren't
+	 * accessible..
+	 */
+	if (area->vm_mm == current->mm)
+		return NULL;
+	/* Fall through to the non-read-ahead case */
+no_cached_page:
+	/*
+	 * We're only likely to ever get here if MADV_RANDOM is in
+	 * effect.
+	 */
+	error = page_cache_read(file, pgoff);
+	grab_swap_token();
+
+	/*
+	 * The page we want has now been added to the page cache.
+	 * In the unlikely event that someone removed it in the
+	 * meantime, we'll just come back here and read it again.
+	 */
+	if (error >= 0)
+		goto retry_find;
+
+	/*
+	 * An error return from page_cache_read can result if the
+	 * system is low on memory, or a problem occurs while trying
+	 * to schedule I/O.
+	 */
+	if (error == -ENOMEM)
+		return NOPAGE_OOM;
+	return NULL;
+
+page_not_uptodate:
+	if (!did_readaround) {
+		majmin = VM_FAULT_MAJOR;
+		inc_page_state(pgmajfault);
+	}
+	lock_page(page);
+
+	/* Did it get unhashed while we waited for it? */
+	if (!page->mapping) {
+		unlock_page(page);
+		page_cache_release(page);
+		goto retry_all;
+	}
+
+	/* Did somebody else get it up-to-date? */
+	if (PageUptodate(page)) {
+		unlock_page(page);
+		goto success;
+	}
+
+	if (!mapping->a_ops->readpage(file, page)) {
+		wait_on_page_locked(page);
+		if (PageUptodate(page))
+			goto success;
+	}
+
+	/*
+	 * Umm, take care of errors if the page isn't up-to-date.
+	 * Try to re-read it _once_. We do this synchronously,
+	 * because there really aren't any performance issues here
+	 * and we need to check for errors.
+	 */
+	lock_page(page);
+
+	/* Somebody truncated the page on us? */
+	if (!page->mapping) {
+		unlock_page(page);
+		page_cache_release(page);
+		goto retry_all;
+	}
+
+	/* Somebody else successfully read it in? */
+	if (PageUptodate(page)) {
+		unlock_page(page);
+		goto success;
+	}
+	ClearPageError(page);
+	if (!mapping->a_ops->readpage(file, page)) {
+		wait_on_page_locked(page);
+		if (PageUptodate(page))
+			goto success;
+	}
+
+	/*
+	 * Things didn't work out. Return NULL to tell the
+	 * mm layer so, possibly freeing the page cache page first.
+	 */
+	page_cache_release(page);
+	return NULL;
+}
+
+EXPORT_SYMBOL(filemap_nopage);
+
+static struct page * filemap_getpage(struct file *file, unsigned long pgoff,
+					int nonblock)
+{
+	struct address_space *mapping = file->f_mapping;
+	struct page *page;
+	int error;
+
+	/*
+	 * Do we have something in the page cache already?
+	 */
+retry_find:
+	page = find_get_page(mapping, pgoff);
+	if (!page) {
+		if (nonblock)
+			return NULL;
+		goto no_cached_page;
+	}
+
+	/*
+	 * Ok, found a page in the page cache, now we need to check
+	 * that it's up-to-date.
+	 */
+	if (!PageUptodate(page)) {
+		if (nonblock) {
+			page_cache_release(page);
+			return NULL;
+		}
+		goto page_not_uptodate;
+	}
+
+success:
+	/*
+	 * Found the page and have a reference on it.
+	 */
+	mark_page_accessed(page);
+	return page;
+
+no_cached_page:
+	error = page_cache_read(file, pgoff);
+
+	/*
+	 * The page we want has now been added to the page cache.
+	 * In the unlikely event that someone removed it in the
+	 * meantime, we'll just come back here and read it again.
+	 */
+	if (error >= 0)
+		goto retry_find;
+
+	/*
+	 * An error return from page_cache_read can result if the
+	 * system is low on memory, or a problem occurs while trying
+	 * to schedule I/O.
+	 */
+	return NULL;
+
+page_not_uptodate:
+	lock_page(page);
+
+	/* Did it get unhashed while we waited for it? */
+	if (!page->mapping) {
+		unlock_page(page);
+		goto err;
+	}
+
+	/* Did somebody else get it up-to-date? */
+	if (PageUptodate(page)) {
+		unlock_page(page);
+		goto success;
+	}
+
+	if (!mapping->a_ops->readpage(file, page)) {
+		wait_on_page_locked(page);
+		if (PageUptodate(page))
+			goto success;
+	}
+
+	/*
+	 * Umm, take care of errors if the page isn't up-to-date.
+	 * Try to re-read it _once_. We do this synchronously,
+	 * because there really aren't any performance issues here
+	 * and we need to check for errors.
+	 */
+	lock_page(page);
+
+	/* Somebody truncated the page on us? */
+	if (!page->mapping) {
+		unlock_page(page);
+		goto err;
+	}
+	/* Somebody else successfully read it in? */
+	if (PageUptodate(page)) {
+		unlock_page(page);
+		goto success;
+	}
+
+	ClearPageError(page);
+	if (!mapping->a_ops->readpage(file, page)) {
+		wait_on_page_locked(page);
+		if (PageUptodate(page))
+			goto success;
+	}
+
+	/*
+	 * Things didn't work out. Return NULL to tell the
+	 * mm layer so, possibly freeing the page cache page first.
+	 */
+err:
+	page_cache_release(page);
+
+	return NULL;
+}
+
+int filemap_populate(struct vm_area_struct *vma, unsigned long addr,
+		unsigned long len, pgprot_t prot, unsigned long pgoff,
+		int nonblock)
+{
+	struct file *file = vma->vm_file;
+	struct address_space *mapping = file->f_mapping;
+	struct inode *inode = mapping->host;
+	unsigned long size;
+	struct mm_struct *mm = vma->vm_mm;
+	struct page *page;
+	int err;
+
+	if (!nonblock)
+		force_page_cache_readahead(mapping, vma->vm_file,
+					pgoff, len >> PAGE_CACHE_SHIFT);
+
+repeat:
+	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+	if (pgoff + (len >> PAGE_CACHE_SHIFT) > size)
+		return -EINVAL;
+
+	page = filemap_getpage(file, pgoff, nonblock);
+
+	/* XXX: This is wrong: a filesystem I/O error may have happened.
+	 * Fix it as done in shmem_populate(), which calls shmem_getpage()
+	 * and can propagate the error. */
+	if (!page && !nonblock)
+		return -ENOMEM;
+
+	if (page) {
+		err = install_page(mm, vma, addr, page, prot);
+		if (err) {
+			page_cache_release(page);
+			return err;
+		}
+	} else if (vma->vm_flags & VM_NONLINEAR) {
+		/* No page was found just because we can't read it in now (being
+		 * here implies nonblock != 0), but the page may exist, so set
+		 * the PTE to fault it in later. */
+		err = install_file_pte(mm, vma, addr, pgoff, prot);
+		if (err)
+			return err;
+	}
+
+	len -= PAGE_SIZE;
+	addr += PAGE_SIZE;
+	pgoff++;
+	if (len)
+		goto repeat;
+
+	return 0;
+}
+EXPORT_SYMBOL(filemap_populate);
+
+struct vm_operations_struct generic_file_vm_ops = {
+	.nopage		= filemap_nopage,
+	.populate	= filemap_populate,
+};
+
+/* This is used for a general mmap of a disk file */
+
+int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
+{
+	struct address_space *mapping = file->f_mapping;
+
+	if (!mapping->a_ops->readpage)
+		return -ENOEXEC;
+	file_accessed(file);
+	vma->vm_ops = &generic_file_vm_ops;
+	return 0;
+}
+
+/*
+ * This is for filesystems which do not implement ->writepage.
+ */
+int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
+		return -EINVAL;
+	return generic_file_mmap(file, vma);
+}
+#else
+int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
+{
+	return -ENOSYS;
+}
+int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
+{
+	return -ENOSYS;
+}
+#endif /* CONFIG_MMU */
+
+EXPORT_SYMBOL(generic_file_mmap);
+EXPORT_SYMBOL(generic_file_readonly_mmap);
+
+static inline struct page *__read_cache_page(struct address_space *mapping,
+				unsigned long index,
+				int (*filler)(void *,struct page*),
+				void *data)
+{
+	struct page *page, *cached_page = NULL;
+	int err;
+repeat:
+	page = find_get_page(mapping, index);
+	if (!page) {
+		if (!cached_page) {
+			cached_page = page_cache_alloc_cold(mapping);
+			if (!cached_page)
+				return ERR_PTR(-ENOMEM);
+		}
+		err = add_to_page_cache_lru(cached_page, mapping,
+					index, GFP_KERNEL);
+		if (err == -EEXIST)
+			goto repeat;
+		if (err < 0) {
+			/* Presumably ENOMEM for radix tree node */
+			page_cache_release(cached_page);
+			return ERR_PTR(err);
+		}
+		page = cached_page;
+		cached_page = NULL;
+		err = filler(data, page);
+		if (err < 0) {
+			page_cache_release(page);
+			page = ERR_PTR(err);
+		}
+	}
+	if (cached_page)
+		page_cache_release(cached_page);
+	return page;
+}
+
+/*
+ * Read into the page cache. If a page already exists,
+ * and PageUptodate() is not set, try to fill the page.
+ */
+struct page *read_cache_page(struct address_space *mapping,
+				unsigned long index,
+				int (*filler)(void *,struct page*),
+				void *data)
+{
+	struct page *page;
+	int err;
+
+retry:
+	page = __read_cache_page(mapping, index, filler, data);
+	if (IS_ERR(page))
+		goto out;
+	mark_page_accessed(page);
+	if (PageUptodate(page))
+		goto out;
+
+	lock_page(page);
+	if (!page->mapping) {
+		unlock_page(page);
+		page_cache_release(page);
+		goto retry;
+	}
+	if (PageUptodate(page)) {
+		unlock_page(page);
+		goto out;
+	}
+	err = filler(data, page);
+	if (err < 0) {
+		page_cache_release(page);
+		page = ERR_PTR(err);
+	}
+ out:
+	return page;
+}
+
+EXPORT_SYMBOL(read_cache_page);
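+
+/*
+ * Usage sketch (illustrative): most callers pass the mapping's own
+ * ->readpage as the filler and then wait for the (asynchronous) read,
+ * along the lines of:
+ *
+ *	page = read_cache_page(mapping, n,
+ *			(filler_t *)mapping->a_ops->readpage, file);
+ *	if (!IS_ERR(page)) {
+ *		wait_on_page_locked(page);
+ *		if (!PageUptodate(page)) {
+ *			page_cache_release(page);
+ *			page = ERR_PTR(-EIO);
+ *		}
+ *	}
+ */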
+
+/*
+ * If the page was newly created, increment its refcount and add it to the
+ * caller's lru-buffering pagevec.  This function is specifically for
+ * generic_file_write().
+ */
+static inline struct page *
+__grab_cache_page(struct address_space *mapping, unsigned long index,
+			struct page **cached_page, struct pagevec *lru_pvec)
+{
+	int err;
+	struct page *page;
+repeat:
+	page = find_lock_page(mapping, index);
+	if (!page) {
+		if (!*cached_page) {
+			*cached_page = page_cache_alloc(mapping);
+			if (!*cached_page)
+				return NULL;
+		}
+		err = add_to_page_cache(*cached_page, mapping,
+					index, GFP_KERNEL);
+		if (err == -EEXIST)
+			goto repeat;
+		if (err == 0) {
+			page = *cached_page;
+			page_cache_get(page);
+			if (!pagevec_add(lru_pvec, page))
+				__pagevec_lru_add(lru_pvec);
+			*cached_page = NULL;
+		}
+	}
+	return page;
+}
+
+/*
+ * The logic we want is
+ *
+ *	if suid or (sgid and xgrp)
+ *		remove privs
+ */
+int remove_suid(struct dentry *dentry)
+{
+	mode_t mode = dentry->d_inode->i_mode;
+	int kill = 0;
+	int result = 0;
+
+	/* suid always must be killed */
+	if (unlikely(mode & S_ISUID))
+		kill = ATTR_KILL_SUID;
+
+	/*
+	 * sgid without any exec bits is just a mandatory locking mark; leave
+	 * it alone.  If some exec bits are set, it's a real sgid; kill it.
+	 */
+	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
+		kill |= ATTR_KILL_SGID;
+
+	if (unlikely(kill && !capable(CAP_FSETID))) {
+		struct iattr newattrs;
+
+		newattrs.ia_valid = ATTR_FORCE | kill;
+		result = notify_change(dentry, &newattrs);
+	}
+	return result;
+}
+EXPORT_SYMBOL(remove_suid);
+
+size_t
+__filemap_copy_from_user_iovec(char *vaddr, 
+			const struct iovec *iov, size_t base, size_t bytes)
+{
+	size_t copied = 0, left = 0;
+
+	while (bytes) {
+		char __user *buf = iov->iov_base + base;
+		int copy = min(bytes, iov->iov_len - base);
+
+		base = 0;
+		left = __copy_from_user_inatomic(vaddr, buf, copy);
+		copied += copy;
+		bytes -= copy;
+		vaddr += copy;
+		iov++;
+
+		if (unlikely(left)) {
+			/* zero the rest of the target like __copy_from_user */
+			if (bytes)
+				memset(vaddr, 0, bytes);
+			break;
+		}
+	}
+	return copied - left;
+}
+
+/*
+ * Performs necessary checks before doing a write.
+ *
+ * Can adjust the write position or the number of bytes to write.
+ * Returns the appropriate error code for the caller to return, or
+ * zero if the write should be allowed.
+ */
+inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk)
+{
+	struct inode *inode = file->f_mapping->host;
+	unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
+
+	if (unlikely(*pos < 0))
+		return -EINVAL;
+
+	if (!isblk) {
+		/* FIXME: this is for backwards compatibility with 2.4 */
+		if (file->f_flags & O_APPEND)
+			*pos = i_size_read(inode);
+
+		if (limit != RLIM_INFINITY) {
+			if (*pos >= limit) {
+				send_sig(SIGXFSZ, current, 0);
+				return -EFBIG;
+			}
+			if (*count > limit - (typeof(limit))*pos) {
+				*count = limit - (typeof(limit))*pos;
+			}
+		}
+	}
+
+	/*
+	 * LFS rule
+	 */
+	if (unlikely(*pos + *count > MAX_NON_LFS &&
+				!(file->f_flags & O_LARGEFILE))) {
+		if (*pos >= MAX_NON_LFS) {
+			send_sig(SIGXFSZ, current, 0);
+			return -EFBIG;
+		}
+		if (*count > MAX_NON_LFS - (unsigned long)*pos) {
+			*count = MAX_NON_LFS - (unsigned long)*pos;
+		}
+	}
+
+	/*
+	 * Are we about to exceed the fs block limit ?
+	 *
+	 * If we have written data it becomes a short write.  If we have
+	 * exceeded without writing data we send a signal and return EFBIG.
+	 * Linus's frestrict idea will clean these up nicely.
+	 */
+	if (likely(!isblk)) {
+		if (unlikely(*pos >= inode->i_sb->s_maxbytes)) {
+			if (*count || *pos > inode->i_sb->s_maxbytes) {
+				send_sig(SIGXFSZ, current, 0);
+				return -EFBIG;
+			}
+			/* zero-length writes at ->s_maxbytes are OK */
+		}
+
+		if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
+			*count = inode->i_sb->s_maxbytes - *pos;
+	} else {
+		loff_t isize;
+		if (bdev_read_only(I_BDEV(inode)))
+			return -EPERM;
+		isize = i_size_read(inode);
+		if (*pos >= isize) {
+			if (*count || *pos > isize)
+				return -ENOSPC;
+		}
+
+		if (*pos + *count > isize)
+			*count = isize - *pos;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(generic_write_checks);
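+
+/*
+ * Worked example (sketch): with RLIMIT_FSIZE set to 1MB, a 64kB write
+ * at *pos = 1MB - 4kB is clamped to *count = 4kB (a short write),
+ * while the same write at *pos = 1MB sends SIGXFSZ and fails with
+ * -EFBIG.
+ */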
+
+ssize_t
+generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
+		unsigned long *nr_segs, loff_t pos, loff_t *ppos,
+		size_t count, size_t ocount)
+{
+	struct file	*file = iocb->ki_filp;
+	struct address_space *mapping = file->f_mapping;
+	struct inode	*inode = mapping->host;
+	ssize_t		written;
+
+	if (count != ocount)
+		*nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
+
+	written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
+	if (written > 0) {
+		loff_t end = pos + written;
+		if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
+			i_size_write(inode,  end);
+			mark_inode_dirty(inode);
+		}
+		*ppos = end;
+	}
+
+	/*
+	 * Sync the fs metadata but not the minor inode changes and
+	 * of course not the data as we did direct DMA for the IO.
+	 * i_sem is held, which protects generic_osync_inode() from
+	 * livelocking.
+	 */
+	if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
+		int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
+		if (err < 0)
+			written = err;
+	}
+	if (written == count && !is_sync_kiocb(iocb))
+		written = -EIOCBQUEUED;
+	return written;
+}
+EXPORT_SYMBOL(generic_file_direct_write);
+
+ssize_t
+generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
+		unsigned long nr_segs, loff_t pos, loff_t *ppos,
+		size_t count, ssize_t written)
+{
+	struct file *file = iocb->ki_filp;
+	struct address_space * mapping = file->f_mapping;
+	struct address_space_operations *a_ops = mapping->a_ops;
+	struct inode 	*inode = mapping->host;
+	long		status = 0;
+	struct page	*page;
+	struct page	*cached_page = NULL;
+	size_t		bytes;
+	struct pagevec	lru_pvec;
+	const struct iovec *cur_iov = iov; /* current iovec */
+	size_t		iov_base = 0;	   /* offset in the current iovec */
+	char __user	*buf;
+
+	pagevec_init(&lru_pvec, 0);
+
+	/*
+	 * handle partial DIO write.  Adjust cur_iov if needed.
+	 */
+	if (likely(nr_segs == 1))
+		buf = iov->iov_base + written;
+	else {
+		filemap_set_next_iovec(&cur_iov, &iov_base, written);
+		buf = cur_iov->iov_base + iov_base;
+	}
+
+	do {
+		unsigned long index;
+		unsigned long offset;
+		unsigned long maxlen;
+		size_t copied;
+
+		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
+		index = pos >> PAGE_CACHE_SHIFT;
+		bytes = PAGE_CACHE_SIZE - offset;
+		if (bytes > count)
+			bytes = count;
+
+		/*
+		 * Bring in the user page that we will copy from _first_.
+		 * Otherwise there's a nasty deadlock on copying from the
+		 * same page as we're writing to, without it being marked
+		 * up-to-date.
+		 */
+		maxlen = cur_iov->iov_len - iov_base;
+		if (maxlen > bytes)
+			maxlen = bytes;
+		fault_in_pages_readable(buf, maxlen);
+
+		page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
+		if (!page) {
+			status = -ENOMEM;
+			break;
+		}
+
+		status = a_ops->prepare_write(file, page, offset, offset+bytes);
+		if (unlikely(status)) {
+			loff_t isize = i_size_read(inode);
+			/*
+			 * prepare_write() may have instantiated a few blocks
+			 * outside i_size.  Trim these off again.
+			 */
+			unlock_page(page);
+			page_cache_release(page);
+			if (pos + bytes > isize)
+				vmtruncate(inode, isize);
+			break;
+		}
+		if (likely(nr_segs == 1))
+			copied = filemap_copy_from_user(page, offset,
+							buf, bytes);
+		else
+			copied = filemap_copy_from_user_iovec(page, offset,
+						cur_iov, iov_base, bytes);
+		flush_dcache_page(page);
+		status = a_ops->commit_write(file, page, offset, offset+bytes);
+		if (likely(copied > 0)) {
+			if (!status)
+				status = copied;
+
+			if (status >= 0) {
+				written += status;
+				count -= status;
+				pos += status;
+				buf += status;
+				if (unlikely(nr_segs > 1)) {
+					filemap_set_next_iovec(&cur_iov,
+							&iov_base, status);
+					if (count)
+						buf = cur_iov->iov_base +
+							iov_base;
+				} else {
+					iov_base += status;
+				}
+			}
+		}
+		if (unlikely(copied != bytes))
+			if (status >= 0)
+				status = -EFAULT;
+		unlock_page(page);
+		mark_page_accessed(page);
+		page_cache_release(page);
+		if (status < 0)
+			break;
+		balance_dirty_pages_ratelimited(mapping);
+		cond_resched();
+	} while (count);
+	*ppos = pos;
+
+	if (cached_page)
+		page_cache_release(cached_page);
+
+	/*
+	 * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC
+	 */
+	if (likely(status >= 0)) {
+		if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
+			if (!a_ops->writepage || !is_sync_kiocb(iocb))
+				status = generic_osync_inode(inode, mapping,
+						OSYNC_METADATA|OSYNC_DATA);
+		}
+	}
+
+	/*
+	 * If we get here for O_DIRECT writes then we must have fallen through
+	 * to buffered writes (block instantiation inside i_size).  So we sync
+	 * the file data here, to try to honour O_DIRECT expectations.
+	 */
+	if (unlikely(file->f_flags & O_DIRECT) && written)
+		status = filemap_write_and_wait(mapping);
+
+	pagevec_lru_add(&lru_pvec);
+	return written ? written : status;
+}
+EXPORT_SYMBOL(generic_file_buffered_write);
+
+static ssize_t
+__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
+				unsigned long nr_segs, loff_t *ppos)
+{
+	struct file *file = iocb->ki_filp;
+	struct address_space * mapping = file->f_mapping;
+	size_t ocount;		/* original count */
+	size_t count;		/* after file limit checks */
+	struct inode 	*inode = mapping->host;
+	unsigned long	seg;
+	loff_t		pos;
+	ssize_t		written;
+	ssize_t		err;
+
+	ocount = 0;
+	for (seg = 0; seg < nr_segs; seg++) {
+		const struct iovec *iv = &iov[seg];
+
+		/*
+		 * If any segment has a negative length, or the cumulative
+		 * length ever wraps negative then return -EINVAL.
+		 */
+		ocount += iv->iov_len;
+		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
+			return -EINVAL;
+		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
+			continue;
+		if (seg == 0)
+			return -EFAULT;
+		nr_segs = seg;
+		ocount -= iv->iov_len;	/* This segment is no good */
+		break;
+	}
+
+	count = ocount;
+	pos = *ppos;
+
+	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+
+	/* We can write back this queue in page reclaim */
+	current->backing_dev_info = mapping->backing_dev_info;
+	written = 0;
+
+	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
+	if (err)
+		goto out;
+
+	if (count == 0)
+		goto out;
+
+	err = remove_suid(file->f_dentry);
+	if (err)
+		goto out;
+
+	inode_update_time(inode, 1);
+
+	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
+	if (unlikely(file->f_flags & O_DIRECT)) {
+		written = generic_file_direct_write(iocb, iov,
+				&nr_segs, pos, ppos, count, ocount);
+		if (written < 0 || written == count)
+			goto out;
+		/*
+		 * direct-io write to a hole: fall through to buffered I/O
+		 * for completing the rest of the request.
+		 */
+		pos += written;
+		count -= written;
+	}
+
+	written = generic_file_buffered_write(iocb, iov, nr_segs,
+			pos, ppos, count, written);
+out:
+	current->backing_dev_info = NULL;
+	return written ? written : err;
+}
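+/*
+ * Worked example for the O_DIRECT fallback above (illustrative): a
+ * 16KB O_DIRECT write whose first 4KB covers allocated blocks but
+ * whose tail hits a hole comes back from generic_file_direct_write()
+ * with written = 4096; pos and count advance by 4KB and the remaining
+ * 12KB falls through to generic_file_buffered_write(), which ends by
+ * writing the dirtied range back out to preserve O_DIRECT semantics.
+ */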
+
+ssize_t
+generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
+				unsigned long nr_segs, loff_t *ppos)
+{
+	struct file *file = iocb->ki_filp;
+	struct address_space *mapping = file->f_mapping;
+	struct inode *inode = mapping->host;
+	ssize_t ret;
+	loff_t pos = *ppos;
+
+	ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs, ppos);
+
+	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
+		int err;
+
+		err = sync_page_range_nolock(inode, mapping, pos, ret);
+		if (err < 0)
+			ret = err;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(generic_file_aio_write_nolock);
+
+static ssize_t
+__generic_file_write_nolock(struct file *file, const struct iovec *iov,
+				unsigned long nr_segs, loff_t *ppos)
+{
+	struct kiocb kiocb;
+	ssize_t ret;
+
+	init_sync_kiocb(&kiocb, file);
+	ret = __generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
+	if (ret == -EIOCBQUEUED)
+		ret = wait_on_sync_kiocb(&kiocb);
+	return ret;
+}
+
+ssize_t
+generic_file_write_nolock(struct file *file, const struct iovec *iov,
+				unsigned long nr_segs, loff_t *ppos)
+{
+	struct kiocb kiocb;
+	ssize_t ret;
+
+	init_sync_kiocb(&kiocb, file);
+	ret = generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
+	if (-EIOCBQUEUED == ret)
+		ret = wait_on_sync_kiocb(&kiocb);
+	return ret;
+}
+EXPORT_SYMBOL(generic_file_write_nolock);
+
+ssize_t generic_file_aio_write(struct kiocb *iocb, const char __user *buf,
+			       size_t count, loff_t pos)
+{
+	struct file *file = iocb->ki_filp;
+	struct address_space *mapping = file->f_mapping;
+	struct inode *inode = mapping->host;
+	ssize_t ret;
+	struct iovec local_iov = { .iov_base = (void __user *)buf,
+					.iov_len = count };
+
+	BUG_ON(iocb->ki_pos != pos);
+
+	down(&inode->i_sem);
+	ret = __generic_file_aio_write_nolock(iocb, &local_iov, 1,
+						&iocb->ki_pos);
+	up(&inode->i_sem);
+
+	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
+		ssize_t err;
+
+		err = sync_page_range(inode, mapping, pos, ret);
+		if (err < 0)
+			ret = err;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(generic_file_aio_write);
+
+ssize_t generic_file_write(struct file *file, const char __user *buf,
+			   size_t count, loff_t *ppos)
+{
+	struct address_space *mapping = file->f_mapping;
+	struct inode *inode = mapping->host;
+	ssize_t	ret;
+	struct iovec local_iov = { .iov_base = (void __user *)buf,
+					.iov_len = count };
+
+	down(&inode->i_sem);
+	ret = __generic_file_write_nolock(file, &local_iov, 1, ppos);
+	up(&inode->i_sem);
+
+	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
+		ssize_t err;
+
+		err = sync_page_range(inode, mapping, *ppos - ret, ret);
+		if (err < 0)
+			ret = err;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(generic_file_write);
+
+ssize_t generic_file_readv(struct file *filp, const struct iovec *iov,
+			unsigned long nr_segs, loff_t *ppos)
+{
+	struct kiocb kiocb;
+	ssize_t ret;
+
+	init_sync_kiocb(&kiocb, filp);
+	ret = __generic_file_aio_read(&kiocb, iov, nr_segs, ppos);
+	if (-EIOCBQUEUED == ret)
+		ret = wait_on_sync_kiocb(&kiocb);
+	return ret;
+}
+EXPORT_SYMBOL(generic_file_readv);
+
+ssize_t generic_file_writev(struct file *file, const struct iovec *iov,
+			unsigned long nr_segs, loff_t *ppos)
+{
+	struct address_space *mapping = file->f_mapping;
+	struct inode *inode = mapping->host;
+	ssize_t ret;
+
+	down(&inode->i_sem);
+	ret = __generic_file_write_nolock(file, iov, nr_segs, ppos);
+	up(&inode->i_sem);
+
+	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
+		int err;
+
+		err = sync_page_range(inode, mapping, *ppos - ret, ret);
+		if (err < 0)
+			ret = err;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(generic_file_writev);
+
+/*
+ * Called under i_sem for writes to S_ISREG files.   Returns -EIO if something
+ * went wrong during pagecache shootdown.
+ */
+static ssize_t
+generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+	loff_t offset, unsigned long nr_segs)
+{
+	struct file *file = iocb->ki_filp;
+	struct address_space *mapping = file->f_mapping;
+	ssize_t retval;
+	size_t write_len = 0;
+
+	/*
+	 * If it's a write, unmap all mmappings of the file up-front.  This
+	 * will cause any pte dirty bits to be propagated into the pageframes
+	 * for the subsequent filemap_write_and_wait().
+	 */
+	if (rw == WRITE) {
+		write_len = iov_length(iov, nr_segs);
+		if (mapping_mapped(mapping))
+			unmap_mapping_range(mapping, offset, write_len, 0);
+	}
+
+	retval = filemap_write_and_wait(mapping);
+	if (retval == 0) {
+		retval = mapping->a_ops->direct_IO(rw, iocb, iov,
+						offset, nr_segs);
+		if (rw == WRITE && mapping->nrpages) {
+			pgoff_t end = (offset + write_len - 1)
+						>> PAGE_CACHE_SHIFT;
+			int err = invalidate_inode_pages2_range(mapping,
+					offset >> PAGE_CACHE_SHIFT, end);
+			if (err)
+				retval = err;
+		}
+	}
+	return retval;
+}
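
Taken together, the entry points exported above are what individual
filesystems plug into struct file_operations, while the buffered write
loop calls back into the filesystem through address_space_operations.
A minimal sketch of that wiring for a simple block-based filesystem of
this era follows; the myfs_* names and the myfs_get_block callback are
hypothetical stand-ins, assuming the usual buffer-head helpers:

	#include <linux/fs.h>
	#include <linux/buffer_head.h>

	/* hypothetical callback mapping a file block to a disk block */
	extern int myfs_get_block(struct inode *inode, sector_t iblock,
				  struct buffer_head *bh_result, int create);

	static int myfs_readpage(struct file *file, struct page *page)
	{
		return block_read_full_page(page, myfs_get_block);
	}

	/* map/allocate blocks, read in any partially overwritten data */
	static int myfs_prepare_write(struct file *file, struct page *page,
				      unsigned from, unsigned to)
	{
		return block_prepare_write(page, from, to, myfs_get_block);
	}

	static struct address_space_operations myfs_aops = {
		.readpage	= myfs_readpage,
		.prepare_write	= myfs_prepare_write,
		.commit_write	= generic_commit_write,
	};

	static struct file_operations myfs_file_ops = {
		.read		= generic_file_read,
		.write		= generic_file_write,
		.aio_write	= generic_file_aio_write,
		.writev		= generic_file_writev,
	};

Everything between the VFS entry point and the prepare_write()/
commit_write() pair is then handled by the generic code above.
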
diff -urN oldtree/mm/filemap.h newtree/mm/filemap.h
--- oldtree/mm/filemap.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/mm/filemap.h	2006-01-29 11:18:20.624153448 +0000
@@ -13,7 +13,7 @@
 #include <linux/highmem.h>
 #include <linux/uio.h>
 #include <linux/config.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 size_t
 __filemap_copy_from_user_iovec(char *vaddr,
@@ -34,13 +34,13 @@
 	int left;
 
 	kaddr = kmap_atomic(page, KM_USER0);
-	left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
+	left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
 	kunmap_atomic(kaddr, KM_USER0);
 
 	if (left != 0) {
 		/* Do it the slow way */
 		kaddr = kmap(page);
-		left = __copy_from_user(kaddr + offset, buf, bytes);
+		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
 		kunmap(page);
 	}
 	return bytes - left;
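
The _nocache variants use non-temporal stores where the architecture
provides them, so that bulk write() data does not evict useful cache
lines on its way into the page cache; the fast inatomic path and the
kmap slow path keep the same structure. On architectures without such
stores, a fallback in the spirit of the following keeps the callers
portable (an illustrative assumption, not something this patch adds):

	#ifndef ARCH_HAS_NOCACHE_UACCESS
	#define __copy_from_user_inatomic_nocache(dst, src, n)	\
		__copy_from_user_inatomic((dst), (src), (n))
	#define __copy_from_user_nocache(dst, src, n)		\
		__copy_from_user((dst), (src), (n))
	#endif
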
diff -urN oldtree/mm/page_alloc.c newtree/mm/page_alloc.c
--- oldtree/mm/page_alloc.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/mm/page_alloc.c	2006-01-29 11:18:20.629152688 +0000
@@ -1699,7 +1699,7 @@
  * up by free_all_bootmem() once the early boot process is
  * done. Non-atomic initialization, single-pass.
  */
-void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
+void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		unsigned long start_pfn)
 {
 	struct page *page;
@@ -1754,7 +1754,7 @@
 	memmap_init_zone((size), (nid), (zone), (start_pfn))
 #endif
 
-static int __devinit zone_batchsize(struct zone *zone)
+static int __meminit zone_batchsize(struct zone *zone)
 {
 	int batch;
 
@@ -1832,7 +1832,7 @@
  * Dynamically allocate memory for the
  * per cpu pageset array in struct zone.
  */
-static int __devinit process_zones(int cpu)
+static int __meminit process_zones(int cpu)
 {
 	struct zone *zone, *dzone;
 
@@ -1871,7 +1871,7 @@
 #endif
 }
 
-static int __devinit pageset_cpuup_callback(struct notifier_block *nfb,
+static int __meminit pageset_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action,
 		void *hcpu)
 {
@@ -1911,7 +1911,7 @@
 
 #endif
 
-static __devinit
+static __meminit
 void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 {
 	int i;
@@ -1931,7 +1931,7 @@
 		init_waitqueue_head(zone->wait_table + i);
 }
 
-static __devinit void zone_pcp_init(struct zone *zone)
+static __meminit void zone_pcp_init(struct zone *zone)
 {
 	int cpu;
 	unsigned long batch = zone_batchsize(zone);
@@ -1949,7 +1949,7 @@
 		zone->name, zone->present_pages, batch);
 }
 
-static __devinit void init_currently_empty_zone(struct zone *zone,
+static __meminit void init_currently_empty_zone(struct zone *zone,
 		unsigned long zone_start_pfn, unsigned long size)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
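
The __devinit to __meminit conversions tie the lifetime of these
initialisers to memory hotplug rather than device hotplug: with
CONFIG_MEMORY_HOTPLUG enabled they must stay resident so that memory
added after boot can be initialised, and without it they may be
discarded with the rest of the init text. The annotation behaves
roughly like this sketch (the exact definition lives in
include/linux/init.h):

	#ifdef CONFIG_MEMORY_HOTPLUG
	#define __meminit		/* kept: runs again when memory is added */
	#else
	#define __meminit __init	/* discarded after boot like other init code */
	#endif
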
diff -urN oldtree/mm/page_alloc.c.orig newtree/mm/page_alloc.c.orig
--- oldtree/mm/page_alloc.c.orig	1970-01-01 00:00:00.000000000 +0000
+++ newtree/mm/page_alloc.c.orig	2006-01-28 19:08:42.000000000 +0000
@@ -0,0 +1,2631 @@
+/*
+ *  linux/mm/page_alloc.c
+ *
+ *  Manages the free list, the system allocates free pages here.
+ *  Note that kmalloc() lives in slab.c
+ *
+ *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
+ *  Swap reorganised 29.12.95, Stephen Tweedie
+ *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
+ *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
+ *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
+ *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
+ *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/interrupt.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/suspend.h>
+#include <linux/pagevec.h>
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/notifier.h>
+#include <linux/topology.h>
+#include <linux/sysctl.h>
+#include <linux/cpu.h>
+#include <linux/cpuset.h>
+#include <linux/memory_hotplug.h>
+#include <linux/nodemask.h>
+#include <linux/vmalloc.h>
+
+#include <asm/tlbflush.h>
+#include "internal.h"
+
+/*
+ * MCD - HACK: Find somewhere to initialize this EARLY, or make this
+ * initializer cleaner
+ */
+nodemask_t node_online_map __read_mostly = { { [0] = 1UL } };
+EXPORT_SYMBOL(node_online_map);
+nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL;
+EXPORT_SYMBOL(node_possible_map);
+struct pglist_data *pgdat_list __read_mostly;
+unsigned long totalram_pages __read_mostly;
+unsigned long totalhigh_pages __read_mostly;
+long nr_swap_pages;
+
+/*
+ * results with 256, 32 in the lowmem_reserve sysctl:
+ *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
+ *	1G machine -> (16M dma, 784M normal, 224M high)
+ *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
+ *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
+ *	HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA
+ *
+ * TBD: should special case ZONE_DMA32 machines here - in those we normally
+ * don't need any ZONE_NORMAL reservation
+ */
+int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 };
+
+EXPORT_SYMBOL(totalram_pages);
+
+/*
+ * Used by page_zone() to look up the address of the struct zone whose
+ * id is encoded in the upper bits of page->flags
+ */
+struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
+EXPORT_SYMBOL(zone_table);
+
+static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
+int min_free_kbytes = 1024;
+
+unsigned long __initdata nr_kernel_pages;
+unsigned long __initdata nr_all_pages;
+
+static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
+{
+	int ret = 0;
+	unsigned seq;
+	unsigned long pfn = page_to_pfn(page);
+
+	do {
+		seq = zone_span_seqbegin(zone);
+		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
+			ret = 1;
+		else if (pfn < zone->zone_start_pfn)
+			ret = 1;
+	} while (zone_span_seqretry(zone, seq));
+
+	return ret;
+}
+
+static int page_is_consistent(struct zone *zone, struct page *page)
+{
+#ifdef CONFIG_HOLES_IN_ZONE
+	if (!pfn_valid(page_to_pfn(page)))
+		return 0;
+#endif
+	if (zone != page_zone(page))
+		return 0;
+
+	return 1;
+}
+/*
+ * Temporary debugging check for pages not lying within a given zone.
+ */
+static int bad_range(struct zone *zone, struct page *page)
+{
+	if (page_outside_zone_boundaries(zone, page))
+		return 1;
+	if (!page_is_consistent(zone, page))
+		return 1;
+
+	return 0;
+}
+
+static void bad_page(const char *function, struct page *page)
+{
+	printk(KERN_EMERG "Bad page state at %s (in process '%s', page %p)\n",
+		function, current->comm, page);
+	printk(KERN_EMERG "flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
+		(int)(2*sizeof(unsigned long)), (unsigned long)page->flags,
+		page->mapping, page_mapcount(page), page_count(page));
+	printk(KERN_EMERG "Backtrace:\n");
+	dump_stack();
+	printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n");
+	page->flags &= ~(1 << PG_lru	|
+			1 << PG_private |
+			1 << PG_locked	|
+			1 << PG_active	|
+			1 << PG_dirty	|
+			1 << PG_reclaim |
+			1 << PG_slab    |
+			1 << PG_swapcache |
+			1 << PG_writeback );
+	set_page_count(page, 0);
+	reset_page_mapcount(page);
+	page->mapping = NULL;
+	add_taint(TAINT_BAD_PAGE);
+}
+
+/*
+ * Higher-order pages are called "compound pages".  They are structured thusly:
+ *
+ * The first PAGE_SIZE page is called the "head page".
+ *
+ * The remaining PAGE_SIZE pages are called "tail pages".
+ *
+ * All pages have PG_compound set.  All pages have their ->private pointing at
+ * the head page (even the head page has this).
+ *
+ * The first tail page's ->mapping, if non-zero, holds the address of the
+ * compound page's put_page() function.
+ *
+ * The order of the allocation is stored in the first tail page's ->index
+ * This is only for debug at present.  This usage means that zero-order pages
+ * may not be compound.
+ */
+static void prep_compound_page(struct page *page, unsigned long order)
+{
+	int i;
+	int nr_pages = 1 << order;
+
+	page[1].mapping = NULL;
+	page[1].index = order;
+	for (i = 0; i < nr_pages; i++) {
+		struct page *p = page + i;
+
+		SetPageCompound(p);
+		set_page_private(p, (unsigned long)page);
+	}
+}
+
+static void destroy_compound_page(struct page *page, unsigned long order)
+{
+	int i;
+	int nr_pages = 1 << order;
+
+	if (!PageCompound(page))
+		return;
+
+	if (page[1].index != order)
+		bad_page(__FUNCTION__, page);
+
+	for (i = 0; i < nr_pages; i++) {
+		struct page *p = page + i;
+
+		if (!PageCompound(p))
+			bad_page(__FUNCTION__, page);
+		if (page_private(p) != (unsigned long)page)
+			bad_page(__FUNCTION__, page);
+		ClearPageCompound(p);
+	}
+}
+
+/*
+ * Functions for dealing with a page's order in the buddy system.
+ * zone->lock is already acquired when we use these.
+ * So, we don't need atomic page->flags operations here.
+ */
+static inline unsigned long page_order(struct page *page)
+{
+	return page_private(page);
+}
+
+static inline void set_page_order(struct page *page, int order)
+{
+	set_page_private(page, order);
+	__SetPagePrivate(page);
+}
+
+static inline void rmv_page_order(struct page *page)
+{
+	__ClearPagePrivate(page);
+	set_page_private(page, 0);
+}
+
+/*
+ * Locate the struct page for both the matching buddy in our
+ * pair (buddy1) and the combined O(n+1) page they form (page).
+ *
+ * 1) Any buddy B1 will have an order O twin B2 which satisfies
+ * the following equation:
+ *     B2 = B1 ^ (1 << O)
+ * For example, if the starting buddy (buddy2) is #8 its order
+ * 1 buddy is #10:
+ *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
+ *
+ * 2) Any buddy B will have an order O+1 parent P which
+ * satisfies the following equation:
+ *     P = B & ~(1 << O)
+ *
+ * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
+ */
+static inline struct page *
+__page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order)
+{
+	unsigned long buddy_idx = page_idx ^ (1 << order);
+
+	return page + (buddy_idx - page_idx);
+}
+
+static inline unsigned long
+__find_combined_index(unsigned long page_idx, unsigned int order)
+{
+	return (page_idx & ~(1 << order));
+}
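+/*
+ * Worked example for the two helpers above (illustrative, assuming a
+ * contiguous mem_map): freeing page_idx 12 at order 0 gives
+ *   buddy_idx    = 12 ^ (1 << 0) = 13
+ *   combined_idx = 12 & ~(1 << 0) = 12
+ * so if page 13 is also free at order 0, the pair merges into an
+ * order-1 block at index 12, whose own buddy is 12 ^ (1 << 1) = 14,
+ * and so on up the orders.
+ */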
+
+/*
+ * This function checks whether a page is free and is the buddy
+ * we are looking for.  We can coalesce a page and its buddy if
+ * (a) the buddy is free &&
+ * (b) the buddy is on the buddy system &&
+ * (c) a page and its buddy have the same order.
+ * For recording a page's order, we use page_private(page) and PG_private.
+ *
+ */
+static inline int page_is_buddy(struct page *page, int order)
+{
+	if (PagePrivate(page) &&
+	    (page_order(page) == order) &&
+	    page_count(page) == 0)
+		return 1;
+	return 0;
+}
+
+/*
+ * Freeing function for a buddy system allocator.
+ *
+ * The concept of a buddy system is to maintain a direct-mapped table
+ * (containing bit values) for memory blocks of various "orders".
+ * The bottom level table contains the map for the smallest allocatable
+ * units of memory (here, pages), and each level above it describes
+ * pairs of units from the levels below, hence, "buddies".
+ * At a high level, all that happens here is marking the table entry
+ * at the bottom level available, and propagating the changes upward
+ * as necessary, plus some accounting needed to play nicely with other
+ * parts of the VM system.
+ * At each level, we keep a list of pages which are the heads of
+ * contiguous runs of free pages of length (1 << order), marked with
+ * PG_private.  A page's order is recorded in the page_private(page)
+ * field.
+ * So when we are allocating or freeing one, we can derive the state of
+ * the other.  That is, if we allocate a small block, and both were
+ * free, the remainder of the region must be split into blocks.
+ * If a block is freed, and its buddy is also free, then this
+ * triggers coalescing into a block of larger size.
+ *
+ * -- wli
+ */
+
+static inline void __free_pages_bulk (struct page *page,
+		struct zone *zone, unsigned int order)
+{
+	unsigned long page_idx;
+	int order_size = 1 << order;
+
+	if (unlikely(order))
+		destroy_compound_page(page, order);
+
+	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
+
+	BUG_ON(page_idx & (order_size - 1));
+	BUG_ON(bad_range(zone, page));
+
+	zone->free_pages += order_size;
+	while (order < MAX_ORDER-1) {
+		unsigned long combined_idx;
+		struct free_area *area;
+		struct page *buddy;
+
+		combined_idx = __find_combined_index(page_idx, order);
+		buddy = __page_find_buddy(page, page_idx, order);
+
+		if (bad_range(zone, buddy))
+			break;
+		if (!page_is_buddy(buddy, order))
+			break;		/* Move the buddy up one level. */
+		list_del(&buddy->lru);
+		area = zone->free_area + order;
+		area->nr_free--;
+		rmv_page_order(buddy);
+		page = page + (combined_idx - page_idx);
+		page_idx = combined_idx;
+		order++;
+	}
+	set_page_order(page, order);
+	list_add(&page->lru, &zone->free_area[order].free_list);
+	zone->free_area[order].nr_free++;
+}
+
+static inline int free_pages_check(const char *function, struct page *page)
+{
+	if (	page_mapcount(page) ||
+		page->mapping != NULL ||
+		page_count(page) != 0 ||
+		(page->flags & (
+			1 << PG_lru	|
+			1 << PG_private |
+			1 << PG_locked	|
+			1 << PG_active	|
+			1 << PG_reclaim	|
+			1 << PG_slab	|
+			1 << PG_swapcache |
+			1 << PG_writeback |
+			1 << PG_reserved )))
+		bad_page(function, page);
+	if (PageDirty(page))
+		__ClearPageDirty(page);
+	/*
+	 * For now, we report if PG_reserved was found set, but do not
+	 * clear it, and do not free the page.  But we shall soon need
+	 * to do more, for when the ZERO_PAGE count wraps negative.
+	 */
+	return PageReserved(page);
+}
+
+/*
+ * Frees a list of pages. 
+ * Assumes all pages on list are in same zone, and of same order.
+ * count is the number of pages to free.
+ *
+ * If the zone was previously in an "all pages pinned" state then look to
+ * see if this freeing clears that state.
+ *
+ * And clear the zone's pages_scanned counter, to hold off the "all pages are
+ * pinned" detection logic.
+ */
+static int
+free_pages_bulk(struct zone *zone, int count,
+		struct list_head *list, unsigned int order)
+{
+	unsigned long flags;
+	struct page *page = NULL;
+	int ret = 0;
+
+	spin_lock_irqsave(&zone->lock, flags);
+	zone->all_unreclaimable = 0;
+	zone->pages_scanned = 0;
+	while (!list_empty(list) && count--) {
+		page = list_entry(list->prev, struct page, lru);
+		/* must delete it here: __free_pages_bulk manipulates the list */
+		list_del(&page->lru);
+		__free_pages_bulk(page, zone, order);
+		ret++;
+	}
+	spin_unlock_irqrestore(&zone->lock, flags);
+	return ret;
+}
+
+void __free_pages_ok(struct page *page, unsigned int order)
+{
+	LIST_HEAD(list);
+	int i;
+	int reserved = 0;
+
+	arch_free_page(page, order);
+
+#ifndef CONFIG_MMU
+	if (order > 0)
+		for (i = 1 ; i < (1 << order) ; ++i)
+			__put_page(page + i);
+#endif
+
+	for (i = 0 ; i < (1 << order) ; ++i)
+		reserved += free_pages_check(__FUNCTION__, page + i);
+	if (reserved)
+		return;
+
+	list_add(&page->lru, &list);
+	mod_page_state(pgfree, 1 << order);
+	kernel_map_pages(page, 1<<order, 0);
+	free_pages_bulk(page_zone(page), 1, &list, order);
+}
+
+
+/*
+ * The order of subdivision here is critical for the IO subsystem.
+ * Please do not alter this order without good reasons and regression
+ * testing. Specifically, as large blocks of memory are subdivided,
+ * the order in which smaller blocks are delivered depends on the order
+ * they're subdivided in this function. This is the primary factor
+ * influencing the order in which pages are delivered to the IO
+ * subsystem according to empirical testing, and this is also justified
+ * by considering the behavior of a buddy system containing a single
+ * large block of memory acted on by a series of small allocations.
+ * This behavior is a critical factor in sglist merging's success.
+ *
+ * -- wli
+ */
+static inline struct page *
+expand(struct zone *zone, struct page *page,
+ 	int low, int high, struct free_area *area)
+{
+	unsigned long size = 1 << high;
+
+	while (high > low) {
+		area--;
+		high--;
+		size >>= 1;
+		BUG_ON(bad_range(zone, &page[size]));
+		list_add(&page[size].lru, &area->free_list);
+		area->nr_free++;
+		set_page_order(&page[size], high);
+	}
+	return page;
+}
+
+void set_page_refs(struct page *page, int order)
+{
+#ifdef CONFIG_MMU
+	set_page_count(page, 1);
+#else
+	int i;
+
+	/*
+	 * We need to reference all the pages for this order, otherwise if
+	 * anyone accesses one of the pages with (get/put) it will be freed.
+	 * - eg: access_process_vm()
+	 */
+	for (i = 0; i < (1 << order); i++)
+		set_page_count(page + i, 1);
+#endif /* CONFIG_MMU */
+}
+
+/*
+ * This page is about to be returned from the page allocator
+ */
+static int prep_new_page(struct page *page, int order)
+{
+	if (	page_mapcount(page) ||
+		page->mapping != NULL ||
+		page_count(page) != 0 ||
+		(page->flags & (
+			1 << PG_lru	|
+			1 << PG_private	|
+			1 << PG_locked	|
+			1 << PG_active	|
+			1 << PG_dirty	|
+			1 << PG_reclaim	|
+			1 << PG_slab    |
+			1 << PG_swapcache |
+			1 << PG_writeback |
+			1 << PG_reserved )))
+		bad_page(__FUNCTION__, page);
+
+	/*
+	 * For now, we report if PG_reserved was found set, but do not
+	 * clear it, and do not allocate the page: as a safety net.
+	 */
+	if (PageReserved(page))
+		return 1;
+
+	page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
+			1 << PG_referenced | 1 << PG_arch_1 |
+			1 << PG_checked | 1 << PG_mappedtodisk);
+	set_page_private(page, 0);
+	set_page_refs(page, order);
+	kernel_map_pages(page, 1 << order, 1);
+	return 0;
+}
+
+/* 
+ * Do the hard work of removing an element from the buddy allocator.
+ * Call me with the zone->lock already held.
+ */
+static struct page *__rmqueue(struct zone *zone, unsigned int order)
+{
+	struct free_area * area;
+	unsigned int current_order;
+	struct page *page;
+
+	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
+		area = zone->free_area + current_order;
+		if (list_empty(&area->free_list))
+			continue;
+
+		page = list_entry(area->free_list.next, struct page, lru);
+		list_del(&page->lru);
+		rmv_page_order(page);
+		area->nr_free--;
+		zone->free_pages -= 1UL << order;
+		return expand(zone, page, order, current_order, area);
+	}
+
+	return NULL;
+}
+
+/* 
+ * Obtain a specified number of elements from the buddy allocator, all under
+ * a single hold of the lock, for efficiency.  Add them to the supplied list.
+ * Returns the number of new pages which were placed at *list.
+ */
+static int rmqueue_bulk(struct zone *zone, unsigned int order, 
+			unsigned long count, struct list_head *list)
+{
+	unsigned long flags;
+	int i;
+	int allocated = 0;
+	struct page *page;
+	
+	spin_lock_irqsave(&zone->lock, flags);
+	for (i = 0; i < count; ++i) {
+		page = __rmqueue(zone, order);
+		if (page == NULL)
+			break;
+		allocated++;
+		list_add_tail(&page->lru, list);
+	}
+	spin_unlock_irqrestore(&zone->lock, flags);
+	return allocated;
+}
+
+#ifdef CONFIG_NUMA
+/* Called from the slab reaper to drain remote pagesets */
+void drain_remote_pages(void)
+{
+	struct zone *zone;
+	int i;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	for_each_zone(zone) {
+		struct per_cpu_pageset *pset;
+
+		/* Do not drain local pagesets */
+		if (zone->zone_pgdat->node_id == numa_node_id())
+			continue;
+
+		pset = zone->pageset[smp_processor_id()];
+		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
+			struct per_cpu_pages *pcp;
+
+			pcp = &pset->pcp[i];
+			if (pcp->count)
+				pcp->count -= free_pages_bulk(zone, pcp->count,
+						&pcp->list, 0);
+		}
+	}
+	local_irq_restore(flags);
+}
+#endif
+
+#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
+static void __drain_pages(unsigned int cpu)
+{
+	struct zone *zone;
+	int i;
+
+	for_each_zone(zone) {
+		struct per_cpu_pageset *pset;
+
+		pset = zone_pcp(zone, cpu);
+		for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
+			struct per_cpu_pages *pcp;
+
+			pcp = &pset->pcp[i];
+			pcp->count -= free_pages_bulk(zone, pcp->count,
+						&pcp->list, 0);
+		}
+	}
+}
+#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
+
+#ifdef CONFIG_PM
+
+void mark_free_pages(struct zone *zone)
+{
+	unsigned long zone_pfn, flags;
+	int order;
+	struct list_head *curr;
+
+	if (!zone->spanned_pages)
+		return;
+
+	spin_lock_irqsave(&zone->lock, flags);
+	for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
+		ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn));
+
+	for (order = MAX_ORDER - 1; order >= 0; --order)
+		list_for_each(curr, &zone->free_area[order].free_list) {
+			unsigned long start_pfn, i;
+
+			start_pfn = page_to_pfn(list_entry(curr, struct page, lru));
+
+			for (i=0; i < (1<<order); i++)
+				SetPageNosaveFree(pfn_to_page(start_pfn+i));
+	}
+	spin_unlock_irqrestore(&zone->lock, flags);
+}
+
+/*
+ * Spill all of this CPU's per-cpu pages back into the buddy allocator.
+ */
+void drain_local_pages(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);	
+	__drain_pages(smp_processor_id());
+	local_irq_restore(flags);	
+}
+#endif /* CONFIG_PM */
+
+static void zone_statistics(struct zonelist *zonelist, struct zone *z)
+{
+#ifdef CONFIG_NUMA
+	unsigned long flags;
+	int cpu;
+	pg_data_t *pg = z->zone_pgdat;
+	pg_data_t *orig = zonelist->zones[0]->zone_pgdat;
+	struct per_cpu_pageset *p;
+
+	local_irq_save(flags);
+	cpu = smp_processor_id();
+	p = zone_pcp(z,cpu);
+	if (pg == orig) {
+		p->numa_hit++;
+	} else {
+		p->numa_miss++;
+		zone_pcp(zonelist->zones[0], cpu)->numa_foreign++;
+	}
+	if (pg == NODE_DATA(numa_node_id()))
+		p->local_node++;
+	else
+		p->other_node++;
+	local_irq_restore(flags);
+#endif
+}
+
+/*
+ * Free a 0-order page
+ */
+static void FASTCALL(free_hot_cold_page(struct page *page, int cold));
+static void fastcall free_hot_cold_page(struct page *page, int cold)
+{
+	struct zone *zone = page_zone(page);
+	struct per_cpu_pages *pcp;
+	unsigned long flags;
+
+	arch_free_page(page, 0);
+
+	if (PageAnon(page))
+		page->mapping = NULL;
+	if (free_pages_check(__FUNCTION__, page))
+		return;
+
+	inc_page_state(pgfree);
+	kernel_map_pages(page, 1, 0);
+
+	pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
+	local_irq_save(flags);
+	list_add(&page->lru, &pcp->list);
+	pcp->count++;
+	if (pcp->count >= pcp->high)
+		pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+	local_irq_restore(flags);
+	put_cpu();
+}
+
+void fastcall free_hot_page(struct page *page)
+{
+	free_hot_cold_page(page, 0);
+}
+	
+void fastcall free_cold_page(struct page *page)
+{
+	free_hot_cold_page(page, 1);
+}
+
+static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
+{
+	int i;
+
+	BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
+	for(i = 0; i < (1 << order); i++)
+		clear_highpage(page + i);
+}
+
+/*
+ * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
+ * we cheat by calling it from here, in the order > 0 path.  Saves a branch
+ * or two.
+ */
+static struct page *
+buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
+{
+	unsigned long flags;
+	struct page *page;
+	int cold = !!(gfp_flags & __GFP_COLD);
+
+again:
+	if (order == 0) {
+		struct per_cpu_pages *pcp;
+
+		page = NULL;
+		pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
+		local_irq_save(flags);
+		if (pcp->count <= pcp->low)
+			pcp->count += rmqueue_bulk(zone, 0,
+						pcp->batch, &pcp->list);
+		if (pcp->count) {
+			page = list_entry(pcp->list.next, struct page, lru);
+			list_del(&page->lru);
+			pcp->count--;
+		}
+		local_irq_restore(flags);
+		put_cpu();
+	} else {
+		spin_lock_irqsave(&zone->lock, flags);
+		page = __rmqueue(zone, order);
+		spin_unlock_irqrestore(&zone->lock, flags);
+	}
+
+	if (page != NULL) {
+		BUG_ON(bad_range(zone, page));
+		mod_page_state_zone(zone, pgalloc, 1 << order);
+		if (prep_new_page(page, order))
+			goto again;
+
+		if (gfp_flags & __GFP_ZERO)
+			prep_zero_page(page, order, gfp_flags);
+
+		if (order && (gfp_flags & __GFP_COMP))
+			prep_compound_page(page, order);
+	}
+	return page;
+}
+
+#define ALLOC_NO_WATERMARKS	0x01 /* don't check watermarks at all */
+#define ALLOC_WMARK_MIN		0x02 /* use pages_min watermark */
+#define ALLOC_WMARK_LOW		0x04 /* use pages_low watermark */
+#define ALLOC_WMARK_HIGH	0x08 /* use pages_high watermark */
+#define ALLOC_HARDER		0x10 /* try to alloc harder */
+#define ALLOC_HIGH		0x20 /* __GFP_HIGH set */
+#define ALLOC_CPUSET		0x40 /* check for correct cpuset */
+
+/*
+ * Return 1 if free pages are above 'mark'. This takes into account the order
+ * of the allocation.
+ */
+int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+		      int classzone_idx, int alloc_flags)
+{
+	/* free_pages may go negative - that's OK */
+	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
+	int o;
+
+	if (alloc_flags & ALLOC_HIGH)
+		min -= min / 2;
+	if (alloc_flags & ALLOC_HARDER)
+		min -= min / 4;
+
+	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
+		return 0;
+	for (o = 0; o < order; o++) {
+		/* At the next order, this order's pages become unavailable */
+		free_pages -= z->free_area[o].nr_free << o;
+
+		/* Require fewer higher order pages to be free */
+		min >>= 1;
+
+		if (free_pages <= min)
+			return 0;
+	}
+	return 1;
+}
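+/*
+ * Worked example for zone_watermark_ok() (illustrative): order = 1,
+ * mark = 64, no lowmem reserve, and 100 free pages of which 50 are
+ * order-0 blocks:
+ *   free_pages = 100 - (1 << 1) + 1 = 99 > 64        (base check)
+ *   o = 0: free_pages -= 50 << 0 -> 49, min >>= 1 -> 32, 49 > 32
+ * so the order-1 watermark is met and the function returns 1.
+ */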
+
+/*
+ * get_page_from_freelist goes through the zonelist trying to allocate
+ * a page.
+ */
+static struct page *
+get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
+		struct zonelist *zonelist, int alloc_flags)
+{
+	struct zone **z = zonelist->zones;
+	struct page *page = NULL;
+	int classzone_idx = zone_idx(*z);
+
+	/*
+	 * Go through the zonelist once, looking for a zone with enough free.
+	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+	 */
+	do {
+		if ((alloc_flags & ALLOC_CPUSET) &&
+				!cpuset_zone_allowed(*z, gfp_mask))
+			continue;
+
+		if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
+			unsigned long mark;
+			if (alloc_flags & ALLOC_WMARK_MIN)
+				mark = (*z)->pages_min;
+			else if (alloc_flags & ALLOC_WMARK_LOW)
+				mark = (*z)->pages_low;
+			else
+				mark = (*z)->pages_high;
+			if (!zone_watermark_ok(*z, order, mark,
+				    classzone_idx, alloc_flags))
+				continue;
+		}
+
+		page = buffered_rmqueue(*z, order, gfp_mask);
+		if (page) {
+			zone_statistics(zonelist, *z);
+			break;
+		}
+	} while (*(++z) != NULL);
+	return page;
+}
+
+/*
+ * This is the 'heart' of the zoned buddy allocator.
+ */
+struct page * fastcall
+__alloc_pages(gfp_t gfp_mask, unsigned int order,
+		struct zonelist *zonelist)
+{
+	const gfp_t wait = gfp_mask & __GFP_WAIT;
+	struct zone **z;
+	struct page *page;
+	struct reclaim_state reclaim_state;
+	struct task_struct *p = current;
+	int do_retry;
+	int alloc_flags;
+	int did_some_progress;
+
+	might_sleep_if(wait);
+
+restart:
+	z = zonelist->zones;  /* the list of zones suitable for gfp_mask */
+
+	if (unlikely(*z == NULL)) {
+		/* Should this ever happen?? */
+		return NULL;
+	}
+
+	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
+				zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
+	if (page)
+		goto got_pg;
+
+	do {
+		wakeup_kswapd(*z, order);
+	} while (*(++z));
+
+	/*
+	 * OK, we're below the kswapd watermark and have kicked background
+	 * reclaim. Now things get more complex, so set up alloc_flags according
+	 * to how we want to proceed.
+	 *
+	 * The caller may dip into page reserves a bit more if the caller
+	 * cannot run direct reclaim, or if the caller has realtime scheduling
+	 * policy.
+	 */
+	alloc_flags = ALLOC_WMARK_MIN;
+	if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
+		alloc_flags |= ALLOC_HARDER;
+	if (gfp_mask & __GFP_HIGH)
+		alloc_flags |= ALLOC_HIGH;
+	if (wait)
+		alloc_flags |= ALLOC_CPUSET;
+
+	/*
+	 * Go through the zonelist again. Let __GFP_HIGH and allocations
+	 * coming from realtime tasks go deeper into reserves.
+	 *
+	 * This is the last chance, in general, before the goto nopage.
+	 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
+	 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+	 */
+	page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
+	if (page)
+		goto got_pg;
+
+	/* This allocation should allow future memory freeing. */
+
+	if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
+			&& !in_interrupt()) {
+		if (!(gfp_mask & __GFP_NOMEMALLOC)) {
+nofail_alloc:
+			/* go through the zonelist yet again, ignoring mins */
+			page = get_page_from_freelist(gfp_mask, order,
+				zonelist, ALLOC_NO_WATERMARKS|ALLOC_CPUSET);
+			if (page)
+				goto got_pg;
+			if (gfp_mask & __GFP_NOFAIL) {
+				blk_congestion_wait(WRITE, HZ/50);
+				goto nofail_alloc;
+			}
+		}
+		goto nopage;
+	}
+
+	/* Atomic allocations - we can't balance anything */
+	if (!wait)
+		goto nopage;
+
+rebalance:
+	cond_resched();
+
+	/* We now go into synchronous reclaim */
+	p->flags |= PF_MEMALLOC;
+	reclaim_state.reclaimed_slab = 0;
+	p->reclaim_state = &reclaim_state;
+
+	did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);
+
+	p->reclaim_state = NULL;
+	p->flags &= ~PF_MEMALLOC;
+
+	cond_resched();
+
+	if (likely(did_some_progress)) {
+		page = get_page_from_freelist(gfp_mask, order,
+						zonelist, alloc_flags);
+		if (page)
+			goto got_pg;
+	} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+		/*
+		 * Go through the zonelist yet one more time, keep
+		 * very high watermark here, this is only to catch
+		 * a parallel oom killing, we must fail if we're still
+		 * under heavy pressure.
+		 */
+		page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
+				zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
+		if (page)
+			goto got_pg;
+
+		out_of_memory(gfp_mask, order);
+		goto restart;
+	}
+
+	/*
+	 * Don't let big-order allocations loop unless the caller explicitly
+	 * requests that.  Wait for some write requests to complete then retry.
+	 *
+	 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
+	 * <= 3, but that may not be true in other implementations.
+	 */
+	do_retry = 0;
+	if (!(gfp_mask & __GFP_NORETRY)) {
+		if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
+			do_retry = 1;
+		if (gfp_mask & __GFP_NOFAIL)
+			do_retry = 1;
+	}
+	if (do_retry) {
+		blk_congestion_wait(WRITE, HZ/50);
+		goto rebalance;
+	}
+
+nopage:
+	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) {
+		printk(KERN_WARNING "%s: page allocation failure."
+			" order:%d, mode:0x%x\n",
+			p->comm, order, gfp_mask);
+		dump_stack();
+		show_mem();
+	}
+got_pg:
+	return page;
+}
+
+EXPORT_SYMBOL(__alloc_pages);
+
+/*
+ * Common helper functions.
+ */
+fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
+{
+	struct page * page;
+	page = alloc_pages(gfp_mask, order);
+	if (!page)
+		return 0;
+	return (unsigned long) page_address(page);
+}
+
+EXPORT_SYMBOL(__get_free_pages);
+
+fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
+{
+	struct page * page;
+
+	/*
+	 * get_zeroed_page() returns a 32-bit address, which cannot represent
+	 * a highmem page
+	 */
+	BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
+
+	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
+	if (page)
+		return (unsigned long) page_address(page);
+	return 0;
+}
+
+EXPORT_SYMBOL(get_zeroed_page);
+
+void __pagevec_free(struct pagevec *pvec)
+{
+	int i = pagevec_count(pvec);
+
+	while (--i >= 0)
+		free_hot_cold_page(pvec->pages[i], pvec->cold);
+}
+
+fastcall void __free_pages(struct page *page, unsigned int order)
+{
+	if (put_page_testzero(page)) {
+		if (order == 0)
+			free_hot_page(page);
+		else
+			__free_pages_ok(page, order);
+	}
+}
+
+EXPORT_SYMBOL(__free_pages);
+
+fastcall void free_pages(unsigned long addr, unsigned int order)
+{
+	if (addr != 0) {
+		BUG_ON(!virt_addr_valid((void *)addr));
+		__free_pages(virt_to_page((void *)addr), order);
+	}
+}
+
+EXPORT_SYMBOL(free_pages);
+
+/*
+ * Total amount of free (allocatable) RAM:
+ */
+unsigned int nr_free_pages(void)
+{
+	unsigned int sum = 0;
+	struct zone *zone;
+
+	for_each_zone(zone)
+		sum += zone->free_pages;
+
+	return sum;
+}
+
+EXPORT_SYMBOL(nr_free_pages);
+
+#ifdef CONFIG_NUMA
+unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
+{
+	unsigned int i, sum = 0;
+
+	for (i = 0; i < MAX_NR_ZONES; i++)
+		sum += pgdat->node_zones[i].free_pages;
+
+	return sum;
+}
+#endif
+
+static unsigned int nr_free_zone_pages(int offset)
+{
+	/* Just pick one node, since fallback list is circular */
+	pg_data_t *pgdat = NODE_DATA(numa_node_id());
+	unsigned int sum = 0;
+
+	struct zonelist *zonelist = pgdat->node_zonelists + offset;
+	struct zone **zonep = zonelist->zones;
+	struct zone *zone;
+
+	for (zone = *zonep++; zone; zone = *zonep++) {
+		unsigned long size = zone->present_pages;
+		unsigned long high = zone->pages_high;
+		if (size > high)
+			sum += size - high;
+	}
+
+	return sum;
+}
+
+/*
+ * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL
+ */
+unsigned int nr_free_buffer_pages(void)
+{
+	return nr_free_zone_pages(gfp_zone(GFP_USER));
+}
+
+/*
+ * Amount of free RAM allocatable within all zones
+ */
+unsigned int nr_free_pagecache_pages(void)
+{
+	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
+}
+
+#ifdef CONFIG_HIGHMEM
+unsigned int nr_free_highpages (void)
+{
+	pg_data_t *pgdat;
+	unsigned int pages = 0;
+
+	for_each_pgdat(pgdat)
+		pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages;
+
+	return pages;
+}
+#endif
+
+#ifdef CONFIG_NUMA
+static void show_node(struct zone *zone)
+{
+	printk("Node %d ", zone->zone_pgdat->node_id);
+}
+#else
+#define show_node(zone)	do { } while (0)
+#endif
+
+/*
+ * Accumulate the page_state information across all CPUs.
+ * The result is unavoidably approximate - it can change
+ * during and after execution of this function.
+ */
+static DEFINE_PER_CPU(struct page_state, page_states) = {0};
+
+atomic_t nr_pagecache = ATOMIC_INIT(0);
+EXPORT_SYMBOL(nr_pagecache);
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(long, nr_pagecache_local) = 0;
+#endif
+
+void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask)
+{
+	int cpu = 0;
+
+	memset(ret, 0, sizeof(*ret));
+	cpus_and(*cpumask, *cpumask, cpu_online_map);
+
+	cpu = first_cpu(*cpumask);
+	while (cpu < NR_CPUS) {
+		unsigned long *in, *out, off;
+
+		in = (unsigned long *)&per_cpu(page_states, cpu);
+
+		cpu = next_cpu(cpu, *cpumask);
+
+		if (cpu < NR_CPUS)
+			prefetch(&per_cpu(page_states, cpu));
+
+		out = (unsigned long *)ret;
+		for (off = 0; off < nr; off++)
+			*out++ += *in++;
+	}
+}
+
+void get_page_state_node(struct page_state *ret, int node)
+{
+	int nr;
+	cpumask_t mask = node_to_cpumask(node);
+
+	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
+	nr /= sizeof(unsigned long);
+
+	__get_page_state(ret, nr+1, &mask);
+}
+
+void get_page_state(struct page_state *ret)
+{
+	int nr;
+	cpumask_t mask = CPU_MASK_ALL;
+
+	nr = offsetof(struct page_state, GET_PAGE_STATE_LAST);
+	nr /= sizeof(unsigned long);
+
+	__get_page_state(ret, nr + 1, &mask);
+}
+
+void get_full_page_state(struct page_state *ret)
+{
+	cpumask_t mask = CPU_MASK_ALL;
+
+	__get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask);
+}
+
+unsigned long __read_page_state(unsigned long offset)
+{
+	unsigned long ret = 0;
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		unsigned long in;
+
+		in = (unsigned long)&per_cpu(page_states, cpu) + offset;
+		ret += *((unsigned long *)in);
+	}
+	return ret;
+}
+
+void __mod_page_state(unsigned long offset, unsigned long delta)
+{
+	unsigned long flags;
+	void* ptr;
+
+	local_irq_save(flags);
+	ptr = &__get_cpu_var(page_states);
+	*(unsigned long*)(ptr + offset) += delta;
+	local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(__mod_page_state);
+
+void __get_zone_counts(unsigned long *active, unsigned long *inactive,
+			unsigned long *free, struct pglist_data *pgdat)
+{
+	struct zone *zones = pgdat->node_zones;
+	int i;
+
+	*active = 0;
+	*inactive = 0;
+	*free = 0;
+	for (i = 0; i < MAX_NR_ZONES; i++) {
+		*active += zones[i].nr_active;
+		*inactive += zones[i].nr_inactive;
+		*free += zones[i].free_pages;
+	}
+}
+
+void get_zone_counts(unsigned long *active,
+		unsigned long *inactive, unsigned long *free)
+{
+	struct pglist_data *pgdat;
+
+	*active = 0;
+	*inactive = 0;
+	*free = 0;
+	for_each_pgdat(pgdat) {
+		unsigned long l, m, n;
+		__get_zone_counts(&l, &m, &n, pgdat);
+		*active += l;
+		*inactive += m;
+		*free += n;
+	}
+}
+
+void si_meminfo(struct sysinfo *val)
+{
+	val->totalram = totalram_pages;
+	val->sharedram = 0;
+	val->freeram = nr_free_pages();
+	val->bufferram = nr_blockdev_pages();
+#ifdef CONFIG_HIGHMEM
+	val->totalhigh = totalhigh_pages;
+	val->freehigh = nr_free_highpages();
+#else
+	val->totalhigh = 0;
+	val->freehigh = 0;
+#endif
+	val->mem_unit = PAGE_SIZE;
+}
+
+EXPORT_SYMBOL(si_meminfo);
+
+#ifdef CONFIG_NUMA
+void si_meminfo_node(struct sysinfo *val, int nid)
+{
+	pg_data_t *pgdat = NODE_DATA(nid);
+
+	val->totalram = pgdat->node_present_pages;
+	val->freeram = nr_free_pages_pgdat(pgdat);
+	val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
+	val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
+	val->mem_unit = PAGE_SIZE;
+}
+#endif
+
+#define K(x) ((x) << (PAGE_SHIFT-10))
+
+/*
+ * Show free area list (used inside shift_scroll-lock stuff)
+ * We also calculate the percentage fragmentation. We do this by counting the
+ * memory on each free list with the exception of the first item on the list.
+ */
+void show_free_areas(void)
+{
+	struct page_state ps;
+	int cpu, temperature;
+	unsigned long active;
+	unsigned long inactive;
+	unsigned long free;
+	struct zone *zone;
+
+	for_each_zone(zone) {
+		show_node(zone);
+		printk("%s per-cpu:", zone->name);
+
+		if (!zone->present_pages) {
+			printk(" empty\n");
+			continue;
+		} else
+			printk("\n");
+
+		for_each_online_cpu(cpu) {
+			struct per_cpu_pageset *pageset;
+
+			pageset = zone_pcp(zone, cpu);
+
+			for (temperature = 0; temperature < 2; temperature++)
+				printk("cpu %d %s: low %d, high %d, batch %d used:%d\n",
+					cpu,
+					temperature ? "cold" : "hot",
+					pageset->pcp[temperature].low,
+					pageset->pcp[temperature].high,
+					pageset->pcp[temperature].batch,
+					pageset->pcp[temperature].count);
+		}
+	}
+
+	get_page_state(&ps);
+	get_zone_counts(&active, &inactive, &free);
+
+	printk("Free pages: %11ukB (%ukB HighMem)\n",
+		K(nr_free_pages()),
+		K(nr_free_highpages()));
+
+	printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
+		"unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
+		active,
+		inactive,
+		ps.nr_dirty,
+		ps.nr_writeback,
+		ps.nr_unstable,
+		nr_free_pages(),
+		ps.nr_slab,
+		ps.nr_mapped,
+		ps.nr_page_table_pages);
+
+	for_each_zone(zone) {
+		int i;
+
+		show_node(zone);
+		printk("%s"
+			" free:%lukB"
+			" min:%lukB"
+			" low:%lukB"
+			" high:%lukB"
+			" active:%lukB"
+			" inactive:%lukB"
+			" present:%lukB"
+			" pages_scanned:%lu"
+			" all_unreclaimable? %s"
+			"\n",
+			zone->name,
+			K(zone->free_pages),
+			K(zone->pages_min),
+			K(zone->pages_low),
+			K(zone->pages_high),
+			K(zone->nr_active),
+			K(zone->nr_inactive),
+			K(zone->present_pages),
+			zone->pages_scanned,
+			(zone->all_unreclaimable ? "yes" : "no")
+			);
+		printk("lowmem_reserve[]:");
+		for (i = 0; i < MAX_NR_ZONES; i++)
+			printk(" %lu", zone->lowmem_reserve[i]);
+		printk("\n");
+	}
+
+	for_each_zone(zone) {
+ 		unsigned long nr, flags, order, total = 0;
+
+		show_node(zone);
+		printk("%s: ", zone->name);
+		if (!zone->present_pages) {
+			printk("empty\n");
+			continue;
+		}
+
+		spin_lock_irqsave(&zone->lock, flags);
+		for (order = 0; order < MAX_ORDER; order++) {
+			nr = zone->free_area[order].nr_free;
+			total += nr << order;
+			printk("%lu*%lukB ", nr, K(1UL) << order);
+		}
+		spin_unlock_irqrestore(&zone->lock, flags);
+		printk("= %lukB\n", K(total));
+	}
+
+	show_swap_cache_info();
+}
+
+/*
+ * Builds allocation fallback zone lists.
+ */
+static int __init build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, int j, int k)
+{
+	switch (k) {
+		struct zone *zone;
+	default:
+		BUG();
+	case ZONE_HIGHMEM:
+		zone = pgdat->node_zones + ZONE_HIGHMEM;
+		if (zone->present_pages) {
+#ifndef CONFIG_HIGHMEM
+			BUG();
+#endif
+			zonelist->zones[j++] = zone;
+		}
+		/* fall through */
+	case ZONE_NORMAL:
+		zone = pgdat->node_zones + ZONE_NORMAL;
+		if (zone->present_pages)
+			zonelist->zones[j++] = zone;
+		/* fall through */
+	case ZONE_DMA32:
+		zone = pgdat->node_zones + ZONE_DMA32;
+		if (zone->present_pages)
+			zonelist->zones[j++] = zone;
+		/* fall through */
+	case ZONE_DMA:
+		zone = pgdat->node_zones + ZONE_DMA;
+		if (zone->present_pages)
+			zonelist->zones[j++] = zone;
+	}
+
+	return j;
+}
+
+static inline int highest_zone(int zone_bits)
+{
+	int res = ZONE_NORMAL;
+	if (zone_bits & (__force int)__GFP_HIGHMEM)
+		res = ZONE_HIGHMEM;
+	if (zone_bits & (__force int)__GFP_DMA32)
+		res = ZONE_DMA32;
+	if (zone_bits & (__force int)__GFP_DMA)
+		res = ZONE_DMA;
+	return res;
+}
+
+#ifdef CONFIG_NUMA
+#define MAX_NODE_LOAD (num_online_nodes())
+static int __initdata node_load[MAX_NUMNODES];
+/**
+ * find_next_best_node - find the next node that should appear in a given node's fallback list
+ * @node: node whose fallback list we're appending
+ * @used_node_mask: nodemask_t of already used nodes
+ *
+ * We use a number of factors to determine which is the next node that should
+ * appear on a given node's fallback list.  The node should not have appeared
+ * already in @node's fallback list, and it should be the next closest node
+ * according to the distance array (which contains arbitrary distance values
+ * from each node to each node in the system), and should also prefer nodes
+ * with no CPUs, since presumably they'll have very little allocation pressure
+ * on them otherwise.
+ * It returns -1 if no node is found.
+ */
+static int __init find_next_best_node(int node, nodemask_t *used_node_mask)
+{
+	int i, n, val;
+	int min_val = INT_MAX;
+	int best_node = -1;
+
+	for_each_online_node(i) {
+		cpumask_t tmp;
+
+		/* Start from local node */
+		n = (node+i) % num_online_nodes();
+
+		/* Don't want a node to appear more than once */
+		if (node_isset(n, *used_node_mask))
+			continue;
+
+		/* Use the local node if we haven't already */
+		if (!node_isset(node, *used_node_mask)) {
+			best_node = node;
+			break;
+		}
+
+		/* Use the distance array to find the distance */
+		val = node_distance(node, n);
+
+		/* Give preference to headless and unused nodes */
+		tmp = node_to_cpumask(n);
+		if (!cpus_empty(tmp))
+			val += PENALTY_FOR_NODE_WITH_CPUS;
+
+		/* Slight preference for less loaded node */
+		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
+		val += node_load[n];
+
+		if (val < min_val) {
+			min_val = val;
+			best_node = n;
+		}
+	}
+
+	if (best_node >= 0)
+		node_set(best_node, *used_node_mask);
+
+	return best_node;
+}
+
+static void __init build_zonelists(pg_data_t *pgdat)
+{
+	int i, j, k, node, local_node;
+	int prev_node, load;
+	struct zonelist *zonelist;
+	nodemask_t used_mask;
+
+	/* initialize zonelists */
+	for (i = 0; i < GFP_ZONETYPES; i++) {
+		zonelist = pgdat->node_zonelists + i;
+		zonelist->zones[0] = NULL;
+	}
+
+	/* NUMA-aware ordering of nodes */
+	local_node = pgdat->node_id;
+	load = num_online_nodes();
+	prev_node = local_node;
+	nodes_clear(used_mask);
+	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
+		/*
+		 * We don't want to pressure a particular node.
+		 * So we add a penalty to the first node in the same
+		 * distance group, to make the ordering round-robin.
+		 */
+		if (node_distance(local_node, node) !=
+				node_distance(local_node, prev_node))
+			node_load[node] += load;
+		prev_node = node;
+		load--;
+		for (i = 0; i < GFP_ZONETYPES; i++) {
+			zonelist = pgdat->node_zonelists + i;
+			for (j = 0; zonelist->zones[j] != NULL; j++);
+
+			k = highest_zone(i);
+
+	 		j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
+			zonelist->zones[j] = NULL;
+		}
+	}
+}
+
+#else	/* CONFIG_NUMA */
+
+static void __init build_zonelists(pg_data_t *pgdat)
+{
+	int i, j, k, node, local_node;
+
+	local_node = pgdat->node_id;
+	for (i = 0; i < GFP_ZONETYPES; i++) {
+		struct zonelist *zonelist;
+
+		zonelist = pgdat->node_zonelists + i;
+
+		j = 0;
+		k = highest_zone(i);
+ 		j = build_zonelists_node(pgdat, zonelist, j, k);
+ 		/*
+ 		 * Now we build the zonelist so that it contains the zones
+ 		 * of all the other nodes.
+ 		 * We don't want to pressure a particular node, so when
+ 		 * building the zones for node N, we make sure that the
+ 		 * zones coming right after the local ones are those from
+ 		 * node N+1 (modulo N)
+ 		 */
+		for (node = local_node + 1; node < MAX_NUMNODES; node++) {
+			if (!node_online(node))
+				continue;
+			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
+		}
+		for (node = 0; node < local_node; node++) {
+			if (!node_online(node))
+				continue;
+			j = build_zonelists_node(NODE_DATA(node), zonelist, j, k);
+		}
+
+		zonelist->zones[j] = NULL;
+	}
+}
+
+#endif	/* CONFIG_NUMA */
+
+void __init build_all_zonelists(void)
+{
+	int i;
+
+	for_each_online_node(i)
+		build_zonelists(NODE_DATA(i));
+	printk("Built %i zonelists\n", num_online_nodes());
+	cpuset_init_current_mems_allowed();
+}
+
+/*
+ * Helper functions to size the waitqueue hash table.
+ * Essentially these want to choose hash table sizes sufficiently
+ * large so that collisions trying to wait on pages are rare.
+ * But in fact, the number of active page waitqueues on typical
+ * systems is ridiculously low, less than 200. So this is even
+ * conservative, even though it seems large.
+ *
+ * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
+ * waitqueues, i.e. the size of the waitq table given the number of pages.
+ */
+#define PAGES_PER_WAITQUEUE	256
+
+static inline unsigned long wait_table_size(unsigned long pages)
+{
+	unsigned long size = 1;
+
+	pages /= PAGES_PER_WAITQUEUE;
+
+	while (size < pages)
+		size <<= 1;
+
+	/*
+	 * Once we have dozens or even hundreds of threads sleeping
+	 * on IO we've got bigger problems than wait queue collision.
+	 * Limit the size of the wait table to a reasonable size.
+	 */
+	size = min(size, 4096UL);
+
+	return max(size, 4UL);
+}
+
+/*
+ * This is an integer logarithm so that shifts can be used later
+ * to extract the more random high bits from the multiplicative
+ * hash function before the remainder is taken.
+ */
+static inline unsigned long wait_table_bits(unsigned long size)
+{
+	return ffz(~size);
+}
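+/*
+ * Worked example for the two helpers above (illustrative): a zone of
+ * 2^20 pages (4GB with 4KB pages) gives pages / PAGES_PER_WAITQUEUE =
+ * 4096; size doubles up to 4096, which is exactly the cap, and
+ * wait_table_bits(4096) = 12.
+ */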
+
+#define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
+
+static void __init calculate_zone_totalpages(struct pglist_data *pgdat,
+		unsigned long *zones_size, unsigned long *zholes_size)
+{
+	unsigned long realtotalpages, totalpages = 0;
+	int i;
+
+	for (i = 0; i < MAX_NR_ZONES; i++)
+		totalpages += zones_size[i];
+	pgdat->node_spanned_pages = totalpages;
+
+	realtotalpages = totalpages;
+	if (zholes_size)
+		for (i = 0; i < MAX_NR_ZONES; i++)
+			realtotalpages -= zholes_size[i];
+	pgdat->node_present_pages = realtotalpages;
+	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages);
+}
+
+
+/*
+ * Initially all pages are reserved - free ones are freed
+ * up by free_all_bootmem() once the early boot process is
+ * done. Non-atomic initialization, single-pass.
+ */
+void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
+		unsigned long start_pfn)
+{
+	struct page *page;
+	unsigned long end_pfn = start_pfn + size;
+	unsigned long pfn;
+
+	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
+		if (!early_pfn_valid(pfn))
+			continue;
+		if (!early_pfn_in_nid(pfn, nid))
+			continue;
+		page = pfn_to_page(pfn);
+		set_page_links(page, zone, nid, pfn);
+		set_page_count(page, 1);
+		reset_page_mapcount(page);
+		SetPageReserved(page);
+		INIT_LIST_HEAD(&page->lru);
+#ifdef WANT_PAGE_VIRTUAL
+		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
+		if (!is_highmem_idx(zone))
+			set_page_address(page, __va(pfn << PAGE_SHIFT));
+#endif
+	}
+}
+
+void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
+				unsigned long size)
+{
+	int order;
+	for (order = 0; order < MAX_ORDER ; order++) {
+		INIT_LIST_HEAD(&zone->free_area[order].free_list);
+		zone->free_area[order].nr_free = 0;
+	}
+}
+
+#define ZONETABLE_INDEX(x, zone_nr)	((x << ZONES_SHIFT) | zone_nr)
+void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
+		unsigned long size)
+{
+	unsigned long snum = pfn_to_section_nr(pfn);
+	unsigned long end = pfn_to_section_nr(pfn + size);
+
+	if (FLAGS_HAS_NODE)
+		zone_table[ZONETABLE_INDEX(nid, zid)] = zone;
+	else
+		for (; snum <= end; snum++)
+			zone_table[ZONETABLE_INDEX(snum, zid)] = zone;
+}
+
+#ifndef __HAVE_ARCH_MEMMAP_INIT
+#define memmap_init(size, nid, zone, start_pfn) \
+	memmap_init_zone((size), (nid), (zone), (start_pfn))
+#endif
+
+static int __devinit zone_batchsize(struct zone *zone)
+{
+	int batch;
+
+	/*
+	 * The per-cpu-pages pools are set to around 1000th of the
+	 * size of the zone.  But no more than 1/2 of a meg.
+	 *
+	 * OK, so we don't know how big the cache is.  So guess.
+	 */
+	batch = zone->present_pages / 1024;
+	if (batch * PAGE_SIZE > 512 * 1024)
+		batch = (512 * 1024) / PAGE_SIZE;
+	batch /= 4;		/* We effectively *= 4 below */
+	if (batch < 1)
+		batch = 1;
+
+	/*
+	 * Clamp the batch to a 2^n - 1 value. Having a power
+	 * of 2 value was found to be more likely to have
+	 * suboptimal cache aliasing properties in some cases.
+	 *
+	 * For example if 2 tasks are alternately allocating
+	 * batches of pages, one task can end up with a lot
+	 * of pages of one half of the possible page colors
+	 * and the other with pages of the other colors.
+	 */
+	batch = (1 << (fls(batch + batch/2)-1)) - 1;
+
+	return batch;
+}
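+
+/*
+ * Worked example (illustrative): for a 1GiB zone of 4KiB pages,
+ * present_pages = 262144, so batch starts at 256, is capped by the
+ * 512KiB limit to 128, divided by 4 to 32, and finally clamped to the
+ * nearest 2^n - 1 value: fls(32 + 16) == 6, giving a batch of 31.
+ */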
+
+inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
+{
+	struct per_cpu_pages *pcp;
+
+	memset(p, 0, sizeof(*p));
+
+	pcp = &p->pcp[0];		/* hot */
+	pcp->count = 0;
+	pcp->low = 0;
+	pcp->high = 6 * batch;
+	pcp->batch = max(1UL, 1 * batch);
+	INIT_LIST_HEAD(&pcp->list);
+
+	pcp = &p->pcp[1];		/* cold */
+	pcp->count = 0;
+	pcp->low = 0;
+	pcp->high = 2 * batch;
+	pcp->batch = max(1UL, batch/2);
+	INIT_LIST_HEAD(&pcp->list);
+}
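+
+/*
+ * Continuing the example above (illustrative): setup_pageset() with
+ * batch == 31 gives the hot list high = 186 and batch = 31, and the
+ * cold list high = 62 and batch = 15, so cold pages are returned to
+ * the buddy allocator in smaller bursts.
+ */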
+
+#ifdef CONFIG_NUMA
+/*
+ * Boot pageset table. One per cpu which is going to be used for all
+ * zones and all nodes. The parameters will be set in such a way
+ * that an item put on a list will immediately be handed over to
+ * the buddy list. This is safe since pageset manipulation is done
+ * with interrupts disabled.
+ *
+ * Some NUMA counter updates may also be caught by the boot pagesets.
+ *
+ * The boot_pagesets must be kept even after bootup is complete for
+ * unused processors and/or zones. They do play a role for bootstrapping
+ * hotplugged processors.
+ *
+ * zoneinfo_show() and maybe other functions do not check if the
+ * processor is online before following the pageset pointer.  Other
+ * parts of the kernel may not check if the zone is available.
+ */
+static struct per_cpu_pageset boot_pageset[NR_CPUS];
+
+/*
+ * Dynamically allocate memory for the
+ * per cpu pageset array in struct zone.
+ */
+static int __devinit process_zones(int cpu)
+{
+	struct zone *zone, *dzone;
+
+	for_each_zone(zone) {
+		zone->pageset[cpu] = kmalloc_node(sizeof(struct per_cpu_pageset),
+					 GFP_KERNEL, cpu_to_node(cpu));
+		if (!zone->pageset[cpu])
+			goto bad;
+
+		setup_pageset(zone->pageset[cpu], zone_batchsize(zone));
+	}
+
+	return 0;
+bad:
+	for_each_zone(dzone) {
+		if (dzone == zone)
+			break;
+		kfree(dzone->pageset[cpu]);
+		dzone->pageset[cpu] = NULL;
+	}
+	return -ENOMEM;
+}
+
+/* Undo process_zones() for a cpu whose bring-up was cancelled or that died. */
+static inline void free_zone_pagesets(int cpu)
+{
+	struct zone *zone;
+
+	for_each_zone(zone) {
+		struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
+
+		zone_pcp(zone, cpu) = NULL;
+		kfree(pset);
+	}
+}
+
+static int __devinit pageset_cpuup_callback(struct notifier_block *nfb,
+		unsigned long action,
+		void *hcpu)
+{
+	int cpu = (long)hcpu;
+	int ret = NOTIFY_OK;
+
+	switch (action) {
+	case CPU_UP_PREPARE:
+		if (process_zones(cpu))
+			ret = NOTIFY_BAD;
+		break;
+	case CPU_UP_CANCELED:
+	case CPU_DEAD:
+		free_zone_pagesets(cpu);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+static struct notifier_block pageset_notifier =
+	{ &pageset_cpuup_callback, NULL, 0 };
+
+void __init setup_per_cpu_pageset(void)
+{
+	int err;
+
+	/*
+	 * Initialize per_cpu_pageset for the boot cpu.  A cpu-up
+	 * callback will do this for every other cpu as it comes
+	 * online.
+	 */
+	err = process_zones(smp_processor_id());
+	BUG_ON(err);
+	register_cpu_notifier(&pageset_notifier);
+}
+
+#endif
+
+static __devinit
+void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
+{
+	int i;
+	struct pglist_data *pgdat = zone->zone_pgdat;
+
+	/*
+	 * The per-page waitqueue mechanism uses hashed waitqueues
+	 * per zone.
+	 */
+	zone->wait_table_size = wait_table_size(zone_size_pages);
+	zone->wait_table_bits = wait_table_bits(zone->wait_table_size);
+	zone->wait_table = (wait_queue_head_t *)
+		alloc_bootmem_node(pgdat, zone->wait_table_size
+					* sizeof(wait_queue_head_t));
+
+	for (i = 0; i < zone->wait_table_size; ++i)
+		init_waitqueue_head(zone->wait_table + i);
+}
+
+static __devinit void zone_pcp_init(struct zone *zone)
+{
+	int cpu;
+	unsigned long batch = zone_batchsize(zone);
+
+	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+#ifdef CONFIG_NUMA
+		/* Early boot. Slab allocator not functional yet */
+		zone->pageset[cpu] = &boot_pageset[cpu];
+		setup_pageset(&boot_pageset[cpu], 0);
+#else
+		setup_pageset(zone_pcp(zone, cpu), batch);
+#endif
+	}
+	printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
+		zone->name, zone->present_pages, batch);
+}
+
+static __devinit void init_currently_empty_zone(struct zone *zone,
+		unsigned long zone_start_pfn, unsigned long size)
+{
+	struct pglist_data *pgdat = zone->zone_pgdat;
+
+	zone_wait_table_init(zone, size);
+	pgdat->nr_zones = zone_idx(zone) + 1;
+
+	zone->zone_mem_map = pfn_to_page(zone_start_pfn);
+	zone->zone_start_pfn = zone_start_pfn;
+
+	memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
+
+	zone_init_free_lists(pgdat, zone, zone->spanned_pages);
+}
+
+/*
+ * Set up the zone data structures:
+ *   - mark all pages reserved
+ *   - mark all memory queues empty
+ *   - clear the memory bitmaps
+ */
+static void __init free_area_init_core(struct pglist_data *pgdat,
+		unsigned long *zones_size, unsigned long *zholes_size)
+{
+	unsigned long j;
+	int nid = pgdat->node_id;
+	unsigned long zone_start_pfn = pgdat->node_start_pfn;
+
+	pgdat_resize_init(pgdat);
+	pgdat->nr_zones = 0;
+	init_waitqueue_head(&pgdat->kswapd_wait);
+	pgdat->kswapd_max_order = 0;
+
+	for (j = 0; j < MAX_NR_ZONES; j++) {
+		struct zone *zone = pgdat->node_zones + j;
+		unsigned long size, realsize;
+
+		realsize = size = zones_size[j];
+		if (zholes_size)
+			realsize -= zholes_size[j];
+
+		if (j < ZONE_HIGHMEM)
+			nr_kernel_pages += realsize;
+		nr_all_pages += realsize;
+
+		zone->spanned_pages = size;
+		zone->present_pages = realsize;
+		zone->name = zone_names[j];
+		spin_lock_init(&zone->lock);
+		spin_lock_init(&zone->lru_lock);
+		zone_seqlock_init(zone);
+		zone->zone_pgdat = pgdat;
+		zone->free_pages = 0;
+
+		zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
+
+		zone_pcp_init(zone);
+		INIT_LIST_HEAD(&zone->active_list);
+		INIT_LIST_HEAD(&zone->inactive_list);
+		zone->nr_scan_active = 0;
+		zone->nr_scan_inactive = 0;
+		zone->nr_active = 0;
+		zone->nr_inactive = 0;
+		atomic_set(&zone->reclaim_in_progress, 0);
+		if (!size)
+			continue;
+
+		zonetable_add(zone, nid, j, zone_start_pfn, size);
+		init_currently_empty_zone(zone, zone_start_pfn, size);
+		zone_start_pfn += size;
+	}
+}
+
+static void __init alloc_node_mem_map(struct pglist_data *pgdat)
+{
+	/* Skip empty nodes */
+	if (!pgdat->node_spanned_pages)
+		return;
+
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+	/* ia64 gets its own node_mem_map, before this, without bootmem */
+	if (!pgdat->node_mem_map) {
+		unsigned long size;
+		struct page *map;
+
+		size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
+		map = alloc_remap(pgdat->node_id, size);
+		if (!map)
+			map = alloc_bootmem_node(pgdat, size);
+		pgdat->node_mem_map = map;
+	}
+#ifdef CONFIG_FLATMEM
+	/*
+	 * With no DISCONTIG, the global mem_map is just set as node 0's
+	 */
+	if (pgdat == NODE_DATA(0))
+		mem_map = NODE_DATA(0)->node_mem_map;
+#endif
+#endif /* CONFIG_FLAT_NODE_MEM_MAP */
+}
+
+void __init free_area_init_node(int nid, struct pglist_data *pgdat,
+		unsigned long *zones_size, unsigned long node_start_pfn,
+		unsigned long *zholes_size)
+{
+	pgdat->node_id = nid;
+	pgdat->node_start_pfn = node_start_pfn;
+	calculate_zone_totalpages(pgdat, zones_size, zholes_size);
+
+	alloc_node_mem_map(pgdat);
+
+	free_area_init_core(pgdat, zones_size, zholes_size);
+}
+
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+static bootmem_data_t contig_bootmem_data;
+struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
+
+EXPORT_SYMBOL(contig_page_data);
+#endif
+
+void __init free_area_init(unsigned long *zones_size)
+{
+	free_area_init_node(0, NODE_DATA(0), zones_size,
+			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
+}
+
+#ifdef CONFIG_PROC_FS
+
+#include <linux/seq_file.h>
+
+static void *frag_start(struct seq_file *m, loff_t *pos)
+{
+	pg_data_t *pgdat;
+	loff_t node = *pos;
+
+	for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next)
+		--node;
+
+	return pgdat;
+}
+
+static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
+{
+	pg_data_t *pgdat = (pg_data_t *)arg;
+
+	(*pos)++;
+	return pgdat->pgdat_next;
+}
+
+static void frag_stop(struct seq_file *m, void *arg)
+{
+}
+
+/*
+ * This walks the free areas for each zone.
+ */
+static int frag_show(struct seq_file *m, void *arg)
+{
+	pg_data_t *pgdat = (pg_data_t *)arg;
+	struct zone *zone;
+	struct zone *node_zones = pgdat->node_zones;
+	unsigned long flags;
+	int order;
+
+	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
+		if (!zone->present_pages)
+			continue;
+
+		spin_lock_irqsave(&zone->lock, flags);
+		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
+		for (order = 0; order < MAX_ORDER; ++order)
+			seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
+		spin_unlock_irqrestore(&zone->lock, flags);
+		seq_putc(m, '\n');
+	}
+	return 0;
+}
+
+struct seq_operations fragmentation_op = {
+	.start	= frag_start,
+	.next	= frag_next,
+	.stop	= frag_stop,
+	.show	= frag_show,
+};
+
+/*
+ * Output information about zones in @pgdat.
+ */
+static int zoneinfo_show(struct seq_file *m, void *arg)
+{
+	pg_data_t *pgdat = arg;
+	struct zone *zone;
+	struct zone *node_zones = pgdat->node_zones;
+	unsigned long flags;
+
+	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
+		int i;
+
+		if (!zone->present_pages)
+			continue;
+
+		spin_lock_irqsave(&zone->lock, flags);
+		seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
+		seq_printf(m,
+			   "\n  pages free     %lu"
+			   "\n        min      %lu"
+			   "\n        low      %lu"
+			   "\n        high     %lu"
+			   "\n        active   %lu"
+			   "\n        inactive %lu"
+			   "\n        scanned  %lu (a: %lu i: %lu)"
+			   "\n        spanned  %lu"
+			   "\n        present  %lu",
+			   zone->free_pages,
+			   zone->pages_min,
+			   zone->pages_low,
+			   zone->pages_high,
+			   zone->nr_active,
+			   zone->nr_inactive,
+			   zone->pages_scanned,
+			   zone->nr_scan_active, zone->nr_scan_inactive,
+			   zone->spanned_pages,
+			   zone->present_pages);
+		seq_printf(m,
+			   "\n        protection: (%lu",
+			   zone->lowmem_reserve[0]);
+		for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
+			seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
+		seq_printf(m,
+			   ")"
+			   "\n  pagesets");
+		for (i = 0; i < ARRAY_SIZE(zone->pageset); i++) {
+			struct per_cpu_pageset *pageset;
+			int j;
+
+			pageset = zone_pcp(zone, i);
+			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
+				if (pageset->pcp[j].count)
+					break;
+			}
+			if (j == ARRAY_SIZE(pageset->pcp))
+				continue;
+			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
+				seq_printf(m,
+					   "\n    cpu: %i pcp: %i"
+					   "\n              count: %i"
+					   "\n              low:   %i"
+					   "\n              high:  %i"
+					   "\n              batch: %i",
+					   i, j,
+					   pageset->pcp[j].count,
+					   pageset->pcp[j].low,
+					   pageset->pcp[j].high,
+					   pageset->pcp[j].batch);
+			}
+#ifdef CONFIG_NUMA
+			seq_printf(m,
+				   "\n            numa_hit:       %lu"
+				   "\n            numa_miss:      %lu"
+				   "\n            numa_foreign:   %lu"
+				   "\n            interleave_hit: %lu"
+				   "\n            local_node:     %lu"
+				   "\n            other_node:     %lu",
+				   pageset->numa_hit,
+				   pageset->numa_miss,
+				   pageset->numa_foreign,
+				   pageset->interleave_hit,
+				   pageset->local_node,
+				   pageset->other_node);
+#endif
+		}
+		seq_printf(m,
+			   "\n  all_unreclaimable: %u"
+			   "\n  prev_priority:     %i"
+			   "\n  temp_priority:     %i"
+			   "\n  start_pfn:         %lu",
+			   zone->all_unreclaimable,
+			   zone->prev_priority,
+			   zone->temp_priority,
+			   zone->zone_start_pfn);
+		spin_unlock_irqrestore(&zone->lock, flags);
+		seq_putc(m, '\n');
+	}
+	return 0;
+}
+
+struct seq_operations zoneinfo_op = {
+	.start	= frag_start, /* iterate over all zones. The same as in
+			       * fragmentation. */
+	.next	= frag_next,
+	.stop	= frag_stop,
+	.show	= zoneinfo_show,
+};
+
+static char *vmstat_text[] = {
+	"nr_dirty",
+	"nr_writeback",
+	"nr_unstable",
+	"nr_page_table_pages",
+	"nr_mapped",
+	"nr_slab",
+
+	"pgpgin",
+	"pgpgout",
+	"pswpin",
+	"pswpout",
+	"pgalloc_high",
+
+	"pgalloc_normal",
+	"pgalloc_dma",
+	"pgfree",
+	"pgactivate",
+	"pgdeactivate",
+
+	"pgfault",
+	"pgmajfault",
+	"pgrefill_high",
+	"pgrefill_normal",
+	"pgrefill_dma",
+
+	"pgsteal_high",
+	"pgsteal_normal",
+	"pgsteal_dma",
+	"pgscan_kswapd_high",
+	"pgscan_kswapd_normal",
+
+	"pgscan_kswapd_dma",
+	"pgscan_direct_high",
+	"pgscan_direct_normal",
+	"pgscan_direct_dma",
+	"pginodesteal",
+
+	"slabs_scanned",
+	"kswapd_steal",
+	"kswapd_inodesteal",
+	"pageoutrun",
+	"allocstall",
+
+	"pgrotated",
+	"nr_bounce",
+};
+
+static void *vmstat_start(struct seq_file *m, loff_t *pos)
+{
+	struct page_state *ps;
+
+	if (*pos >= ARRAY_SIZE(vmstat_text))
+		return NULL;
+
+	ps = kmalloc(sizeof(*ps), GFP_KERNEL);
+	m->private = ps;
+	if (!ps)
+		return ERR_PTR(-ENOMEM);
+	get_full_page_state(ps);
+	ps->pgpgin /= 2;		/* sectors -> kbytes */
+	ps->pgpgout /= 2;
+	return (unsigned long *)ps + *pos;
+}
+
+static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
+{
+	(*pos)++;
+	if (*pos >= ARRAY_SIZE(vmstat_text))
+		return NULL;
+	return (unsigned long *)m->private + *pos;
+}
+
+static int vmstat_show(struct seq_file *m, void *arg)
+{
+	unsigned long *l = arg;
+	unsigned long off = l - (unsigned long *)m->private;
+
+	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
+	return 0;
+}
+
+static void vmstat_stop(struct seq_file *m, void *arg)
+{
+	kfree(m->private);
+	m->private = NULL;
+}
+
+struct seq_operations vmstat_op = {
+	.start	= vmstat_start,
+	.next	= vmstat_next,
+	.stop	= vmstat_stop,
+	.show	= vmstat_show,
+};
+
+#endif /* CONFIG_PROC_FS */
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int page_alloc_cpu_notify(struct notifier_block *self,
+				 unsigned long action, void *hcpu)
+{
+	int cpu = (unsigned long)hcpu;
+	long *count;
+	unsigned long *src, *dest;
+
+	if (action == CPU_DEAD) {
+		int i;
+
+		/* Drain local pagecache count. */
+		count = &per_cpu(nr_pagecache_local, cpu);
+		atomic_add(*count, &nr_pagecache);
+		*count = 0;
+		local_irq_disable();
+		__drain_pages(cpu);
+
+		/* Add dead cpu's page_states to our own. */
+		dest = (unsigned long *)&__get_cpu_var(page_states);
+		src = (unsigned long *)&per_cpu(page_states, cpu);
+
+		for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long);
+				i++) {
+			dest[i] += src[i];
+			src[i] = 0;
+		}
+
+		local_irq_enable();
+	}
+	return NOTIFY_OK;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+void __init page_alloc_init(void)
+{
+	hotcpu_notifier(page_alloc_cpu_notify, 0);
+}
+
+/*
+ * setup_per_zone_lowmem_reserve - called whenever
+ *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
+ *	has a correct pages reserved value, so an adequate number of
+ *	pages are left in the zone after a successful __alloc_pages().
+ */
+static void setup_per_zone_lowmem_reserve(void)
+{
+	struct pglist_data *pgdat;
+	int j, idx;
+
+	for_each_pgdat(pgdat) {
+		for (j = 0; j < MAX_NR_ZONES; j++) {
+			struct zone *zone = pgdat->node_zones + j;
+			unsigned long present_pages = zone->present_pages;
+
+			zone->lowmem_reserve[j] = 0;
+
+			for (idx = j-1; idx >= 0; idx--) {
+				struct zone *lower_zone;
+
+				if (sysctl_lowmem_reserve_ratio[idx] < 1)
+					sysctl_lowmem_reserve_ratio[idx] = 1;
+
+				lower_zone = pgdat->node_zones + idx;
+				lower_zone->lowmem_reserve[j] = present_pages /
+					sysctl_lowmem_reserve_ratio[idx];
+				present_pages += lower_zone->present_pages;
+			}
+		}
+	}
+}
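+
+/*
+ * Worked example (illustrative, assuming the default ratio of 32 for
+ * ZONE_NORMAL): with 1048576 pages of highmem, the NORMAL zone gets
+ * lowmem_reserve[HIGHMEM] = 1048576 / 32 = 32768 pages, i.e. 128MiB
+ * that an allocation which could have used highmem may not consume
+ * when it falls back to lowmem.
+ */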
+
+/*
+ * setup_per_zone_pages_min - called when min_free_kbytes changes.  Ensures
+ *	that the pages_{min,low,high} values for each zone are set correctly
+ *	with respect to min_free_kbytes.
+ */
+void setup_per_zone_pages_min(void)
+{
+	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+	unsigned long lowmem_pages = 0;
+	struct zone *zone;
+	unsigned long flags;
+
+	/* Calculate total number of !ZONE_HIGHMEM pages */
+	for_each_zone(zone) {
+		if (!is_highmem(zone))
+			lowmem_pages += zone->present_pages;
+	}
+
+	for_each_zone(zone) {
+		unsigned long tmp;
+		spin_lock_irqsave(&zone->lru_lock, flags);
+		tmp = (pages_min * zone->present_pages) / lowmem_pages;
+		if (is_highmem(zone)) {
+			/*
+			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
+			 * need highmem pages, so cap pages_min to a small
+			 * value here.
+			 *
+			 * The (pages_high-pages_low) and (pages_low-pages_min)
+			 * deltas control async page reclaim, and so should
+			 * not be capped for highmem.
+			 */
+			int min_pages;
+
+			min_pages = zone->present_pages / 1024;
+			if (min_pages < SWAP_CLUSTER_MAX)
+				min_pages = SWAP_CLUSTER_MAX;
+			if (min_pages > 128)
+				min_pages = 128;
+			zone->pages_min = min_pages;
+		} else {
+			/*
+			 * If it's a lowmem zone, reserve a number of pages
+			 * proportionate to the zone's size.
+			 */
+			zone->pages_min = tmp;
+		}
+
+		zone->pages_low   = zone->pages_min + tmp / 4;
+		zone->pages_high  = zone->pages_min + tmp / 2;
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	}
+}
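+
+/*
+ * Worked example (illustrative): with min_free_kbytes = 4096 and 4KiB
+ * pages, pages_min totals 1024 pages.  A machine whose only lowmem
+ * zone has 262144 pages gets pages_min = 1024, pages_low = 1280 and
+ * pages_high = 1536, so kswapd is woken at 1280 free pages and goes
+ * back to sleep at 1536.
+ */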
+
+/*
+ * Initialise min_free_kbytes.
+ *
+ * For small machines we want it small (128k min).  For large machines
+ * we want it large (64MB max).  But it is not linear, because network
+ * bandwidth does not increase linearly with machine size.  We use
+ *
+ * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
+ *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
+ *
+ * which yields
+ *
+ * 16MB:	512k
+ * 32MB:	724k
+ * 64MB:	1024k
+ * 128MB:	1448k
+ * 256MB:	2048k
+ * 512MB:	2896k
+ * 1024MB:	4096k
+ * 2048MB:	5792k
+ * 4096MB:	8192k
+ * 8192MB:	11584k
+ * 16384MB:	16384k
+ */
+static int __init init_per_zone_pages_min(void)
+{
+	unsigned long lowmem_kbytes;
+
+	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
+
+	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
+	if (min_free_kbytes < 128)
+		min_free_kbytes = 128;
+	if (min_free_kbytes > 65536)
+		min_free_kbytes = 65536;
+	setup_per_zone_pages_min();
+	setup_per_zone_lowmem_reserve();
+	return 0;
+}
+module_init(init_per_zone_pages_min)
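+
+/*
+ * Worked example (illustrative): on a machine with 1GiB of lowmem,
+ * lowmem_kbytes = 1048576 and int_sqrt(1048576 * 16) = 4096, which is
+ * the 1024MB row of the table above.
+ */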
+
+/*
+ * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
+ *	that we can call two helper functions whenever min_free_kbytes
+ *	changes.
+ */
+int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
+	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+{
+	proc_dointvec(table, write, file, buffer, length, ppos);
+	setup_per_zone_pages_min();
+	return 0;
+}
+
+/*
+ * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
+ *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
+ *	whenever sysctl_lowmem_reserve_ratio changes.
+ *
+ * The reserve ratio has no relation to the pages_min watermarks.  The
+ * lowmem reserve ratio is only meaningful in relation to the boot-time
+ * zone sizes.
+ */
+int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
+	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+{
+	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+	setup_per_zone_lowmem_reserve();
+	return 0;
+}
+
+__initdata int hashdist = HASHDIST_DEFAULT;
+
+#ifdef CONFIG_NUMA
+static int __init set_hashdist(char *str)
+{
+	if (!str)
+		return 0;
+	hashdist = simple_strtoul(str, &str, 0);
+	return 1;
+}
+__setup("hashdist=", set_hashdist);
+#endif
+
+/*
+ * allocate a large system hash table from bootmem
+ * - it is assumed that the hash table must contain an exact power-of-2
+ *   quantity of entries
+ * - limit is the number of hash buckets, not the total allocation size
+ */
+void *__init alloc_large_system_hash(const char *tablename,
+				     unsigned long bucketsize,
+				     unsigned long numentries,
+				     int scale,
+				     int flags,
+				     unsigned int *_hash_shift,
+				     unsigned int *_hash_mask,
+				     unsigned long limit)
+{
+	unsigned long long max = limit;
+	unsigned long log2qty, size;
+	void *table = NULL;
+
+	/* allow the kernel cmdline to have a say */
+	if (!numentries) {
+		/* round applicable memory size up to nearest megabyte */
+		numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
+		numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
+		numentries >>= 20 - PAGE_SHIFT;
+		numentries <<= 20 - PAGE_SHIFT;
+
+		/* limit to 1 bucket per 2^scale bytes of low memory */
+		if (scale > PAGE_SHIFT)
+			numentries >>= (scale - PAGE_SHIFT);
+		else
+			numentries <<= (PAGE_SHIFT - scale);
+	}
+	/* round up to the next power of 2 */
+	numentries = 1UL << (long_log2(numentries) + 1);
+
+	/* limit allocation size to 1/16 total memory by default */
+	if (max == 0) {
+		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
+		do_div(max, bucketsize);
+	}
+
+	if (numentries > max)
+		numentries = max;
+
+	log2qty = long_log2(numentries);
+
+	do {
+		size = bucketsize << log2qty;
+		if (flags & HASH_EARLY)
+			table = alloc_bootmem(size);
+		else if (hashdist)
+			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
+		else {
+			unsigned long order;
+			for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
+				;
+			table = (void*) __get_free_pages(GFP_ATOMIC, order);
+		}
+	} while (!table && size > PAGE_SIZE && --log2qty);
+
+	if (!table)
+		panic("Failed to allocate %s hash table\n", tablename);
+
+	printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
+	       tablename,
+	       (1U << log2qty),
+	       long_log2(size) - PAGE_SHIFT,
+	       size);
+
+	if (_hash_shift)
+		*_hash_shift = log2qty;
+	if (_hash_mask)
+		*_hash_mask = (1 << log2qty) - 1;
+
+	return table;
+}
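+
+/*
+ * Typical caller (illustrative sketch, modeled on fs/dcache.c): size
+ * the dentry hash at one bucket per 2^13 bytes of lowmem and let this
+ * function hand back the shift and mask:
+ *
+ *	dentry_hashtable =
+ *		alloc_large_system_hash("Dentry cache",
+ *					sizeof(struct hlist_head),
+ *					dhash_entries, 13, HASH_EARLY,
+ *					&d_hash_shift, &d_hash_mask, 0);
+ */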
diff -urN oldtree/scripts/lxdialog/colors.h newtree/scripts/lxdialog/colors.h
--- oldtree/scripts/lxdialog/colors.h	2006-01-28 19:08:42.000000000 +0000
+++ newtree/scripts/lxdialog/colors.h	2006-01-29 11:18:10.875635448 +0000
@@ -38,7 +38,7 @@
 #define DIALOG_BG                    COLOR_WHITE
 #define DIALOG_HL                    FALSE
 
-#define TITLE_FG                     COLOR_YELLOW
+#define TITLE_FG                     COLOR_BLUE
 #define TITLE_BG                     COLOR_WHITE
 #define TITLE_HL                     TRUE
 
@@ -110,7 +110,7 @@
 #define ITEM_SELECTED_BG             COLOR_BLUE
 #define ITEM_SELECTED_HL             TRUE
 
-#define TAG_FG                       COLOR_YELLOW
+#define TAG_FG                       COLOR_BLUE
 #define TAG_BG                       COLOR_WHITE
 #define TAG_HL                       TRUE
 
@@ -118,7 +118,7 @@
 #define TAG_SELECTED_BG              COLOR_BLUE
 #define TAG_SELECTED_HL              TRUE
 
-#define TAG_KEY_FG                   COLOR_YELLOW
+#define TAG_KEY_FG                   COLOR_BLUE
 #define TAG_KEY_BG                   COLOR_WHITE
 #define TAG_KEY_HL                   TRUE
 
diff -urN oldtree/sound/core/info.c newtree/sound/core/info.c
--- oldtree/sound/core/info.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/sound/core/info.c	2006-01-29 11:18:10.875635448 +0000
@@ -444,8 +444,8 @@
 	return mask;
 }
 
-static inline int _snd_info_entry_ioctl(struct inode *inode, struct file *file,
-					unsigned int cmd, unsigned long arg)
+static long snd_info_entry_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
 {
 	snd_info_private_data_t *data;
 	struct snd_info_entry *entry;
@@ -465,17 +465,6 @@
 	return -ENOTTY;
 }
 
-/* FIXME: need to unlock BKL to allow preemption */
-static int snd_info_entry_ioctl(struct inode *inode, struct file *file,
-				unsigned int cmd, unsigned long arg)
-{
-	int err;
-	unlock_kernel();
-	err = _snd_info_entry_ioctl(inode, file, cmd, arg);
-	lock_kernel();
-	return err;
-}
-
 static int snd_info_entry_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct inode *inode = file->f_dentry->d_inode;
@@ -499,15 +488,15 @@
 
 static struct file_operations snd_info_entry_operations =
 {
-	.owner =	THIS_MODULE,
-	.llseek =	snd_info_entry_llseek,
-	.read =		snd_info_entry_read,
-	.write =	snd_info_entry_write,
-	.poll =		snd_info_entry_poll,
-	.ioctl =	snd_info_entry_ioctl,
-	.mmap =		snd_info_entry_mmap,
-	.open =		snd_info_entry_open,
-	.release =	snd_info_entry_release,
+	.owner =		THIS_MODULE,
+	.llseek =		snd_info_entry_llseek,
+	.read =			snd_info_entry_read,
+	.write =		snd_info_entry_write,
+	.poll =			snd_info_entry_poll,
+	.unlocked_ioctl =	snd_info_entry_ioctl,
+	.mmap =			snd_info_entry_mmap,
+	.open =			snd_info_entry_open,
+	.release =		snd_info_entry_release,
 };
 
 /**
diff -urN oldtree/sound/pci/hda/hda_intel.c newtree/sound/pci/hda/hda_intel.c
--- oldtree/sound/pci/hda/hda_intel.c	2006-01-28 19:08:42.000000000 +0000
+++ newtree/sound/pci/hda/hda_intel.c	2006-01-29 11:18:10.877635144 +0000
@@ -70,6 +70,7 @@
 			 "{Intel, ICH6M},"
 			 "{Intel, ICH7},"
 			 "{Intel, ESB2},"
+			 "{Intel, ICH8 },"
 			 "{ATI, SB450},"
 			 "{VIA, VT8251},"
 			 "{VIA, VT8237A},"
@@ -1603,6 +1604,7 @@
 	{ 0x8086, 0x2668, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH6 */
 	{ 0x8086, 0x27d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH7 */
 	{ 0x8086, 0x269a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ESB2 */
+	{ 0x8086, 0x284b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH8 */
 	{ 0x1002, 0x437b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATI }, /* ATI SB450 */
 	{ 0x1106, 0x3288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_VIA }, /* VIA VT8251/VT8237A */
 	{ 0x1039, 0x7502, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_SIS }, /* SIS966 */
