diff -urN linux-2.6.15/Documentation/acpi/sony_acpi.txt linux-2.6.15-no1/Documentation/acpi/sony_acpi.txt
--- linux-2.6.15/Documentation/acpi/sony_acpi.txt	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.15-no1/Documentation/acpi/sony_acpi.txt	2006-01-26 15:41:13.937768904 +0000
@@ -0,0 +1,103 @@
+ACPI Sony Notebook Control Driver (SNC) Readme
+----------------------------------------------
+	Copyright (C) 2004-2005 Stelian Pop <stelian@popies.net>
+
+This mini-driver drives the ACPI SNC device present in the
+ACPI BIOS of the Sony Vaio laptops.
+
+It gives access to some extra laptop functionality. In
+its current form, this driver is mainly useful for controlling the
+screen brightness, but it may do more in the future.
+
+You should probably start by trying the sonypi driver, and try
+sony_acpi only if sonypi doesn't work for you.
+
+Usage:
+------
+
+Loading the sony_acpi module will create a /proc/acpi/sony/
+directory populated with a couple of files.
+
+You then read/write integer values from/to those files by using
+standard UNIX tools.
+
+The files are:
+	brightness		current screen brightness
+	brightness_default	screen brightness which will be set
+				when the laptop is rebooted
+	cdpower			power on/off the internal CD drive
+
+Note that some files may be missing if they are not supported
+by your particular laptop model.
+
+Example usage:
+	# echo "1" > /proc/acpi/sony/brightness
+sets the lowest screen brightness,
+	# echo "8" > /proc/acpi/sony/brightness
+sets the highest screen brightness,
+	# cat /proc/acpi/sony/brightness
+retrieves the current screen brightness.
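+	# echo "5" > /proc/acpi/sony/brightness_default
+sets the screen brightness which will be used after the next reboot
+(the same 1 to 8 range applies).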
+
+Development:
+------------
+
+If you want to help with the development of this driver (and
+you are not afraid of the side effects that doing strange things
+with your ACPI BIOS could have on your laptop), load the driver
+and pass the option 'debug=1'.
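+For example:
+	# modprobe sony_acpi debug=1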
+
+REPEAT: DON'T DO THIS IF YOU DON'T LIKE RISKY BUSINESS.
+
+In your kernel logs you will find the list of all ACPI methods
+the SNC device has on your laptop. You can see the GBRT/SBRT methods
+used to get/set the brightness, but there are others.
+
+I HAVE NO IDEA WHAT THOSE METHODS DO.
+
+The sony_acpi driver creates, for some of those methods (the most
+common ones found on several Vaio models), an entry under
+/proc/acpi/sony/, just like the 'brightness' one. You can create
+other entries corresponding to your own laptop methods by further
+editing the source (see the 'sony_acpi_values' table, and add a new
+structure to this table with your get/set method names).
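+
+As an illustration, an entry for hypothetical get/set methods named
+GFOO/SFOO (these are made-up names) would look like:
+	{
+		.name		= "FOO",
+		.acpiget	= "GFOO",
+		.acpiset	= "SFOO",
+		.min		= -1,
+		.max		= -1,
+		.debug		= 1,
+	},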
+
+Your mission, should you accept it, is to try finding out what
+those entries are for, by reading/writing random values from/to those
+files and finding out what impact they have on your laptop.
+
+Should you find anything interesting, please report it back to me;
+I will not disavow all knowledge of your actions :)
+
+Bugs/Limitations:
+-----------------
+
+* This driver is not based on official documentation from Sony
+  (because there is none), so there is no guarantee this driver
+  will work at all, or do the right thing. Although this hasn't
+  happened to me, this driver could do very bad things to your
+  laptop, including permanent damage.
+
+* The sony_acpi and sonypi drivers do not interact at all. In the
+  future, sonypi could use sony_acpi to do (part of) its business.
+
+* spicctrl, which is the userspace tool used to communicate with the
+  sonypi driver (through /dev/sonypi), does not try to use the
+  sony_acpi driver. In the future, spicctrl could try sonypi first,
+  and if it isn't present, try sony_acpi instead.
+
diff -urN linux-2.6.15/Makefile linux-2.6.15-no1/Makefile
--- linux-2.6.15/Makefile	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/Makefile	2006-01-26 15:45:07.938195432 +0000
@@ -1,8 +1,8 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 15
-EXTRAVERSION =
-NAME=Sliding Snow Leopard
+EXTRAVERSION = -no1
+NAME=Rebirth
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
diff -urN linux-2.6.15/arch/i386/Kconfig linux-2.6.15-no1/arch/i386/Kconfig
--- linux-2.6.15/arch/i386/Kconfig	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/arch/i386/Kconfig	2006-01-26 15:41:13.973763432 +0000
@@ -618,13 +618,10 @@
 	default y
 
 config REGPARM
-	bool "Use register arguments (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
-	default n
+	bool "Use register arguments"
 	help
 	Compile the kernel with -mregparm=3. This uses a different ABI
 	and passes the first three arguments of a function call in registers.
-	This will probably break binary only modules.
 
 	This feature is only enabled for gcc-3.0 and later - earlier compilers
 	generate incorrect output with certain kernel constructs when
diff -urN linux-2.6.15/arch/i386/Kconfig.debug linux-2.6.15-no1/arch/i386/Kconfig.debug
--- linux-2.6.15/arch/i386/Kconfig.debug	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/arch/i386/Kconfig.debug	2006-01-26 15:41:35.840439192 +0000
@@ -43,14 +43,15 @@
 	  of memory corruptions.
 
 config 4KSTACKS
-	bool "Use 4Kb for kernel stacks instead of 8Kb"
-	depends on DEBUG_KERNEL
+	bool "Use 4Kb + 4Kb for kernel stacks instead of 8Kb" if DEBUG_KERNEL
+	default y
 	help
 	  If you say Y here the kernel will use a 4Kb stacksize for the
 	  kernel stack attached to each process/thread. This facilitates
 	  running more threads on a system and also reduces the pressure
 	  on the VM subsystem for higher order allocations. This option
-	  will also use IRQ stacks to compensate for the reduced stackspace.
+	  will also use separate 4Kb IRQ stacks to compensate for the
+	  reduced stackspace.
 
 config X86_FIND_SMP_CONFIG
 	bool
diff -urN linux-2.6.15/arch/i386/Makefile linux-2.6.15-no1/arch/i386/Makefile
--- linux-2.6.15/arch/i386/Makefile	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/arch/i386/Makefile	2006-01-26 15:41:13.974763280 +0000
@@ -42,9 +42,9 @@
 GCC_VERSION			:= $(call cc-version)
 cflags-$(CONFIG_REGPARM) 	+= $(shell if [ $(GCC_VERSION) -ge 0300 ] ; then echo "-mregparm=3"; fi ;)
 
-# Disable unit-at-a-time mode, it makes gcc use a lot more stack
-# due to the lack of sharing of stacklots.
-CFLAGS += $(call cc-option,-fno-unit-at-a-time)
+# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
+# a lot more stack due to the lack of sharing of stacklots:
+CFLAGS				+= $(shell if [ $(call cc-version) -lt 0400 ] ; then echo $(call cc-option,-fno-unit-at-a-time); fi ;)
 
 CFLAGS += $(cflags-y)
 
diff -urN linux-2.6.15/block/ll_rw_blk.c linux-2.6.15-no1/block/ll_rw_blk.c
--- linux-2.6.15/block/ll_rw_blk.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/block/ll_rw_blk.c	2006-01-26 15:41:13.975763128 +0000
@@ -1472,6 +1472,7 @@
  **/
 void generic_unplug_device(request_queue_t *q)
 {
+	might_sleep();
 	spin_lock_irq(q->queue_lock);
 	__generic_unplug_device(q);
 	spin_unlock_irq(q->queue_lock);
@@ -2829,7 +2830,7 @@
  * bi_sector for remaps as it sees fit.  So the values of these fields
  * should NOT be depended on after the call to generic_make_request.
  */
-void generic_make_request(struct bio *bio)
+static inline void __generic_make_request(struct bio *bio)
 {
 	request_queue_t *q;
 	sector_t maxsector;
@@ -2896,6 +2897,57 @@
 	} while (ret);
 }
 
+/*
+ * We only want one ->make_request_fn to be active at a time,
+ * else stack usage with stacked devices could be a problem.
+ * So use current->bio_{list,tail} to keep a list of requests
+ * submitted by a make_request_fn function.
+ * current->bio_tail is also used as a flag to say if
+ * generic_make_request is currently active in this task or not.
+ * If it is NULL, then no make_request is active.  If it is non-NULL,
+ * then a make_request is active, and new requests should be added
+ * at the tail.
+ */
+void generic_make_request(struct bio *bio)
+{
+	if (current->bio_tail) {
+		/* make_request is active */
+		*(current->bio_tail) = bio;
+		bio->bi_next = NULL;
+		current->bio_tail = &bio->bi_next;
+		return;
+	}
+	/* following loop may be a bit non-obvious, and so deserves some
+	 * explanation.
+	 * Before entering the loop, bio->bi_next is NULL (as all callers
+	 * ensure that) so we have a list with a single bio.
+	 * We pretend that we have just taken it off a longer list, so
+	 * we assign bio_list to the next (which is NULL) and bio_tail
+	 * to &bio_list, thus initialising the bio_list of new bios to be
+	 * added.  __generic_make_request may indeed add some more bios
+	 * through a recursive call to generic_make_request.  If it
+	 * did, we find a non-NULL value in bio_list and re-enter the loop
+	 * from the top.  In this case we really did just take the bio
+ * off the top of the list (no pretending) and so fixup bio_list and
+	 * bio_tail or bi_next, and call into __generic_make_request again.
+	 *
+	 * The loop was structured like this to make only one call to
+	 * __generic_make_request (which is important as it is large and
+	 * inlined) and to keep the structure simple.
+	 */
+	BUG_ON(bio->bi_next);
+	do {
+		current->bio_list = bio->bi_next;
+		if (bio->bi_next == NULL)
+			current->bio_tail = &current->bio_list;
+		else
+			bio->bi_next = NULL;
+		__generic_make_request(bio);
+		bio = current->bio_list;
+	} while (bio);
+	current->bio_tail = NULL; /* deactivate */
+}
+
 EXPORT_SYMBOL(generic_make_request);
 
 /**
diff -urN linux-2.6.15/drivers/acpi/Kconfig linux-2.6.15-no1/drivers/acpi/Kconfig
--- linux-2.6.15/drivers/acpi/Kconfig	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/acpi/Kconfig	2006-01-26 15:41:13.976762976 +0000
@@ -233,6 +233,19 @@
 	  If you have a legacy free Toshiba laptop (such as the Libretto L1
 	  series), say Y.
 
+config ACPI_SONY
+	tristate "Sony Laptop Extras"
+	depends on X86 && ACPI
+	---help---
+	  This mini-driver drives the ACPI SNC device present in the
+	  ACPI BIOS of the Sony Vaio laptops.
+
+	  It gives access to some extra laptop functionality. In
+	  its current form, the only thing this driver does is let
+	  the user set or query the screen brightness.
+
+	  Read <file:Documentation/acpi/sony_acpi.txt> for more information.
+
 config ACPI_CUSTOM_DSDT
 	bool "Include Custom DSDT"
 	depends on !STANDALONE
diff -urN linux-2.6.15/drivers/acpi/Makefile linux-2.6.15-no1/drivers/acpi/Makefile
--- linux-2.6.15/drivers/acpi/Makefile	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/acpi/Makefile	2006-01-26 15:41:13.977762824 +0000
@@ -55,5 +55,6 @@
 obj-$(CONFIG_ACPI_ASUS)		+= asus_acpi.o
 obj-$(CONFIG_ACPI_IBM)		+= ibm_acpi.o
 obj-$(CONFIG_ACPI_TOSHIBA)	+= toshiba_acpi.o
+obj-$(CONFIG_ACPI_SONY)		+= sony_acpi.o
 obj-y				+= scan.o motherboard.o
 obj-$(CONFIG_ACPI_HOTPLUG_MEMORY)	+= acpi_memhotplug.o
diff -urN linux-2.6.15/drivers/acpi/sony_acpi.c linux-2.6.15-no1/drivers/acpi/sony_acpi.c
--- linux-2.6.15/drivers/acpi/sony_acpi.c	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.15-no1/drivers/acpi/sony_acpi.c	2006-01-26 15:41:13.977762824 +0000
@@ -0,0 +1,392 @@
+/*
+ * ACPI Sony Notebook Control Driver (SNC)
+ *
+ * Copyright (C) 2004-2005 Stelian Pop <stelian@popies.net>
+ *
+ * Parts of this driver inspired from asus_acpi.c and ibm_acpi.c
+ * which are copyrighted by their respective authors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_bus.h>
+#include <asm/uaccess.h>
+
+#define ACPI_SNC_CLASS		"sony"
+#define ACPI_SNC_HID		"SNY5001"
+#define ACPI_SNC_DRIVER_NAME	"ACPI Sony Notebook Control Driver v0.2"
+
+#define LOG_PFX			KERN_WARNING "sony_acpi: "
+
+MODULE_AUTHOR("Stelian Pop");
+MODULE_DESCRIPTION(ACPI_SNC_DRIVER_NAME);
+MODULE_LICENSE("GPL");
+
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "set this to 1 (and RTFM) if you want to help "
+			"the development of this driver");
+
+static int sony_acpi_add(struct acpi_device *device);
+static int sony_acpi_remove(struct acpi_device *device, int type);
+
+static struct acpi_driver sony_acpi_driver = {
+	.name	= ACPI_SNC_DRIVER_NAME,
+	.class	= ACPI_SNC_CLASS,
+	.ids	= ACPI_SNC_HID,
+	.ops	= {
+			.add	= sony_acpi_add,
+			.remove	= sony_acpi_remove,
+		  },
+};
+
+static acpi_handle sony_acpi_handle;
+static struct proc_dir_entry *sony_acpi_dir;
+
+static struct sony_acpi_value {
+	char			*name;	 /* name of the entry */
+	struct proc_dir_entry 	*proc;	 /* /proc entry */
+	char			*acpiget;/* name of the ACPI get function */
+	char			*acpiset;/* name of the ACPI set function */
+	int 			min;	 /* minimum allowed value or -1 */
+	int			max;	 /* maximum allowed value or -1 */
+	int			debug;	 /* active only in debug mode ? */
+} sony_acpi_values[] = {
+	{
+		.name		= "brightness",
+		.acpiget	= "GBRT",
+		.acpiset	= "SBRT",
+		.min		= 1,
+		.max		= 8,
+		.debug		= 0,
+	},
+	{
+		.name		= "brightness_default",
+		.acpiget	= "GPBR",
+		.acpiset	= "SPBR",
+		.min		= 1,
+		.max		= 8,
+		.debug		= 0,
+	},
+	{
+		.name		= "cdpower",
+		.acpiget	= "GCDP",
+		.acpiset	= "SCDP",
+		.min		= -1,
+		.max		= -1,
+		.debug		= 0,
+	},
+	{
+		.name		= "PID",
+		.acpiget	= "GPID",
+		.debug		= 1,
+	},
+	{
+		.name		= "CTR",
+		.acpiget	= "GCTR",
+		.acpiset	= "SCTR",
+		.min		= -1,
+		.max		= -1,
+		.debug		= 1,
+	},
+	{
+		.name		= "PCR",
+		.acpiget	= "GPCR",
+		.acpiset	= "SPCR",
+		.min		= -1,
+		.max		= -1,
+		.debug		= 1,
+	},
+	{
+		.name		= "CMI",
+		.acpiget	= "GCMI",
+		.acpiset	= "SCMI",
+		.min		= -1,
+		.max		= -1,
+		.debug		= 1,
+	},
+	{
+		.name		= NULL,
+	}
+};
+
+static int acpi_callgetfunc(acpi_handle handle, char *name, int *result)
+{
+	struct acpi_buffer output;
+	union acpi_object out_obj;
+	acpi_status status;
+
+	output.length = sizeof(out_obj);
+	output.pointer = &out_obj;
+
+	status = acpi_evaluate_object(handle, name, NULL, &output);
+	if ((status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER)) {
+		*result = out_obj.integer.value;
+		return 0;
+	}
+
+	printk(LOG_PFX "acpi_callgetfunc failed\n");
+
+	return -1;
+}
+
+static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
+			    int *result)
+{
+	struct acpi_object_list params;
+	union acpi_object in_obj;
+	struct acpi_buffer output;
+	union acpi_object out_obj;
+	acpi_status status;
+
+	params.count = 1;
+	params.pointer = &in_obj;
+	in_obj.type = ACPI_TYPE_INTEGER;
+	in_obj.integer.value = value;
+
+	output.length = sizeof(out_obj);
+	output.pointer = &out_obj;
+
+	status = acpi_evaluate_object(handle, name, &params, &output);
+	if (status == AE_OK) {
+		if (result != NULL) {
+			if (out_obj.type != ACPI_TYPE_INTEGER) {
+				printk(LOG_PFX "acpi_evaluate_object bad "
+				       "return type\n");
+				return -1;
+			}
+			*result = out_obj.integer.value;
+		}
+		return 0;
+	}
+
+	printk(LOG_PFX "acpi_evaluate_object failed\n");
+
+	return -1;
+}
+
+static int parse_buffer(const char __user *buffer, unsigned long count,
+			int *val) {
+	char s[32];
+	int ret;
+
+	if (count > 31)
+		return -EINVAL;
+	if (copy_from_user(s, buffer, count))
+		return -EFAULT;
+	s[count] = '\0';
+	ret = simple_strtoul(s, NULL, 10);
+	*val = ret;
+	return 0;
+}
+
+static int sony_acpi_read(char* page, char** start, off_t off, int count,
+			  int* eof, void *data)
+{
+	struct sony_acpi_value *item = data;
+	int value;
+
+	if (!item->acpiget)
+		return -EIO;
+
+	if (acpi_callgetfunc(sony_acpi_handle, item->acpiget, &value) < 0)
+		return -EIO;
+
+	return sprintf(page, "%d\n", value);
+}
+
+static int sony_acpi_write(struct file *file, const char __user *buffer,
+			   unsigned long count, void *data)
+{
+	struct sony_acpi_value *item = data;
+	int result;
+	int value;
+
+	if (!item->acpiset)
+		return -EIO;
+
+	if ((result = parse_buffer(buffer, count, &value)) < 0)
+		return result;
+
+	if (item->min != -1 && value < item->min)
+		return -EINVAL;
+	if (item->max != -1 && value > item->max)
+		return -EINVAL;
+
+	if (acpi_callsetfunc(sony_acpi_handle, item->acpiset, value, NULL) < 0)
+		return -EIO;
+
+	return count;
+}
+
+static void sony_acpi_notify(acpi_handle handle, u32 event, void *data)
+{
+	printk(LOG_PFX "sony_acpi_notify\n");
+}
+
+static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
+				      void *context, void **return_value)
+{
+	struct acpi_namespace_node *node;
+	union acpi_operand_object *operand;
+
+	node = (struct acpi_namespace_node *) handle;
+	operand = (union acpi_operand_object *) node->object;
+
+	printk(LOG_PFX "method: name: %4.4s, args %X\n", node->name.ascii,
+	       (u32) operand->method.param_count);
+
+	return AE_OK;
+}
+
+static int sony_acpi_add(struct acpi_device *device)
+{
+	acpi_status status;
+	int result;
+	struct sony_acpi_value *item;
+
+	sony_acpi_handle = device->handle;
+
+	acpi_driver_data(device) = NULL;
+	acpi_device_dir(device) = sony_acpi_dir;
+
+	if (debug) {
+		status = acpi_walk_namespace(ACPI_TYPE_METHOD, sony_acpi_handle,
+					     1, sony_walk_callback, NULL, NULL);
+		if (ACPI_FAILURE(status)) {
+			printk(LOG_PFX "unable to walk acpi resources\n");
+			result = -ENODEV;
+			goto outwalk;
+		}
+
+		status = acpi_install_notify_handler(sony_acpi_handle,
+						     ACPI_DEVICE_NOTIFY,
+						     sony_acpi_notify,
+						     NULL);
+		if (ACPI_FAILURE(status)) {
+			printk(LOG_PFX "unable to install notify handler\n");
+			result = -ENODEV;
+			goto outnotify;
+		}
+	}
+
+	for (item = sony_acpi_values; item->name; ++item) {
+		acpi_handle handle;
+
+		if (!debug && item->debug)
+			continue;
+
+		if (item->acpiget &&
+		    ACPI_FAILURE(acpi_get_handle(sony_acpi_handle,
+		    		 item->acpiget, &handle)))
+		    	continue;
+
+		if (item->acpiset &&
+		    ACPI_FAILURE(acpi_get_handle(sony_acpi_handle,
+		    		 item->acpiset, &handle)))
+		    	continue;
+
+		item->proc = create_proc_entry(item->name, 0600,
+					       acpi_device_dir(device));
+		if (!item->proc) {
+			printk(LOG_PFX "unable to create proc entry\n");
+			result = -EIO;
+			goto outproc;
+		}
+
+		item->proc->read_proc = sony_acpi_read;
+		item->proc->write_proc = sony_acpi_write;
+		item->proc->data = item;
+		item->proc->owner = THIS_MODULE;
+	}
+
+	printk(KERN_INFO ACPI_SNC_DRIVER_NAME " successfully installed\n");
+
+	return 0;
+
+outproc:
+	if (debug) {
+		status = acpi_remove_notify_handler(sony_acpi_handle,
+						    ACPI_DEVICE_NOTIFY,
+						    sony_acpi_notify);
+		if (ACPI_FAILURE(status))
+			printk(LOG_PFX "unable to remove notify handler\n");
+	}
+outnotify:
+	for (item = sony_acpi_values; item->name; ++item)
+		if (item->proc)
+			remove_proc_entry(item->name, acpi_device_dir(device));
+outwalk:
+	return result;
+}
+
+
+static int sony_acpi_remove(struct acpi_device *device, int type)
+{
+	acpi_status status;
+	struct sony_acpi_value *item;
+
+	if (debug) {
+		status = acpi_remove_notify_handler(sony_acpi_handle,
+						    ACPI_DEVICE_NOTIFY,
+						    sony_acpi_notify);
+		if (ACPI_FAILURE(status))
+			printk(LOG_PFX "unable to remove notify handler\n");
+	}
+
+	for (item = sony_acpi_values; item->name; ++item)
+		if (item->proc)
+			remove_proc_entry(item->name, acpi_device_dir(device));
+
+	printk(KERN_INFO ACPI_SNC_DRIVER_NAME " successfully removed\n");
+
+	return 0;
+}
+
+static int __init sony_acpi_init(void)
+{
+	int result;
+
+	sony_acpi_dir = proc_mkdir("sony", acpi_root_dir);
+	if (!sony_acpi_dir) {
+		printk(LOG_PFX "unable to create /proc entry\n");
+		return -ENODEV;
+	}
+	sony_acpi_dir->owner = THIS_MODULE;
+
+	result = acpi_bus_register_driver(&sony_acpi_driver);
+	if (result < 0) {
+		remove_proc_entry("sony", acpi_root_dir);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+
+static void __exit sony_acpi_exit(void)
+{
+	acpi_bus_unregister_driver(&sony_acpi_driver);
+	remove_proc_entry("sony", acpi_root_dir);
+}
+
+module_init(sony_acpi_init);
+module_exit(sony_acpi_exit);
diff -urN linux-2.6.15/drivers/char/agp/amd64-agp.c linux-2.6.15-no1/drivers/char/agp/amd64-agp.c
--- linux-2.6.15/drivers/char/agp/amd64-agp.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/char/agp/amd64-agp.c	2006-01-26 15:41:13.978762672 +0000
@@ -600,6 +600,26 @@
 	agp_put_bridge(bridge);
 }
 
+#ifdef CONFIG_PM
+
+static int agp_amd64_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	pci_save_state(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+static int agp_amd64_resume(struct pci_dev *pdev)
+{
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+
+	return amd_8151_configure();
+}
+
+#endif /* CONFIG_PM */
+
 static struct pci_device_id agp_amd64_pci_table[] = {
 	{
 	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
@@ -718,6 +738,10 @@
 	.id_table	= agp_amd64_pci_table,
 	.probe		= agp_amd64_probe,
 	.remove		= agp_amd64_remove,
+#ifdef CONFIG_PM
+	.suspend	= agp_amd64_suspend,
+	.resume		= agp_amd64_resume,
+#endif
 };
 
 
diff -urN linux-2.6.15/drivers/char/agp/nvidia-agp.c linux-2.6.15-no1/drivers/char/agp/nvidia-agp.c
--- linux-2.6.15/drivers/char/agp/nvidia-agp.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/char/agp/nvidia-agp.c	2006-01-26 15:41:39.946814928 +0000
@@ -11,6 +11,7 @@
 #include <linux/gfp.h>
 #include <linux/page-flags.h>
 #include <linux/mm.h>
+#include <linux/jiffies.h>
 #include "agp.h"
 
 /* NVIDIA registers */
@@ -256,7 +257,7 @@
 		do {
 			pci_read_config_dword(nvidia_private.dev_1,
 					NVIDIA_1_WBC, &wbc_reg);
-			if ((signed)(end - jiffies) <= 0) {
+			if (time_before_eq(end, jiffies)) {
 				printk(KERN_ERR PFX
 				    "TLB flush took more than 3 seconds.\n");
 			}
diff -urN linux-2.6.15/drivers/hwmon/hdaps.c linux-2.6.15-no1/drivers/hwmon/hdaps.c
--- linux-2.6.15/drivers/hwmon/hdaps.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/hwmon/hdaps.c	2006-01-26 15:41:13.979762520 +0000
@@ -36,7 +36,7 @@
 #include <asm/io.h>
 
 #define HDAPS_LOW_PORT		0x1600	/* first port used by hdaps */
-#define HDAPS_NR_PORTS		0x30	/* number of ports: 0x1600 - 0x162f */
+#define HDAPS_HI_PORT		0x162f	/* last port used by hdaps */
 
 #define HDAPS_PORT_STATE	0x1611	/* device state */
 #define HDAPS_PORT_YPOS		0x1612	/* y-axis position */
@@ -62,6 +62,12 @@
 #define HDAPS_INPUT_FUZZ	4	/* input event threshold */
 #define HDAPS_INPUT_FLAT	4
 
+static struct resource __initdata hdaps_iores = {
+	.flags	= IORESOURCE_IO,
+	.start	= HDAPS_LOW_PORT,
+	.end	= HDAPS_HI_PORT,
+};
+
 static struct timer_list hdaps_timer;
 static struct platform_device *pdev;
 static struct input_dev *hdaps_idev;
@@ -284,34 +290,6 @@
 }
 
 
-/* Device model stuff */
-
-static int hdaps_probe(struct platform_device *dev)
-{
-	int ret;
-
-	ret = hdaps_device_init();
-	if (ret)
-		return ret;
-
-	printk(KERN_INFO "hdaps: device successfully initialized.\n");
-	return 0;
-}
-
-static int hdaps_resume(struct platform_device *dev)
-{
-	return hdaps_device_init();
-}
-
-static struct platform_driver hdaps_driver = {
-	.probe = hdaps_probe,
-	.resume = hdaps_resume,
-	.driver	= {
-		.name = "hdaps",
-		.owner = THIS_MODULE,
-	},
-};
-
 /*
  * hdaps_calibrate - Set our "resting" values.  Callers must hold hdaps_sem.
  */
@@ -320,13 +298,13 @@
 	__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &rest_x, &rest_y);
 }
 
-static void hdaps_mousedev_poll(unsigned long unused)
+static void hdaps_inputdev_poll(unsigned long unused)
 {
 	int x, y;
 
 	/* Cannot sleep.  Try nonblockingly.  If we fail, try again later. */
 	if (down_trylock(&hdaps_sem)) {
-		mod_timer(&hdaps_timer,jiffies + HDAPS_POLL_PERIOD);
+		mod_timer(&hdaps_timer, jiffies + HDAPS_POLL_PERIOD);
 		return;
 	}
 
@@ -339,7 +317,7 @@
 
 	mod_timer(&hdaps_timer, jiffies + HDAPS_POLL_PERIOD);
 
-out:
+ out:
 	up(&hdaps_sem);
 }
 
@@ -473,6 +451,80 @@
 	.attrs = hdaps_attributes,
 };
 
+/* Device model stuff */
+
+static int __devinit hdaps_probe(struct platform_device *dev)
+{
+	int error;
+
+	/* initial calibrate for the input device */
+	hdaps_calibrate();
+
+	hdaps_idev = input_allocate_device();
+	if (!hdaps_idev)
+		return -ENOMEM;
+
+	/* initialize the input class */
+	hdaps_idev->name = "hdaps";
+	hdaps_idev->phys = "hdaps/input0";
+	hdaps_idev->cdev.dev = &dev->dev;
+	hdaps_idev->evbit[0] = BIT(EV_ABS);
+	input_set_abs_params(hdaps_idev, ABS_X,
+			-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
+	input_set_abs_params(hdaps_idev, ABS_Y,
+			-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
+
+	error = sysfs_create_group(&dev->dev.kobj, &hdaps_attribute_group);
+	if (error)
+		goto err_free_input_dev;
+
+	error = hdaps_device_init();
+	if (error)
+		goto err_remove_group;
+
+	error = input_register_device(hdaps_idev);
+	if (error)
+		goto err_remove_group;
+
+	/* start up our timer for the input device */
+	init_timer(&hdaps_timer);
+	hdaps_timer.function = hdaps_inputdev_poll;
+	hdaps_timer.expires = jiffies + HDAPS_POLL_PERIOD;
+	add_timer(&hdaps_timer);
+
+	return 0;
+
+ err_remove_group:
+	sysfs_remove_group(&dev->dev.kobj, &hdaps_attribute_group);
+ err_free_input_dev:
+	input_free_device(hdaps_idev);
+	return error;
+}
+
+static int __devexit hdaps_remove(struct platform_device *dev)
+{
+	del_timer_sync(&hdaps_timer);
+	input_unregister_device(hdaps_idev);
+	sysfs_remove_group(&dev->dev.kobj, &hdaps_attribute_group);
+
+	return 0;
+}
+
+static int hdaps_resume(struct platform_device *dev)
+{
+	return hdaps_device_init();
+}
+
+static struct platform_driver hdaps_driver = {
+	.driver		= {
+		.name	= "hdaps",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= hdaps_probe,
+	.remove		= __devexit_p(hdaps_remove),
+	.resume		= hdaps_resume,
+};
+
 
 /* Module stuff */
 
@@ -511,7 +563,7 @@
 
 static int __init hdaps_init(void)
 {
-	int ret;
+	int error;
 
 	/* Note that DMI_MATCH(...,"ThinkPad T42") will match "ThinkPad T42p" */
 	struct dmi_system_id hdaps_whitelist[] = {
@@ -532,79 +584,43 @@
 
 	if (!dmi_check_system(hdaps_whitelist)) {
 		printk(KERN_WARNING "hdaps: supported laptop not found!\n");
-		ret = -ENXIO;
-		goto out;
+		return -ENODEV;
 	}
 
-	if (!request_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS, "hdaps")) {
-		ret = -ENXIO;
-		goto out;
+	error = platform_driver_register(&hdaps_driver);
+	if (error)
+		return error;
+
+	pdev = platform_device_alloc("hdaps", -1);
+	if (!pdev) {
+		error = -ENOMEM;
+		goto err_unregister_driver;
 	}
 
-	ret = platform_driver_register(&hdaps_driver);
-	if (ret)
-		goto out_region;
-
-	pdev = platform_device_register_simple("hdaps", -1, NULL, 0);
-	if (IS_ERR(pdev)) {
-		ret = PTR_ERR(pdev);
-		goto out_driver;
-	}
-
-	ret = sysfs_create_group(&pdev->dev.kobj, &hdaps_attribute_group);
-	if (ret)
-		goto out_device;
-
-	hdaps_idev = input_allocate_device();
-	if (!hdaps_idev) {
-		ret = -ENOMEM;
-		goto out_group;
-	}
-
-	/* initial calibrate for the input device */
-	hdaps_calibrate();
-
-	/* initialize the input class */
-	hdaps_idev->name = "hdaps";
-	hdaps_idev->cdev.dev = &pdev->dev;
-	hdaps_idev->evbit[0] = BIT(EV_ABS);
-	input_set_abs_params(hdaps_idev, ABS_X,
-			-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
-	input_set_abs_params(hdaps_idev, ABS_Y,
-			-256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
-
-	input_register_device(hdaps_idev);
-
-	/* start up our timer for the input device */
-	init_timer(&hdaps_timer);
-	hdaps_timer.function = hdaps_mousedev_poll;
-	hdaps_timer.expires = jiffies + HDAPS_POLL_PERIOD;
-	add_timer(&hdaps_timer);
+	error = platform_device_add_resources(pdev, &hdaps_iores, 1);
+	if (error)
+		goto err_free_device;
+
+	error = platform_device_add(pdev);
+	if (error)
+		goto err_free_device;
 
 	printk(KERN_INFO "hdaps: driver successfully loaded.\n");
 	return 0;
 
-out_group:
-	sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
-out_device:
-	platform_device_unregister(pdev);
-out_driver:
+ err_free_device:
+	platform_device_put(pdev);
+ err_unregister_driver:
 	platform_driver_unregister(&hdaps_driver);
-out_region:
-	release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
-out:
-	printk(KERN_WARNING "hdaps: driver init failed (ret=%d)!\n", ret);
-	return ret;
+
+	printk(KERN_WARNING "hdaps: driver init failed (error=%d)!\n", error);
+	return error;
 }
 
 static void __exit hdaps_exit(void)
 {
-	del_timer_sync(&hdaps_timer);
-	input_unregister_device(hdaps_idev);
-	sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
 	platform_device_unregister(pdev);
 	platform_driver_unregister(&hdaps_driver);
-	release_region(HDAPS_LOW_PORT, HDAPS_NR_PORTS);
 
 	printk(KERN_INFO "hdaps: driver unloaded.\n");
 }
diff -urN linux-2.6.15/drivers/ide/pci/sis5513.c linux-2.6.15-no1/drivers/ide/pci/sis5513.c
--- linux-2.6.15/drivers/ide/pci/sis5513.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/ide/pci/sis5513.c	2006-01-26 15:41:13.980762368 +0000
@@ -112,6 +112,7 @@
 
 	{ "SiS5596",	PCI_DEVICE_ID_SI_5596,	ATA_16   },
 	{ "SiS5571",	PCI_DEVICE_ID_SI_5571,	ATA_16   },
+	{ "SiS5517",	PCI_DEVICE_ID_SI_5517,	ATA_16   },
 	{ "SiS551x",	PCI_DEVICE_ID_SI_5511,	ATA_16   },
 };
 
@@ -524,6 +525,7 @@
 			case 3:		test1 = 0x30|0x03; break;
 			case 2:		test1 = 0x40|0x04; break;
 			case 1:		test1 = 0x60|0x07; break;
+			case 0:		test1 = 0x00; break;
 			default:	break;
 		}
 		pci_write_config_byte(dev, drive_pci, test1);
diff -urN linux-2.6.15/drivers/input/mouse/Makefile linux-2.6.15-no1/drivers/input/mouse/Makefile
--- linux-2.6.15/drivers/input/mouse/Makefile	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/input/mouse/Makefile	2006-01-26 15:41:13.980762368 +0000
@@ -15,4 +15,5 @@
 obj-$(CONFIG_MOUSE_HIL)		+= hil_ptr.o
 obj-$(CONFIG_MOUSE_VSXXXAA)	+= vsxxxaa.o
 
-psmouse-objs  := psmouse-base.o alps.o logips2pp.o synaptics.o lifebook.o trackpoint.o
+psmouse-objs  := psmouse-base.o alps.o logips2pp.o synaptics.o lifebook.o \
+		trackpoint.o touchkit_ps2.o
diff -urN linux-2.6.15/drivers/input/mouse/alps.c linux-2.6.15-no1/drivers/input/mouse/alps.c
--- linux-2.6.15/drivers/input/mouse/alps.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/input/mouse/alps.c	2006-01-26 15:41:13.981762216 +0000
@@ -347,6 +347,41 @@
 	return 0;
 }
 
+/*
+ * alps_poll() - poll the touchpad for current motion packet.
+ * Used in resync.
+ */
+static int alps_poll(struct psmouse *psmouse)
+{
+	struct alps_data *priv = psmouse->private;
+	unsigned char buf[6];
+	int poll_failed;
+
+	if (priv->i->flags & ALPS_PASS)
+		alps_passthrough_mode(psmouse, 1);
+
+	poll_failed = ps2_command(&psmouse->ps2dev, buf,
+				  PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)) != 0;
+
+	/* leave passthrough mode even if the poll failed */
+	if (priv->i->flags & ALPS_PASS)
+		alps_passthrough_mode(psmouse, 0);
+
+	if (poll_failed || (buf[0] & priv->i->mask0) != priv->i->byte0)
+		return -1;
+
+	if ((psmouse->badbyte & 0xc8) == 0x08) {
+/*
+ * Poll the track stick ...
+ */
+		if (ps2_command(&psmouse->ps2dev, buf, PSMOUSE_CMD_POLL | (3 << 8)))
+			return -1;
+	}
+
+	memcpy(psmouse->packet, buf, sizeof(buf));
+	return 0;
+}
+
 static int alps_reconnect(struct psmouse *psmouse)
 {
 	struct alps_data *priv = psmouse->private;
@@ -450,10 +483,14 @@
 	input_register_device(priv->dev2);
 
 	psmouse->protocol_handler = alps_process_byte;
+	psmouse->poll = alps_poll;
 	psmouse->disconnect = alps_disconnect;
 	psmouse->reconnect = alps_reconnect;
 	psmouse->pktsize = 6;
 
+	/* We are having trouble resyncing ALPS touchpads so disable it for now */
+	psmouse->resync_time = 0;
+
 	return 0;
 
 init_fail:
diff -urN linux-2.6.15/drivers/input/mouse/logips2pp.c linux-2.6.15-no1/drivers/input/mouse/logips2pp.c
--- linux-2.6.15/drivers/input/mouse/logips2pp.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/input/mouse/logips2pp.c	2006-01-26 15:41:13.982762064 +0000
@@ -117,7 +117,7 @@
 	if (psmouse_sliced_command(psmouse, command))
 		return -1;
 
-	if (ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_POLL))
+	if (ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_POLL | 0x0300))
 		return -1;
 
 	return 0;
diff -urN linux-2.6.15/drivers/input/mouse/psmouse-base.c linux-2.6.15-no1/drivers/input/mouse/psmouse-base.c
--- linux-2.6.15/drivers/input/mouse/psmouse-base.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/input/mouse/psmouse-base.c	2006-01-26 15:41:13.983761912 +0000
@@ -26,6 +26,7 @@
 #include "alps.h"
 #include "lifebook.h"
 #include "trackpoint.h"
+#include "touchkit_ps2.h"
 
 #define DRIVER_DESC	"PS/2 mouse driver"
 
@@ -58,6 +59,10 @@
 module_param_named(resetafter, psmouse_resetafter, uint, 0644);
 MODULE_PARM_DESC(resetafter, "Reset device after so many bad packets (0 = never).");
 
+static unsigned int psmouse_resync_time = 5;
+module_param_named(resync_time, psmouse_resync_time, uint, 0644);
+MODULE_PARM_DESC(resync_time, "How long the mouse can stay idle before forcing a resync (in seconds, 0 = never).");
+
 PSMOUSE_DEFINE_ATTR(protocol, S_IWUSR | S_IRUGO,
 			NULL,
 			psmouse_attr_show_protocol, psmouse_attr_set_protocol);
@@ -70,12 +75,16 @@
 PSMOUSE_DEFINE_ATTR(resetafter, S_IWUSR | S_IRUGO,
 			(void *) offsetof(struct psmouse, resetafter),
 			psmouse_show_int_attr, psmouse_set_int_attr);
+PSMOUSE_DEFINE_ATTR(resync_time, S_IWUSR | S_IRUGO,
+			(void *) offsetof(struct psmouse, resync_time),
+			psmouse_show_int_attr, psmouse_set_int_attr);
 
 static struct attribute *psmouse_attributes[] = {
 	&psmouse_attr_protocol.dattr.attr,
 	&psmouse_attr_rate.dattr.attr,
 	&psmouse_attr_resolution.dattr.attr,
 	&psmouse_attr_resetafter.dattr.attr,
+	&psmouse_attr_resync_time.dattr.attr,
 	NULL
 };
 
@@ -98,6 +107,8 @@
  */
 static DECLARE_MUTEX(psmouse_sem);
 
+static struct workqueue_struct *kpsmoused_wq;
+
 struct psmouse_protocol {
 	enum psmouse_type type;
 	char *name;
@@ -178,15 +189,79 @@
 }
 
 /*
- * psmouse_interrupt() handles incoming characters, either gathering them into
- * packets or passing them to the command routine as command output.
+ * __psmouse_set_state() sets new psmouse state and resets all flags.
+ */
+
+static inline void __psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state)
+{
+	psmouse->state = new_state;
+	psmouse->pktcnt = psmouse->out_of_sync = 0;
+	psmouse->ps2dev.flags = 0;
+	psmouse->last = jiffies;
+}
+
+
+/*
+ * psmouse_set_state() sets new psmouse state and resets all flags and
+ * counters while holding serio lock so fighting with interrupt handler
+ * is not a concern.
+ */
+
+static void psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state)
+{
+	serio_pause_rx(psmouse->ps2dev.serio);
+	__psmouse_set_state(psmouse, new_state);
+	serio_continue_rx(psmouse->ps2dev.serio);
+}
+
+/*
+ * psmouse_handle_byte() processes one byte of the input data stream
+ * by calling corresponding protocol handler.
+ */
+
+static int psmouse_handle_byte(struct psmouse *psmouse, struct pt_regs *regs)
+{
+	psmouse_ret_t rc = psmouse->protocol_handler(psmouse, regs);
+
+	switch (rc) {
+		case PSMOUSE_BAD_DATA:
+			if (psmouse->state == PSMOUSE_ACTIVATED) {
+				printk(KERN_WARNING "psmouse.c: %s at %s lost sync at byte %d\n",
+					psmouse->name, psmouse->phys, psmouse->pktcnt);
+				if (++psmouse->out_of_sync == psmouse->resetafter) {
+					__psmouse_set_state(psmouse, PSMOUSE_IGNORE);
+					printk(KERN_NOTICE "psmouse.c: issuing reconnect request\n");
+					serio_reconnect(psmouse->ps2dev.serio);
+					return -1;
+				}
+			}
+			psmouse->pktcnt = 0;
+			break;
+
+		case PSMOUSE_FULL_PACKET:
+			psmouse->pktcnt = 0;
+			if (psmouse->out_of_sync) {
+				psmouse->out_of_sync = 0;
+				printk(KERN_NOTICE "psmouse.c: %s at %s - driver resynched.\n",
+					psmouse->name, psmouse->phys);
+			}
+			break;
+
+		case PSMOUSE_GOOD_DATA:
+			break;
+	}
+	return 0;
+}
+
+/*
+ * psmouse_interrupt() handles incoming characters, either passing them
+ * for normal processing or gathering them as command response.
  */
 
 static irqreturn_t psmouse_interrupt(struct serio *serio,
 		unsigned char data, unsigned int flags, struct pt_regs *regs)
 {
 	struct psmouse *psmouse = serio_get_drvdata(serio);
-	psmouse_ret_t rc;
 
 	if (psmouse->state == PSMOUSE_IGNORE)
 		goto out;
@@ -208,67 +283,58 @@
 		if  (ps2_handle_response(&psmouse->ps2dev, data))
 			goto out;
 
-	if (psmouse->state == PSMOUSE_INITIALIZING)
+	if (psmouse->state <= PSMOUSE_RESYNCING)
 		goto out;
 
 	if (psmouse->state == PSMOUSE_ACTIVATED &&
 	    psmouse->pktcnt && time_after(jiffies, psmouse->last + HZ/2)) {
-		printk(KERN_WARNING "psmouse.c: %s at %s lost synchronization, throwing %d bytes away.\n",
+		printk(KERN_INFO "psmouse.c: %s at %s lost synchronization, throwing %d bytes away.\n",
 		       psmouse->name, psmouse->phys, psmouse->pktcnt);
-		psmouse->pktcnt = 0;
+		psmouse->badbyte = psmouse->packet[0];
+		__psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
+		queue_work(kpsmoused_wq, &psmouse->resync_work);
+		goto out;
 	}
 
-	psmouse->last = jiffies;
 	psmouse->packet[psmouse->pktcnt++] = data;
-
-	if (psmouse->packet[0] == PSMOUSE_RET_BAT) {
+/*
+ * Check if this is a new device announcement (0xAA 0x00)
+ */
+	if (unlikely(psmouse->packet[0] == PSMOUSE_RET_BAT && psmouse->pktcnt <= 2)) {
 		if (psmouse->pktcnt == 1)
 			goto out;
 
-		if (psmouse->pktcnt == 2) {
-			if (psmouse->packet[1] == PSMOUSE_RET_ID) {
-				psmouse->state = PSMOUSE_IGNORE;
-				serio_reconnect(serio);
-				goto out;
-			}
-			if (psmouse->type == PSMOUSE_SYNAPTICS) {
-				/* neither 0xAA nor 0x00 are valid first bytes
-				 * for a packet in absolute mode
-				 */
-				psmouse->pktcnt = 0;
-				goto out;
-			}
+		if (psmouse->packet[1] == PSMOUSE_RET_ID) {
+			__psmouse_set_state(psmouse, PSMOUSE_IGNORE);
+			serio_reconnect(serio);
+			goto out;
 		}
-	}
-
-	rc = psmouse->protocol_handler(psmouse, regs);
+/*
+ * Not a new device, try processing first byte normally
+ */
+		psmouse->pktcnt = 1;
+		if (psmouse_handle_byte(psmouse, regs))
+			goto out;
 
-	switch (rc) {
-		case PSMOUSE_BAD_DATA:
-			printk(KERN_WARNING "psmouse.c: %s at %s lost sync at byte %d\n",
-				psmouse->name, psmouse->phys, psmouse->pktcnt);
-			psmouse->pktcnt = 0;
+		psmouse->packet[psmouse->pktcnt++] = data;
+	}
 
-			if (++psmouse->out_of_sync == psmouse->resetafter) {
-				psmouse->state = PSMOUSE_IGNORE;
-				printk(KERN_NOTICE "psmouse.c: issuing reconnect request\n");
-				serio_reconnect(psmouse->ps2dev.serio);
-			}
-			break;
+/*
+ * See if we need to force resync because mouse was idle for too long
+ */
+	if (psmouse->state == PSMOUSE_ACTIVATED &&
+	    psmouse->pktcnt == 1 && psmouse->resync_time &&
+	    time_after(jiffies, psmouse->last + psmouse->resync_time * HZ)) {
+		psmouse->badbyte = psmouse->packet[0];
+		__psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
+		queue_work(kpsmoused_wq, &psmouse->resync_work);
+		goto out;
+	}
 
-		case PSMOUSE_FULL_PACKET:
-			psmouse->pktcnt = 0;
-			if (psmouse->out_of_sync) {
-				psmouse->out_of_sync = 0;
-				printk(KERN_NOTICE "psmouse.c: %s at %s - driver resynched.\n",
-					psmouse->name, psmouse->phys);
-			}
-			break;
+	psmouse->last = jiffies;
+	psmouse_handle_byte(psmouse, regs);
 
-		case PSMOUSE_GOOD_DATA:
-			break;
-	}
-out:
+ out:
 	return IRQ_HANDLED;
 }
 
@@ -545,6 +611,13 @@
 	if (max_proto > PSMOUSE_IMEX && trackpoint_detect(psmouse, set_properties) == 0)
 		return PSMOUSE_TRACKPOINT;
 
+	if (touchkit_ps2_detect(psmouse, set_properties) == 0) {
+		if (max_proto >= PSMOUSE_TOUCHKIT_PS2) {
+			if (!set_properties || touchkit_ps2_init(psmouse) == 0)
+				return PSMOUSE_TOUCHKIT_PS2;
+		}
+	}
+
 /*
  * Okay, all failed, we have a standard mouse here. The number of the buttons
  * is still a question, though. We assume 3.
@@ -632,6 +705,13 @@
 		.detect		= trackpoint_detect,
 	},
 	{
+		.type		= PSMOUSE_TOUCHKIT_PS2,
+		.name		= "touchkitPS/2",
+		.alias		= "touchkit",
+		.detect		= touchkit_ps2_detect,
+		.init		= touchkit_ps2_init,
+	},
+	{
 		.type		= PSMOUSE_AUTO,
 		.name		= "auto",
 		.alias		= "any",
@@ -755,21 +835,6 @@
 }
 
 /*
- * psmouse_set_state() sets new psmouse state and resets all flags and
- * counters while holding serio lock so fighting with interrupt handler
- * is not a concern.
- */
-
-static void psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state)
-{
-	serio_pause_rx(psmouse->ps2dev.serio);
-	psmouse->state = new_state;
-	psmouse->pktcnt = psmouse->out_of_sync = 0;
-	psmouse->ps2dev.flags = 0;
-	serio_continue_rx(psmouse->ps2dev.serio);
-}
-
-/*
  * psmouse_activate() enables the mouse so that we get motion reports from it.
  */
 
@@ -797,6 +862,100 @@
 	psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
 }
 
+/*
+ * psmouse_poll() - default poll handler. Everyone except ALPS uses it.
+ */
+
+static int psmouse_poll(struct psmouse *psmouse)
+{
+	return ps2_command(&psmouse->ps2dev, psmouse->packet,
+			   PSMOUSE_CMD_POLL | (psmouse->pktsize << 8));
+}
+
+
+/*
+ * psmouse_resync() attempts to re-validate current protocol.
+ */
+
+static void psmouse_resync(void *p)
+{
+	struct psmouse *psmouse = p, *parent = NULL;
+	struct serio *serio = psmouse->ps2dev.serio;
+	psmouse_ret_t rc = PSMOUSE_GOOD_DATA;
+	int failed = 0, enabled = 0;
+	int i;
+
+	down(&psmouse_sem);
+
+	if (psmouse->state != PSMOUSE_RESYNCING)
+		goto out;
+
+	if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
+		parent = serio_get_drvdata(serio->parent);
+		psmouse_deactivate(parent);
+	}
+
+/*
+ * Some mice don't ACK commands sent while they are in the middle of
+ * transmitting a motion packet. To avoid delay we use ps2_sendbyte()
+ * instead of ps2_command() which would wait for 200ms for an ACK
+ * that may never come.
+ */
+	ps2_sendbyte(&psmouse->ps2dev, PSMOUSE_CMD_DISABLE, 20);
+
+/*
+ * Poll the mouse. If it was reset the packet will be shorter than
+ * psmouse->pktsize and ps2_command will fail. We do not expect and
+ * do not handle the scenario where the mouse "upgrades" its protocol
+ * while disconnected, since that would require an additional delay.
+ * If we ever see a mouse that does this we'll adjust the code.
+ */
+	if (psmouse->poll(psmouse))
+		failed = 1;
+	else {
+		psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
+		for (i = 0; i < psmouse->pktsize; i++) {
+			psmouse->pktcnt++;
+			rc = psmouse->protocol_handler(psmouse, NULL);
+			if (rc != PSMOUSE_GOOD_DATA)
+				break;
+		}
+		if (rc != PSMOUSE_FULL_PACKET)
+			failed = 1;
+		psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
+	}
+
+/*
+ * Now try to enable the mouse. We do that even if the poll failed, and
+ * we also retry up to 5 times, otherwise we may be left with a disabled
+ * mouse.
+ */
+	for (i = 0; i < 5; i++) {
+		if (!ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
+			enabled = 1;
+			break;
+		}
+		msleep(200);
+	}
+
+	if (!enabled) {
+		printk(KERN_WARNING "psmouse.c: failed to re-enable mouse on %s\n",
+			psmouse->ps2dev.serio->phys);
+		failed = 1;
+	}
+
+	if (failed) {
+		psmouse_set_state(psmouse, PSMOUSE_IGNORE);
+		printk(KERN_INFO "psmouse.c: resync failed, issuing reconnect request\n");
+		serio_reconnect(serio);
+	} else
+		psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);
+
+	if (parent)
+		psmouse_activate(parent);
+ out:
+	up(&psmouse_sem);
+}
 
 /*
  * psmouse_cleanup() resets the mouse into power-on state.
@@ -825,6 +984,11 @@
 
 	psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
 
+	/* make sure we don't have a resync in progress */
+	up(&psmouse_sem);
+	flush_workqueue(kpsmoused_wq);
+	down(&psmouse_sem);
+
 	if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
 		parent = serio_get_drvdata(serio->parent);
 		psmouse_deactivate(parent);
@@ -862,6 +1026,7 @@
 
 	psmouse->set_rate = psmouse_set_rate;
 	psmouse->set_resolution = psmouse_set_resolution;
+	psmouse->poll = psmouse_poll;
 	psmouse->protocol_handler = psmouse_process_byte;
 	psmouse->pktsize = 3;
 
@@ -917,6 +1082,7 @@
 		goto out;
 
 	ps2_init(&psmouse->ps2dev, serio);
+	INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse);
 	psmouse->dev = input_dev;
 	sprintf(psmouse->phys, "%s/input0", serio->phys);
 
@@ -937,6 +1103,7 @@
 	psmouse->rate = psmouse_rate;
 	psmouse->resolution = psmouse_resolution;
 	psmouse->resetafter = psmouse_resetafter;
+	psmouse->resync_time = parent ? 0 : psmouse_resync_time;
 	psmouse->smartscroll = psmouse_smartscroll;
 
 	psmouse_switch_protocol(psmouse, NULL);
@@ -1281,13 +1448,21 @@
 
 static int __init psmouse_init(void)
 {
+	kpsmoused_wq = create_singlethread_workqueue("kpsmoused");
+	if (!kpsmoused_wq) {
+		printk(KERN_ERR "psmouse: failed to create kpsmoused workqueue\n");
+		return -ENOMEM;
+	}
+
 	serio_register_driver(&psmouse_drv);
+
 	return 0;
 }
 
 static void __exit psmouse_exit(void)
 {
 	serio_unregister_driver(&psmouse_drv);
+	destroy_workqueue(kpsmoused_wq);
 }
 
 module_init(psmouse_init);
diff -urN linux-2.6.15/drivers/input/mouse/psmouse.h linux-2.6.15-no1/drivers/input/mouse/psmouse.h
--- linux-2.6.15/drivers/input/mouse/psmouse.h	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/input/mouse/psmouse.h	2006-01-26 15:41:13.984761760 +0000
@@ -7,7 +7,7 @@
 #define PSMOUSE_CMD_GETINFO	0x03e9
 #define PSMOUSE_CMD_SETSTREAM	0x00ea
 #define PSMOUSE_CMD_SETPOLL	0x00f0
-#define PSMOUSE_CMD_POLL	0x03eb
+#define PSMOUSE_CMD_POLL	0x00eb	/* caller sets number of bytes to receive */
 #define PSMOUSE_CMD_GETID	0x02f2
 #define PSMOUSE_CMD_SETRATE	0x10f3
 #define PSMOUSE_CMD_ENABLE	0x00f4
@@ -23,6 +23,7 @@
 enum psmouse_state {
 	PSMOUSE_IGNORE,
 	PSMOUSE_INITIALIZING,
+	PSMOUSE_RESYNCING,
 	PSMOUSE_CMD_MODE,
 	PSMOUSE_ACTIVATED,
 };
@@ -38,9 +39,11 @@
 	void *private;
 	struct input_dev *dev;
 	struct ps2dev ps2dev;
+	struct work_struct resync_work;
 	char *vendor;
 	char *name;
 	unsigned char packet[8];
+	unsigned char badbyte;
 	unsigned char pktcnt;
 	unsigned char pktsize;
 	unsigned char type;
@@ -54,6 +57,7 @@
 	unsigned int rate;
 	unsigned int resolution;
 	unsigned int resetafter;
+	unsigned int resync_time;
 	unsigned int smartscroll;	/* Logitech only */
 
 	psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse, struct pt_regs *regs);
@@ -62,6 +66,7 @@
 
 	int (*reconnect)(struct psmouse *psmouse);
 	void (*disconnect)(struct psmouse *psmouse);
+	int (*poll)(struct psmouse *psmouse);
 
 	void (*pt_activate)(struct psmouse *psmouse);
 	void (*pt_deactivate)(struct psmouse *psmouse);
@@ -79,6 +84,7 @@
 	PSMOUSE_ALPS,
 	PSMOUSE_LIFEBOOK,
 	PSMOUSE_TRACKPOINT,
+	PSMOUSE_TOUCHKIT_PS2,
 	PSMOUSE_AUTO		/* This one should always be last */
 };
 
diff -urN linux-2.6.15/drivers/input/mouse/synaptics.c linux-2.6.15-no1/drivers/input/mouse/synaptics.c
--- linux-2.6.15/drivers/input/mouse/synaptics.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/input/mouse/synaptics.c	2006-01-26 15:41:13.984761760 +0000
@@ -652,6 +652,8 @@
 	psmouse->disconnect = synaptics_disconnect;
 	psmouse->reconnect = synaptics_reconnect;
 	psmouse->pktsize = 6;
+	/* Synaptics can usually stay in sync without extra help */
+	psmouse->resync_time = 0;
 
 	if (SYN_CAP_PASS_THROUGH(priv->capabilities))
 		synaptics_pt_create(psmouse);
diff -urN linux-2.6.15/drivers/input/mouse/touchkit_ps2.c linux-2.6.15-no1/drivers/input/mouse/touchkit_ps2.c
--- linux-2.6.15/drivers/input/mouse/touchkit_ps2.c	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.15-no1/drivers/input/mouse/touchkit_ps2.c	2006-01-26 15:41:13.985761608 +0000
@@ -0,0 +1,195 @@
+/* ----------------------------------------------------------------------------
+ * touchkit_ps2.c  --  Driver for eGalax TouchKit PS/2 Touchscreens
+ *
+ * Copyright (C) 2005 by Stefan Lucke
+ * Copyright (C) 2004 by Daniel Ritz
+ * Copyright (C) by Todd E. Johnson (mtouchusb.c)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Based upon touchkitusb.c
+ *
+ * Vendor documentation is available in support section of:
+ * http://www.egalax.com.tw/
+ *
+ */
+
+/*
+ * Changelog:
+ *
+ * 2005-10-14:	whitespace & indentation cleanup
+ *		touchkit commands defined
+ * initial version 0.1
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include <linux/input.h>
+#include <linux/serio.h>
+#include <linux/libps2.h>
+#include <linux/dmi.h>
+
+#include "psmouse.h"
+#include "touchkit_ps2.h"
+
+#define TOUCHKIT_MIN_XC			0x0
+#define TOUCHKIT_MAX_XC			0x07ff
+#define TOUCHKIT_XC_FUZZ		0x0
+#define TOUCHKIT_XC_FLAT		0x0
+#define TOUCHKIT_MIN_YC			0x0
+#define TOUCHKIT_MAX_YC			0x07ff
+#define TOUCHKIT_YC_FUZZ		0x0
+#define TOUCHKIT_YC_FLAT		0x0
+#define TOUCHKIT_REPORT_DATA_SIZE	8
+
+#define TOUCHKIT_CMD			0x0A
+#define TOUCHKIT_CMD_LENGTH		1
+
+#define TOUCHKIT_CMD_ACTIVE		'A'
+#define TOUCHKIT_CMD_FIRMWARE_VERSION	'D'
+#define TOUCHKIT_CMD_CONTROLLER_TYPE	'E'
+
+#define TOUCHKIT_SEND_PARMS(s,r,c)	((s) << 12 | (r) << 8 | (c))
+
+#define TOUCHKIT_DOWN			0x01
+#define TOUCHKIT_POINT_TOUCH		0x81
+#define TOUCHKIT_POINT_NOTOUCH		0x80
+
+#define TOUCHKIT_GET_TOUCHED(dat)	((((dat)[0]) & TOUCHKIT_DOWN) ? 1 : 0)
+#define TOUCHKIT_GET_X(dat)		(((dat)[1] << 7) | (dat)[2])
+#define TOUCHKIT_GET_Y(dat)		(((dat)[3] << 7) | (dat)[4])
+
+#define DRIVER_VERSION			"v0.2"
+#define DRIVER_AUTHOR			"Stefan Lucke <stefan@lucke.in-berlin.de>"
+#define DRIVER_DESC			"eGalax TouchKit PS/2 Touchscreen Driver"
+
+static int xyswap = 0;
+module_param(xyswap, bool, 0644);
+MODULE_PARM_DESC(xyswap, "If set X and Y axes are swapped.");
+
+static int  xinvert = 0;
+module_param(xinvert, bool, 0644);
+MODULE_PARM_DESC(xinvert, "Invert direction of x axis.");
+
+static int  yinvert = 0;
+module_param(yinvert, bool, 0644);
+MODULE_PARM_DESC(yinvert, "Invert direction of y axis.");
+
+static int  xfuzz = 0;
+module_param(xfuzz, uint, 0644);
+MODULE_PARM_DESC(xfuzz, "Fuzz value for x axis.");
+
+static int  yfuzz = 0;
+module_param(yfuzz, uint, 0644);
+MODULE_PARM_DESC(yfuzz, "Fuzz value for y axis.");
+
+static int  smartpad = 0;
+module_param(smartpad, bool, 0644);
+MODULE_PARM_DESC(smartpad, "Act as a smartpad device.");
+
+static int  mouse = 0;
+module_param(mouse, bool, 0644);
+MODULE_PARM_DESC(mouse, "Report BTN_MOUSE events instead of BTN_TOUCH.");
+
+static psmouse_ret_t touchkit_ps2_process_byte(struct psmouse *psmouse, struct pt_regs *regs)
+{
+	unsigned char 		*packet = psmouse->packet;
+	struct input_dev 	*dev = psmouse->dev;
+	int			x, y;
+
+	if (psmouse->pktcnt != 5)
+		return PSMOUSE_GOOD_DATA;
+
+	input_regs(dev, regs);
+
+	if (xyswap) {
+		y = TOUCHKIT_GET_X(packet);
+		x = TOUCHKIT_GET_Y(packet);
+	} else {
+		x = TOUCHKIT_GET_X(packet);
+		y = TOUCHKIT_GET_Y(packet);
+	}
+
+	y = (yinvert) ? TOUCHKIT_MAX_YC - y : y;
+	x = (xinvert) ? TOUCHKIT_MAX_XC - x : x;
+
+	input_report_key(dev,
+			 (mouse) ? BTN_MOUSE : BTN_TOUCH,
+			 TOUCHKIT_GET_TOUCHED(packet));
+
+	if (smartpad)
+		input_report_key(dev, BTN_TOOL_FINGER, 1);
+
+	input_report_abs(dev, ABS_X, x);
+	input_report_abs(dev, ABS_Y, y);
+
+	input_sync(dev);
+
+	return PSMOUSE_FULL_PACKET;
+}
+
+int touchkit_ps2_detect(struct psmouse *psmouse, int set_properties)
+{
+	unsigned char	param[3];
+	int		command;
+
+	param[0] = TOUCHKIT_CMD_LENGTH;
+	param[1] = TOUCHKIT_CMD_ACTIVE;
+	command = TOUCHKIT_SEND_PARMS(2, 3, TOUCHKIT_CMD);
+
+	if (ps2_command(&psmouse->ps2dev, param, command) == 0 &&
+	    param[0] == TOUCHKIT_CMD &&
+	    param[1] == 0x01 &&
+	    param[2] == TOUCHKIT_CMD_ACTIVE) {
+		printk(KERN_INFO "touchkit_ps2: device detected\n");
+		if (set_properties) {
+			psmouse->vendor = "eGalax";
+			psmouse->name = "Touchscreen";
+		}
+		return 0;
+	}
+	return -1;
+}
+
+int touchkit_ps2_init(struct psmouse *psmouse)
+{
+	psmouse->dev->evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
+
+	set_bit(mouse ? BTN_MOUSE : BTN_TOUCH, psmouse->dev->keybit);
+	if (smartpad)
+		set_bit(BTN_TOOL_FINGER, psmouse->dev->keybit);
+
+	/* input_set_abs_params() also sets the corresponding absbit bits */
+	input_set_abs_params(psmouse->dev, ABS_X, TOUCHKIT_MIN_XC,
+			     TOUCHKIT_MAX_XC, xfuzz, TOUCHKIT_XC_FLAT);
+	input_set_abs_params(psmouse->dev, ABS_Y, TOUCHKIT_MIN_YC,
+			     TOUCHKIT_MAX_YC, yfuzz, TOUCHKIT_YC_FLAT);
+
+	psmouse->protocol_handler = touchkit_ps2_process_byte;
+	psmouse->pktsize = 5;
+
+	return 0;
+}
diff -urN linux-2.6.15/drivers/input/mouse/touchkit_ps2.h linux-2.6.15-no1/drivers/input/mouse/touchkit_ps2.h
--- linux-2.6.15/drivers/input/mouse/touchkit_ps2.h	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.15-no1/drivers/input/mouse/touchkit_ps2.h	2006-01-26 15:41:13.985761608 +0000
@@ -0,0 +1,18 @@
+/* ----------------------------------------------------------------------------
+ * touchkit_ps2.h  --  Driver for eGalax TouchKit PS/2 Touchscreens
+ *
+ * Copyright (C) 2005 by Stefan Lucke
+ * Copyright (c) 2005 Vojtech Pavlik
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _TOUCHKIT_PS2_H
+#define _TOUCHKIT_PS2_H
+
+int touchkit_ps2_detect(struct psmouse *psmouse, int set_properties);
+int touchkit_ps2_init(struct psmouse *psmouse);
+
+#endif
diff -urN linux-2.6.15/drivers/net/sk98lin/h/skaddr.h linux-2.6.15-no1/drivers/net/sk98lin/h/skaddr.h
--- linux-2.6.15/drivers/net/sk98lin/h/skaddr.h	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/h/skaddr.h	2006-01-26 15:41:30.418263488 +0000
@@ -236,18 +236,6 @@
 	SK_U32	PortNumber,
 	int	Flags);
 
-extern	int	SkAddrXmacMcClear(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	SK_U32	PortNumber,
-	int	Flags);
-
-extern	int	SkAddrGmacMcClear(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	SK_U32	PortNumber,
-	int	Flags);
-
 extern	int	SkAddrMcAdd(
 	SK_AC		*pAC,
 	SK_IOC		IoC,
@@ -255,35 +243,11 @@
 	SK_MAC_ADDR	*pMc,
 	int		Flags);
 
-extern	int	SkAddrXmacMcAdd(
-	SK_AC		*pAC,
-	SK_IOC		IoC,
-	SK_U32		PortNumber,
-	SK_MAC_ADDR	*pMc,
-	int		Flags);
-
-extern	int	SkAddrGmacMcAdd(
-	SK_AC		*pAC,
-	SK_IOC		IoC,
-	SK_U32		PortNumber,
-	SK_MAC_ADDR	*pMc,
-	int		Flags);
-
 extern	int	SkAddrMcUpdate(
 	SK_AC	*pAC,
 	SK_IOC	IoC,
 	SK_U32	PortNumber);
 
-extern	int	SkAddrXmacMcUpdate(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	SK_U32	PortNumber);
-
-extern	int	SkAddrGmacMcUpdate(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	SK_U32	PortNumber);
-
 extern	int	SkAddrOverride(
 	SK_AC		*pAC,
 	SK_IOC		IoC,
@@ -297,18 +261,6 @@
 	SK_U32	PortNumber,
 	int	NewPromMode);
 
-extern	int	SkAddrXmacPromiscuousChange(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	SK_U32	PortNumber,
-	int	NewPromMode);
-
-extern	int	SkAddrGmacPromiscuousChange(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	SK_U32	PortNumber,
-	int	NewPromMode);	
-
 #ifndef SK_SLIM
 extern	int	SkAddrSwap(
 	SK_AC	*pAC,
diff -urN linux-2.6.15/drivers/net/sk98lin/h/skcsum.h linux-2.6.15-no1/drivers/net/sk98lin/h/skcsum.h
--- linux-2.6.15/drivers/net/sk98lin/h/skcsum.h	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/h/skcsum.h	2006-01-26 15:41:30.419263336 +0000
@@ -203,12 +203,6 @@
 	unsigned	Checksum2,
 	int			NetNumber);
 
-extern void SkCsGetSendInfo(
-	SK_AC				*pAc,
-	void				*pIpHeader,
-	SKCS_PACKET_INFO	*pPacketInfo,
-	int					NetNumber);
-
 extern void SkCsSetReceiveFlags(
 	SK_AC		*pAc,
 	unsigned	ReceiveFlags,
diff -urN linux-2.6.15/drivers/net/sk98lin/h/skgeinit.h linux-2.6.15-no1/drivers/net/sk98lin/h/skgeinit.h
--- linux-2.6.15/drivers/net/sk98lin/h/skgeinit.h	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/h/skgeinit.h	2006-01-26 15:41:30.419263336 +0000
@@ -464,12 +464,6 @@
 /*
  * public functions in skgeinit.c
  */
-extern void	SkGePollRxD(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	int		Port,
-	SK_BOOL	PollRxD);
-
 extern void	SkGePollTxD(
 	SK_AC	*pAC,
 	SK_IOC	IoC,
@@ -522,10 +516,6 @@
 	int		Led,
 	int		Mode);
 
-extern void	SkGeInitRamIface(
-	SK_AC	*pAC,
-	SK_IOC	IoC);
-
 extern int	SkGeInitAssignRamToQueues(
 	SK_AC	*pAC,
 	int		ActivePort,
@@ -549,11 +539,6 @@
 	SK_IOC	IoC,
 	int		Port);
 
-extern void	SkMacClearRst(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	int		Port);
-
 extern void	SkXmInitMac(
 	SK_AC	*pAC,
 	SK_IOC	IoC,
@@ -580,11 +565,6 @@
 	SK_IOC	IoC,
 	int		Port);
 
-extern void	SkMacFlushRxFifo(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	int		Port);
-
 extern void	SkMacIrq(
 	SK_AC	*pAC,
 	SK_IOC	IoC,
@@ -601,12 +581,6 @@
 	int		Port,
 	SK_U16	IStatus);
 
-extern void  SkMacSetRxTxEn(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	int		Port,
-	int		Para);
-
 extern int  SkMacRxTxEnable(
 	SK_AC	*pAC,
 	SK_IOC	IoC,
@@ -659,16 +633,6 @@
 	int		StartNum,
 	int		StopNum);
 
-extern void	SkXmInitDupMd(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	int		Port);
-
-extern void	SkXmInitPauseMd(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	int		Port);
-
 extern void	SkXmAutoNegLipaXmac(
 	SK_AC	*pAC,
 	SK_IOC	IoC,
@@ -729,17 +693,6 @@
 	int		Port,
 	SK_BOOL	StartTest);
 
-extern int SkGmEnterLowPowerMode(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	int		Port,
-	SK_U8	Mode);
-
-extern int SkGmLeaveLowPowerMode(
-	SK_AC	*pAC,
-	SK_IOC	IoC,
-	int		Port);
-
 #ifdef SK_DIAG
 extern void	SkGePhyRead(
 	SK_AC	*pAC,
@@ -782,7 +735,6 @@
 /*
  * public functions in skgeinit.c
  */
-extern void	SkGePollRxD();
 extern void	SkGePollTxD();
 extern void	SkGeYellowLED();
 extern int	SkGeCfgSync();
@@ -792,7 +744,6 @@
 extern void	SkGeDeInit();
 extern int	SkGeInitPort();
 extern void	SkGeXmitLED();
-extern void	SkGeInitRamIface();
 extern int	SkGeInitAssignRamToQueues();
 
 /*
@@ -801,18 +752,15 @@
 extern void SkMacRxTxDisable();
 extern void	SkMacSoftRst();
 extern void	SkMacHardRst();
-extern void	SkMacClearRst();
 extern void SkMacInitPhy();
 extern int  SkMacRxTxEnable();
 extern void SkMacPromiscMode();
 extern void SkMacHashing();
 extern void SkMacIrqDisable();
 extern void	SkMacFlushTxFifo();
-extern void	SkMacFlushRxFifo();
 extern void	SkMacIrq();
 extern int	SkMacAutoNegDone();
 extern void	SkMacAutoNegLipaPhy();
-extern void SkMacSetRxTxEn();
 extern void	SkXmInitMac();
 extern void	SkXmPhyRead();
 extern void	SkXmPhyWrite();
@@ -820,8 +768,6 @@
 extern void	SkGmPhyRead();
 extern void	SkGmPhyWrite();
 extern void	SkXmClrExactAddr();
-extern void	SkXmInitDupMd();
-extern void	SkXmInitPauseMd();
 extern void	SkXmAutoNegLipaXmac();
 extern int	SkXmUpdateStats();
 extern int	SkGmUpdateStats();
@@ -832,8 +778,6 @@
 extern int	SkXmOverflowStatus();
 extern int	SkGmOverflowStatus();
 extern int	SkGmCableDiagStatus();
-extern int	SkGmEnterLowPowerMode();
-extern int	SkGmLeaveLowPowerMode();
 
 #ifdef SK_DIAG
 extern void	SkGePhyRead();
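
Note that skgeinit.h declares everything twice: once with full ANSI
prototypes and once, under SK_KR_PROTO, as empty-parenthesis K&R
declarations for pre-ANSI compilers, which is why each function pruned by
this patch disappears from two lists. The difference between the two
forms, as a sketch with a placeholder name:

	/* ANSI prototype: argument types are declared and checked */
	extern int	Example(int Port, int Flags);

	/* K&R-style declaration: no argument info, no type checking */
	extern int	Example();
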
diff -urN linux-2.6.15/drivers/net/sk98lin/h/skgepnmi.h linux-2.6.15-no1/drivers/net/sk98lin/h/skgepnmi.h
--- linux-2.6.15/drivers/net/sk98lin/h/skgepnmi.h	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/h/skgepnmi.h	2006-01-26 15:41:30.420263184 +0000
@@ -946,10 +946,6 @@
  * Function prototypes
  */
 extern int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int Level);
-extern int SkPnmiGetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf,
-	unsigned int* pLen, SK_U32 Instance, SK_U32 NetIndex);
-extern int SkPnmiPreSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id,
-	void* pBuf, unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
 extern int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf,
 	unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
 extern int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf,
diff -urN linux-2.6.15/drivers/net/sk98lin/h/skgesirq.h linux-2.6.15-no1/drivers/net/sk98lin/h/skgesirq.h
--- linux-2.6.15/drivers/net/sk98lin/h/skgesirq.h	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/h/skgesirq.h	2006-01-26 15:41:30.420263184 +0000
@@ -105,7 +105,6 @@
 
 extern void SkGeSirqIsr(SK_AC *pAC, SK_IOC IoC, SK_U32 Istatus);
 extern int  SkGeSirqEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para);
-extern void SkHWLinkUp(SK_AC *pAC, SK_IOC IoC, int Port);
 extern void SkHWLinkDown(SK_AC *pAC, SK_IOC IoC, int Port);
 
 #endif	/* _INC_SKGESIRQ_H_ */
diff -urN linux-2.6.15/drivers/net/sk98lin/h/ski2c.h linux-2.6.15-no1/drivers/net/sk98lin/h/ski2c.h
--- linux-2.6.15/drivers/net/sk98lin/h/ski2c.h	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/h/ski2c.h	2006-01-26 15:41:30.421263032 +0000
@@ -162,9 +162,6 @@
 } SK_I2C;
 
 extern int SkI2cInit(SK_AC *pAC, SK_IOC IoC, int Level);
-extern int SkI2cWrite(SK_AC *pAC, SK_IOC IoC, SK_U32 Data, int Dev, int Size,
-					   int Reg, int Burst);
-extern int SkI2cReadSensor(SK_AC *pAC, SK_IOC IoC, SK_SENSOR *pSen);
 #ifdef SK_DIAG
 extern	SK_U32 SkI2cRead(SK_AC *pAC, SK_IOC IoC, int Dev, int Size, int Reg,
 						 int Burst);
diff -urN linux-2.6.15/drivers/net/sk98lin/h/skvpd.h linux-2.6.15-no1/drivers/net/sk98lin/h/skvpd.h
--- linux-2.6.15/drivers/net/sk98lin/h/skvpd.h	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/h/skvpd.h	2006-01-26 15:41:30.422262880 +0000
@@ -191,14 +191,6 @@
 	int			addr);
 #endif	/* SKDIAG */
 
-extern int	VpdSetupPara(
-	SK_AC		*pAC,
-	const char	*key,
-	const char	*buf,
-	int			len,
-	int			type,
-	int			op);
-
 extern SK_VPD_STATUS	*VpdStat(
 	SK_AC		*pAC,
 	SK_IOC		IoC);
@@ -235,11 +227,6 @@
 	SK_AC		*pAC,
 	SK_IOC		IoC);
 
-extern void	VpdErrLog(
-	SK_AC		*pAC,
-	SK_IOC		IoC,
-	char		*msg);
-
 #ifdef	SKDIAG
 extern int	VpdReadBlock(
 	SK_AC		*pAC,
@@ -257,7 +244,6 @@
 #endif	/* SKDIAG */
 #else	/* SK_KR_PROTO */
 extern SK_U32	VpdReadDWord();
-extern int	VpdSetupPara();
 extern SK_VPD_STATUS	*VpdStat();
 extern int	VpdKeys();
 extern int	VpdRead();
@@ -265,7 +251,6 @@
 extern int	VpdWrite();
 extern int	VpdDelete();
 extern int	VpdUpdate();
-extern void	VpdErrLog();
 #endif	/* SK_KR_PROTO */
 
 #endif	/* __INC_SKVPD_H_ */
diff -urN linux-2.6.15/drivers/net/sk98lin/skaddr.c linux-2.6.15-no1/drivers/net/sk98lin/skaddr.c
--- linux-2.6.15/drivers/net/sk98lin/skaddr.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/skaddr.c	2006-01-26 15:41:30.423262728 +0000
@@ -87,6 +87,21 @@
 static int	Next0[SK_MAX_MACS] = {0};
 #endif	/* DEBUG */
 
+static int SkAddrGmacMcAdd(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
+			   SK_MAC_ADDR *pMc, int Flags);
+static int SkAddrGmacMcClear(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
+			     int Flags);
+static int SkAddrGmacMcUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber);
+static int SkAddrGmacPromiscuousChange(SK_AC *pAC, SK_IOC IoC,
+				       SK_U32 PortNumber, int NewPromMode);
+static int SkAddrXmacMcAdd(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
+			   SK_MAC_ADDR *pMc, int Flags);
+static int SkAddrXmacMcClear(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
+			     int Flags);
+static int SkAddrXmacMcUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber);
+static int SkAddrXmacPromiscuousChange(SK_AC *pAC, SK_IOC IoC,
+				       SK_U32 PortNumber, int NewPromMode);
+
 /* functions ******************************************************************/
 
 /******************************************************************************
@@ -372,7 +387,7 @@
  *	SK_ADDR_SUCCESS
  *	SK_ADDR_ILLEGAL_PORT
  */
-int	SkAddrXmacMcClear(
+static int	SkAddrXmacMcClear(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC,		/* I/O context */
 SK_U32	PortNumber,	/* Index of affected port */
@@ -429,7 +444,7 @@
  *	SK_ADDR_SUCCESS
  *	SK_ADDR_ILLEGAL_PORT
  */
-int	SkAddrGmacMcClear(
+static int	SkAddrGmacMcClear(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC,		/* I/O context */
 SK_U32	PortNumber,	/* Index of affected port */
@@ -519,7 +534,7 @@
  * Returns:
  *	Hash value of multicast address.
  */
-SK_U32 SkXmacMcHash(
+static SK_U32 SkXmacMcHash(
 unsigned char *pMc)	/* Multicast address */
 {
 	SK_U32 Idx;
@@ -557,7 +572,7 @@
  * Returns:
  *	Hash value of multicast address.
  */
-SK_U32 SkGmacMcHash(
+static SK_U32 SkGmacMcHash(
 unsigned char *pMc)	/* Multicast address */
 {
 	SK_U32 Data;
@@ -672,7 +687,7 @@
  *	SK_MC_ILLEGAL_ADDRESS
  *	SK_MC_RLMT_OVERFLOW
  */
-int	SkAddrXmacMcAdd(
+static int	SkAddrXmacMcAdd(
 SK_AC		*pAC,		/* adapter context */
 SK_IOC		IoC,		/* I/O context */
 SK_U32		PortNumber,	/* Port Number */
@@ -778,7 +793,7 @@
  *	SK_MC_FILTERING_INEXACT
  *	SK_MC_ILLEGAL_ADDRESS
  */
-int	SkAddrGmacMcAdd(
+static int	SkAddrGmacMcAdd(
 SK_AC		*pAC,		/* adapter context */
 SK_IOC		IoC,		/* I/O context */
 SK_U32		PortNumber,	/* Port Number */
@@ -937,7 +952,7 @@
  *	SK_MC_FILTERING_INEXACT
  *	SK_ADDR_ILLEGAL_PORT
  */
-int	SkAddrXmacMcUpdate(
+static int	SkAddrXmacMcUpdate(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC,		/* I/O context */
 SK_U32	PortNumber)	/* Port Number */
@@ -1082,7 +1097,7 @@
  *	SK_MC_FILTERING_INEXACT
  *	SK_ADDR_ILLEGAL_PORT
  */
-int	SkAddrGmacMcUpdate(
+static int	SkAddrGmacMcUpdate(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC,		/* I/O context */
 SK_U32	PortNumber)	/* Port Number */
@@ -1468,7 +1483,7 @@
  *	SK_ADDR_SUCCESS
  *	SK_ADDR_ILLEGAL_PORT
  */
-int	SkAddrXmacPromiscuousChange(
+static int	SkAddrXmacPromiscuousChange(
 SK_AC	*pAC,			/* adapter context */
 SK_IOC	IoC,			/* I/O context */
 SK_U32	PortNumber,		/* port whose promiscuous mode changes */
@@ -1585,7 +1600,7 @@
  *	SK_ADDR_SUCCESS
  *	SK_ADDR_ILLEGAL_PORT
  */
-int	SkAddrGmacPromiscuousChange(
+static int	SkAddrGmacPromiscuousChange(
 SK_AC	*pAC,			/* adapter context */
 SK_IOC	IoC,			/* I/O context */
 SK_U32	PortNumber,		/* port whose promiscuous mode changes */
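
The skaddr.c hunks above all make the same change: helpers called only
from within skaddr.c lose their extern prototypes in skaddr.h, become
static, and gain forward declarations near the top of the file so the
definition order does not matter. A minimal sketch of the pattern, with
placeholder names rather than sk98lin code:

	/* forward declaration: callers earlier in the file still compile */
	static int helper(int x);

	int PublicEntry(int x)
	{
		return helper(x) + 1;
	}

	/* static keeps the symbol file-local; the compiler can warn if it
	 * ends up unused and the linker never exports it */
	static int helper(int x)
	{
		return 2 * x;
	}
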
diff -urN linux-2.6.15/drivers/net/sk98lin/skgeinit.c linux-2.6.15-no1/drivers/net/sk98lin/skgeinit.c
--- linux-2.6.15/drivers/net/sk98lin/skgeinit.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/skgeinit.c	2006-01-26 15:41:30.424262576 +0000
@@ -59,34 +59,6 @@
 
 /******************************************************************************
  *
- *	SkGePollRxD() - Enable / Disable Descriptor Polling of RxD Ring
- *
- * Description:
- *	Enable or disable the descriptor polling of the receive descriptor
- *	ring (RxD) for port 'Port'.
- *	The new configuration is *not* saved over any SkGeStopPort() and
- *	SkGeInitPort() calls.
- *
- * Returns:
- *	nothing
- */
-void SkGePollRxD(
-SK_AC	*pAC,		/* adapter context */
-SK_IOC	IoC,		/* IO context */
-int		Port,		/* Port Index (MAC_1 + n) */
-SK_BOOL PollRxD)	/* SK_TRUE (enable pol.), SK_FALSE (disable pol.) */
-{
-	SK_GEPORT *pPrt;
-
-	pPrt = &pAC->GIni.GP[Port];
-
-	SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), (PollRxD) ?
-		CSR_ENA_POL : CSR_DIS_POL);
-}	/* SkGePollRxD */
-
-
-/******************************************************************************
- *
  *	SkGePollTxD() - Enable / Disable Descriptor Polling of TxD Rings
  *
  * Description:
@@ -952,7 +924,7 @@
  * Returns:
  *	nothing
  */
-void SkGeInitRamIface(
+static void SkGeInitRamIface(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC)		/* IO context */
 {
@@ -1409,83 +1381,6 @@
 
 }	/* SkGeInit0*/
 
-#ifdef SK_PCI_RESET
-
-/******************************************************************************
- *
- *	SkGePciReset() - Reset PCI interface
- *
- * Description:
- *	o Read PCI configuration.
- *	o Change power state to 3.
- *	o Change power state to 0.
- *	o Restore PCI configuration.
- *
- * Returns:
- *	0:	Success.
- *	1:	Power state could not be changed to 3.
- */
-static int SkGePciReset(
-SK_AC	*pAC,		/* adapter context */
-SK_IOC	IoC)		/* IO context */
-{
-	int		i;
-	SK_U16	PmCtlSts;
-	SK_U32	Bp1;
-	SK_U32	Bp2;
-	SK_U16	PciCmd;
-	SK_U8	Cls;
-	SK_U8	Lat;
-	SK_U8	ConfigSpace[PCI_CFG_SIZE];
-
-	/*
-	 * Note: Switching to D3 state is like a software reset.
-	 *		 Switching from D3 to D0 is a hardware reset.
-	 *		 We have to save and restore the configuration space.
-	 */
-	for (i = 0; i < PCI_CFG_SIZE; i++) {
-		SkPciReadCfgDWord(pAC, i*4, &ConfigSpace[i]);
-	}
-
-	/* We know the RAM Interface Arbiter is enabled. */
-	SkPciWriteCfgWord(pAC, PCI_PM_CTL_STS, PCI_PM_STATE_D3);
-	SkPciReadCfgWord(pAC, PCI_PM_CTL_STS, &PmCtlSts);
-	
-	if ((PmCtlSts & PCI_PM_STATE_MSK) != PCI_PM_STATE_D3) {
-		return(1);
-	}
-
-	/* Return to D0 state. */
-	SkPciWriteCfgWord(pAC, PCI_PM_CTL_STS, PCI_PM_STATE_D0);
-
-	/* Check for D0 state. */
-	SkPciReadCfgWord(pAC, PCI_PM_CTL_STS, &PmCtlSts);
-	
-	if ((PmCtlSts & PCI_PM_STATE_MSK) != PCI_PM_STATE_D0) {
-		return(1);
-	}
-
-	/* Check PCI Config Registers. */
-	SkPciReadCfgWord(pAC, PCI_COMMAND, &PciCmd);
-	SkPciReadCfgByte(pAC, PCI_CACHE_LSZ, &Cls);
-	SkPciReadCfgDWord(pAC, PCI_BASE_1ST, &Bp1);
-	SkPciReadCfgDWord(pAC, PCI_BASE_2ND, &Bp2);
-	SkPciReadCfgByte(pAC, PCI_LAT_TIM, &Lat);
-	
-	if (PciCmd != 0 || Cls != (SK_U8)0 || Lat != (SK_U8)0 ||
-		(Bp1 & 0xfffffff0L) != 0 || Bp2 != 1) {
-		return(1);
-	}
-
-	/* Restore PCI Config Space. */
-	for (i = 0; i < PCI_CFG_SIZE; i++) {
-		SkPciWriteCfgDWord(pAC, i*4, ConfigSpace[i]);
-	}
-
-	return(0);
-}	/* SkGePciReset */
-
-#endif /* SK_PCI_RESET */
 
 /******************************************************************************
  *
@@ -1524,10 +1419,6 @@
 	/* save CLK_RUN bits (YUKON-Lite) */
 	SK_IN16(IoC, B0_CTST, &CtrlStat);
 
-#ifdef SK_PCI_RESET
-	(void)SkGePciReset(pAC, IoC);
-#endif /* SK_PCI_RESET */
-
 	/* do the SW-reset */
 	SK_OUT8(IoC, B0_CTST, CS_RST_SET);
 
@@ -1991,11 +1882,6 @@
 	int	i;
 	SK_U16	Word;
 
-#ifdef SK_PHY_LP_MODE
-	SK_U8	Byte;
-	SK_U16	PmCtlSts;
-#endif /* SK_PHY_LP_MODE */
-
 #if (!defined(SK_SLIM) && !defined(VCPU))
 	/* ensure I2C is ready */
 	SkI2cWaitIrq(pAC, IoC);
@@ -2010,38 +1896,6 @@
 		}
 	}
 
-#ifdef SK_PHY_LP_MODE
-    /*
-	 * for power saving purposes within mobile environments
-	 * we set the PHY to coma mode and switch to D3 power state.
-	 */
-	if (pAC->GIni.GIYukonLite &&
-		pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) {
-
-		/* for all ports switch PHY to coma mode */
-		for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
-			
-			SkGmEnterLowPowerMode(pAC, IoC, i, PHY_PM_DEEP_SLEEP);
-		}
-
-		if (pAC->GIni.GIVauxAvail) {
-			/* switch power to VAUX */
-			Byte = PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF;
-
-			SK_OUT8(IoC, B0_POWER_CTRL, Byte);
-		}
-		
-		/* switch to D3 state */
-		SK_IN16(IoC, PCI_C(PCI_PM_CTL_STS), &PmCtlSts);
-
-		PmCtlSts |= PCI_PM_STATE_D3;
-
-		SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
-
-		SK_OUT16(IoC, PCI_C(PCI_PM_CTL_STS), PmCtlSts);
-	}
-#endif /* SK_PHY_LP_MODE */
-
 	/* Reset all bits in the PCI STATUS register */
 	/*
 	 * Note: PCI Cfg cycles cannot be used, because they are not
diff -urN linux-2.6.15/drivers/net/sk98lin/skgemib.c linux-2.6.15-no1/drivers/net/sk98lin/skgemib.c
--- linux-2.6.15/drivers/net/sk98lin/skgemib.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/skgemib.c	2006-01-26 15:41:30.425262424 +0000
@@ -871,13 +871,6 @@
 		sizeof(SK_PNMI_CONF),
 		SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyType),
 		SK_PNMI_RO, MacPrivateConf, 0},
-#ifdef SK_PHY_LP_MODE
-		{OID_SKGE_PHY_LP_MODE,
-		SK_PNMI_MAC_ENTRIES,
-		sizeof(SK_PNMI_CONF),
-		SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyMode),
-		SK_PNMI_RW, MacPrivateConf, 0},
-#endif	
 	{OID_SKGE_LINK_CAP,
 		SK_PNMI_MAC_ENTRIES,
 		sizeof(SK_PNMI_CONF),
diff -urN linux-2.6.15/drivers/net/sk98lin/skgepnmi.c linux-2.6.15-no1/drivers/net/sk98lin/skgepnmi.c
--- linux-2.6.15/drivers/net/sk98lin/skgepnmi.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/skgepnmi.c	2006-01-26 15:41:30.428261968 +0000
@@ -56,10 +56,6 @@
  * Public Function prototypes
  */
 int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int level);
-int SkPnmiGetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf,
-	unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
-int SkPnmiPreSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf,
-	unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
 int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf,
 	unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
 int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf,
@@ -587,7 +583,7 @@
  *                           exist (e.g. port instance 3 on a two port
  *	                         adapter.
  */
-int SkPnmiGetVar(
+static int SkPnmiGetVar(
 SK_AC *pAC,		/* Pointer to adapter context */
 SK_IOC IoC,		/* IO context handle */
 SK_U32 Id,		/* Object ID that is to be processed */
@@ -629,7 +625,7 @@
  *                           exist (e.g. port instance 3 on a two port
  *	                         adapter.
  */
-int SkPnmiPreSetVar(
+static int SkPnmiPreSetVar(
 SK_AC *pAC,		/* Pointer to adapter context */
 SK_IOC IoC,		/* IO context handle */
 SK_U32 Id,		/* Object ID that is to be processed */
@@ -5062,9 +5058,6 @@
 		case OID_SKGE_SPEED_CAP:
 		case OID_SKGE_SPEED_MODE:
 		case OID_SKGE_SPEED_STATUS:
-#ifdef SK_PHY_LP_MODE
-		case OID_SKGE_PHY_LP_MODE:
-#endif
 			if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U8)) {
 
 				*pLen = (Limit - LogPortIndex) * sizeof(SK_U8);
@@ -5140,28 +5133,6 @@
 				Offset += sizeof(SK_U32);
 				break;
 
-#ifdef SK_PHY_LP_MODE
-			case OID_SKGE_PHY_LP_MODE:
-				if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
-					if (LogPortIndex == 0) {
-						continue;
-					}
-					else {
-						/* Get value for physical ports */
-						PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex);
-						Val8 = (SK_U8) pAC->GIni.GP[PhysPortIndex].PPhyPowerState;
-						*pBufPtr = Val8;
-					}
-				}
-				else { /* DualNetMode */
-					
-					Val8 = (SK_U8) pAC->GIni.GP[PhysPortIndex].PPhyPowerState;
-					*pBufPtr = Val8;
-				}
-				Offset += sizeof(SK_U8);
-				break;
-#endif
-
 			case OID_SKGE_LINK_CAP:
 				if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
 					if (LogPortIndex == 0) {
@@ -5478,16 +5449,6 @@
 		}
 		break;
 
-#ifdef SK_PHY_LP_MODE
-	case OID_SKGE_PHY_LP_MODE:
-		if (*pLen < Limit - LogPortIndex) {
-
-			*pLen = Limit - LogPortIndex;
-			return (SK_PNMI_ERR_TOO_SHORT);
-		}
-		break;
-#endif
-
 	case OID_SKGE_MTU:
 		if (*pLen < sizeof(SK_U32)) {
 
@@ -5845,116 +5806,6 @@
 			Offset += sizeof(SK_U32);
 			break;
 		
-#ifdef SK_PHY_LP_MODE
-		case OID_SKGE_PHY_LP_MODE:
-			/* The preset ends here */
-			if (Action == SK_PNMI_PRESET) {
-
-				return (SK_PNMI_ERR_OK);
-			}
-
-			if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
-				if (LogPortIndex == 0) {
-					Offset = 0;
-					continue;
-				}
-				else {
-					/* Set value for physical ports */
-					PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex);
-
-					switch (*(pBuf + Offset)) {
-						case 0:
-							/* If LowPowerMode is active, we can leave it. */
-							if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState) {
-
-								Val32 = SkGmLeaveLowPowerMode(pAC, IoC, PhysPortIndex);
-								
-								if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState < 3)	{
-									
-									SkDrvInitAdapter(pAC);
-								}
-								break;
-							}
-							else {
-								*pLen = 0;
-								return (SK_PNMI_ERR_GENERAL);
-							}
-						case 1:
-						case 2:
-						case 3:
-						case 4:
-							/* If no LowPowerMode is active, we can enter it. */
-							if (!pAC->GIni.GP[PhysPortIndex].PPhyPowerState) {
-
-								if ((*(pBuf + Offset)) < 3)	{
-								
-									SkDrvDeInitAdapter(pAC);
-								}
-
-								Val32 = SkGmEnterLowPowerMode(pAC, IoC, PhysPortIndex, *pBuf);
-								break;
-							}
-							else {
-								*pLen = 0;
-								return (SK_PNMI_ERR_GENERAL);
-							}
-						default:
-							*pLen = 0;
-							return (SK_PNMI_ERR_BAD_VALUE);
-					}
-				}
-			}
-			else { /* DualNetMode */
-				
-				switch (*(pBuf + Offset)) {
-					case 0:
-						/* If we are in a LowPowerMode, we can leave it. */
-						if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState) {
-
-							Val32 = SkGmLeaveLowPowerMode(pAC, IoC, PhysPortIndex);
-							
-							if (pAC->GIni.GP[PhysPortIndex].PPhyPowerState < 3)	{
-
-								SkDrvInitAdapter(pAC);
-							}
-							break;
-						}
-						else {
-							*pLen = 0;
-							return (SK_PNMI_ERR_GENERAL);
-						}
-					
-					case 1:
-					case 2:
-					case 3:
-					case 4:
-						/* If we are not already in LowPowerMode, we can enter it. */
-						if (!pAC->GIni.GP[PhysPortIndex].PPhyPowerState) {
-
-							if ((*(pBuf + Offset)) < 3)	{
-
-								SkDrvDeInitAdapter(pAC);
-							}
-							else {
-
-								Val32 = SkGmEnterLowPowerMode(pAC, IoC, PhysPortIndex, *pBuf);
-							}
-							break;
-						}
-						else {
-							*pLen = 0;
-							return (SK_PNMI_ERR_GENERAL);
-						}
-					
-					default:
-						*pLen = 0;
-						return (SK_PNMI_ERR_BAD_VALUE);
-				}
-			}
-			Offset += sizeof(SK_U8);
-			break;
-#endif
-
 		default:
             SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR,
                 ("MacPrivateConf: Unknown OID should be handled before set"));
diff -urN linux-2.6.15/drivers/net/sk98lin/skgesirq.c linux-2.6.15-no1/drivers/net/sk98lin/skgesirq.c
--- linux-2.6.15/drivers/net/sk98lin/skgesirq.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/skgesirq.c	2006-01-26 15:41:30.430261664 +0000
@@ -265,7 +265,7 @@
  *
  * Returns: N/A
  */
-void SkHWLinkUp(
+static void SkHWLinkUp(
 SK_AC	*pAC,	/* adapter context */
 SK_IOC	IoC,	/* IO context */
 int		Port)	/* Port Index (MAC_1 + n) */
@@ -612,14 +612,6 @@
 				 * we ignore those
 				 */
 				pPrt->HalfDupTimerActive = SK_TRUE;
-#ifdef XXX
-				Len = sizeof(SK_U64);
-				SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets,
-					&Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, 0),
-					pAC->Rlmt.Port[0].Net->NetNumber);
-				
-				pPrt->LastOctets = Octets;
-#endif /* XXX */
 				/* Snap statistic counters */
 				(void)SkXmUpdateStats(pAC, IoC, 0);
 
@@ -653,14 +645,6 @@
 				 pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) &&
 				!pPrt->HalfDupTimerActive) {
 				pPrt->HalfDupTimerActive = SK_TRUE;
-#ifdef XXX
-				Len = sizeof(SK_U64);
-				SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets,
-					&Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, 1),
-					pAC->Rlmt.Port[1].Net->NetNumber);
-				
-				pPrt->LastOctets = Octets;
-#endif /* XXX */
 				/* Snap statistic counters */
 				(void)SkXmUpdateStats(pAC, IoC, 1);
 
@@ -2085,12 +2069,6 @@
 			pPrt->HalfDupTimerActive = SK_FALSE;
 			if (pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF ||
 				pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) {
-#ifdef XXX
-				Len = sizeof(SK_U64);
-				SkPnmiGetVar(pAC, IoC, OID_SKGE_STAT_TX_OCTETS, (char *)&Octets,
-					&Len, (SK_U32)SK_PNMI_PORT_PHYS2INST(pAC, Port),
-					pAC->Rlmt.Port[Port].Net->NetNumber);
-#endif /* XXX */
 				/* Snap statistic counters */
 				(void)SkXmUpdateStats(pAC, IoC, Port);
 
diff -urN linux-2.6.15/drivers/net/sk98lin/ski2c.c linux-2.6.15-no1/drivers/net/sk98lin/ski2c.c
--- linux-2.6.15/drivers/net/sk98lin/ski2c.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/ski2c.c	2006-01-26 15:41:30.430261664 +0000
@@ -396,7 +396,7 @@
  *			1:	error,	 transfer does not complete, I2C transfer
  *						 killed, wait loop terminated.
  */
-int	SkI2cWait(
+static int	SkI2cWait(
 SK_AC	*pAC,	/* Adapter Context */
 SK_IOC	IoC,	/* I/O Context */
 int		Event)	/* complete event to wait for (I2C_READ or I2C_WRITE) */
@@ -481,7 +481,7 @@
  * returns	0:	success
  *			1:	error
  */
-int SkI2cWrite(
+static int SkI2cWrite(
 SK_AC	*pAC,		/* Adapter Context */
 SK_IOC	IoC,		/* I/O Context */
 SK_U32	I2cData,	/* I2C Data to write */
@@ -538,7 +538,7 @@
  *		1 if the read is completed
  *		0 if the read must be continued (I2C Bus still allocated)
  */
-int	SkI2cReadSensor(
+static int	SkI2cReadSensor(
 SK_AC		*pAC,	/* Adapter Context */
 SK_IOC		IoC,	/* I/O Context */
 SK_SENSOR	*pSen)	/* Sensor to be read */
diff -urN linux-2.6.15/drivers/net/sk98lin/sklm80.c linux-2.6.15-no1/drivers/net/sk98lin/sklm80.c
--- linux-2.6.15/drivers/net/sk98lin/sklm80.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/sklm80.c	2006-01-26 15:41:30.431261512 +0000
@@ -34,79 +34,7 @@
 #include "h/lm80.h"
 #include "h/skdrv2nd.h"		/* Adapter Control- and Driver specific Def. */
 
-#ifdef	SK_DIAG
-#define	BREAK_OR_WAIT(pAC,IoC,Event)	SkI2cWait(pAC,IoC,Event)
-#else	/* nSK_DIAG */
 #define	BREAK_OR_WAIT(pAC,IoC,Event)	break
-#endif	/* nSK_DIAG */
-
-#ifdef	SK_DIAG
-/*
- * read the register 'Reg' from the device 'Dev'
- *
- * return 	read error	-1
- *		success		the read value
- */
-int	SkLm80RcvReg(
-SK_IOC	IoC,		/* Adapter Context */
-int		Dev,		/* I2C device address */
-int		Reg)		/* register to read */
-{
-	int	Val = 0;
-	int	TempExt;
-
-	/* Signal device number */
-	if (SkI2cSndDev(IoC, Dev, I2C_WRITE)) {
-		return(-1);
-	}
-
-	if (SkI2cSndByte(IoC, Reg)) {
-		return(-1);
-	}
-
-	/* repeat start */
-	if (SkI2cSndDev(IoC, Dev, I2C_READ)) {
-		return(-1);
-	}
-
-	switch (Reg) {
-	case LM80_TEMP_IN:
-		Val = (int)SkI2cRcvByte(IoC, 1);
-
-		/* First: correct the value: it might be negative */
-		if ((Val & 0x80) != 0) {
-			/* Value is negative */
-			Val = Val - 256;
-		}
-		Val = Val * SK_LM80_TEMP_LSB;
-		SkI2cStop(IoC);
-		
-		TempExt = (int)SkLm80RcvReg(IoC, LM80_ADDR, LM80_TEMP_CTRL);
-		
-		if (Val > 0) {
-			Val += ((TempExt >> 7) * SK_LM80_TEMPEXT_LSB);
-		}
-		else {
-			Val -= ((TempExt >> 7) * SK_LM80_TEMPEXT_LSB);
-		}
-		return(Val);
-		break;
-	case LM80_VT0_IN:
-	case LM80_VT1_IN:
-	case LM80_VT2_IN:
-	case LM80_VT3_IN:
-		Val = (int)SkI2cRcvByte(IoC, 1) * SK_LM80_VT_LSB;
-		break;
-	
-	default:
-		Val = (int)SkI2cRcvByte(IoC, 1);
-		break;
-	}
-
-	SkI2cStop(IoC);
-	return(Val);
-}
-#endif	/* SK_DIAG */
 
 /*
  * read a sensors value (LM80 specific)
diff -urN linux-2.6.15/drivers/net/sk98lin/skrlmt.c linux-2.6.15-no1/drivers/net/sk98lin/skrlmt.c
--- linux-2.6.15/drivers/net/sk98lin/skrlmt.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/skrlmt.c	2006-01-26 15:41:30.432261360 +0000
@@ -282,7 +282,6 @@
 
 SK_MAC_ADDR	SkRlmtMcAddr =	{{0x01,  0x00,  0x5A,  0x52,  0x4C,  0x4D}};
 SK_MAC_ADDR	BridgeMcAddr =	{{0x01,  0x80,  0xC2,  0x00,  0x00,  0x00}};
-SK_MAC_ADDR	BcAddr = 		{{0xFF,  0xFF,  0xFF,  0xFF,  0xFF,  0xFF}};
 
 /* local variables ************************************************************/
 
diff -urN linux-2.6.15/drivers/net/sk98lin/skvpd.c linux-2.6.15-no1/drivers/net/sk98lin/skvpd.c
--- linux-2.6.15/drivers/net/sk98lin/skvpd.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/skvpd.c	2006-01-26 15:41:30.433261208 +0000
@@ -132,65 +132,6 @@
 
 #endif	/* SKDIAG */
 
-#if 0
-
-/*
-	Write the dword 'data' at address 'addr' into the VPD EEPROM, and
-	verify that the data is written.
-
- Needed Time:
-
-.				MIN		MAX
-. -------------------------------------------------------------------
-. write				1.8 ms		3.6 ms
-. internal write cyles		0.7 ms		7.0 ms
-. -------------------------------------------------------------------
-. over all program time	 	2.5 ms		10.6 ms
-. read				1.3 ms		2.6 ms
-. -------------------------------------------------------------------
-. over all 			3.8 ms		13.2 ms
-.
-
-
- Returns	0:	success
-			1:	error,	I2C transfer does not terminate
-			2:	error,	data verify error
-
- */
-static int VpdWriteDWord(
-SK_AC	*pAC,	/* pAC pointer */
-SK_IOC	IoC,	/* IO Context */
-int		addr,	/* VPD address */
-SK_U32	data)	/* VPD data to write */
-{
-	/* start VPD write */
-	/* Don't swap here, it's a data stream of bytes */
-	SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
-		("VPD write dword at addr 0x%x, data = 0x%x\n",addr,data));
-	VPD_OUT32(pAC, IoC, PCI_VPD_DAT_REG, (SK_U32)data);
-	/* But do it here */
-	addr |= VPD_WRITE;
-
-	VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, (SK_U16)(addr | VPD_WRITE));
-
-	/* this may take up to 10,6 ms */
-	if (VpdWait(pAC, IoC, VPD_WRITE)) {
-		SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
-			("Write Timed Out\n"));
-		return(1);
-	};
-
-	/* verify data */
-	if (VpdReadDWord(pAC, IoC, addr) != data) {
-		SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
-			("Data Verify Error\n"));
-		return(2);
-	}
-	return(0);
-}	/* VpdWriteDWord */
-
-#endif	/* 0 */
-
 /*
  *	Read one Stream of 'len' bytes of VPD data, starting at 'addr' from
  *	or to the I2C EEPROM.
@@ -728,7 +669,7 @@
  *		6:	fatal VPD error
  *
  */
-int	VpdSetupPara(
+static int	VpdSetupPara(
 SK_AC	*pAC,		/* common data base */
 const char	*key,	/* keyword to insert */
 const char	*buf,	/* buffer with the keyword value */
@@ -1148,50 +1089,3 @@
 	return(0);
 }
 
-
-
-/*
- *	Read the contents of the VPD EEPROM and copy it to the VPD buffer
- *	if not already done. If the keyword "VF" is not present it will be
- *	created and the error log message will be stored to this keyword.
- *	If "VF" is not present the error log message will be stored to the
- *	keyword "VL". "VL" will created or overwritten if "VF" is present.
- *	The VPD read/write area is saved to the VPD EEPROM.
- *
- * returns nothing, errors will be ignored.
- */
-void VpdErrLog(
-SK_AC	*pAC,	/* common data base */
-SK_IOC	IoC,	/* IO Context */
-char	*msg)	/* error log message */
-{
-	SK_VPD_PARA *v, vf;	/* VF */
-	int len;
-
-	SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX,
-		("VPD error log msg %s\n", msg));
-	if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) {
-		if (VpdInit(pAC, IoC) != 0) {
-			SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
-				("VPD init error\n"));
-			return;
-		}
-	}
-
-	len = strlen(msg);
-	if (len > VPD_MAX_LEN) {
-		/* cut it */
-		len = VPD_MAX_LEN;
-	}
-	if ((v = vpd_find_para(pAC, VPD_VF, &vf)) != NULL) {
-		SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("overwrite VL\n"));
-		(void)VpdSetupPara(pAC, VPD_VL, msg, len, VPD_RW_KEY, OWR_KEY);
-	}
-	else {
-		SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("write VF\n"));
-		(void)VpdSetupPara(pAC, VPD_VF, msg, len, VPD_RW_KEY, ADD_KEY);
-	}
-
-	(void)VpdUpdate(pAC, IoC);
-}
-
diff -urN linux-2.6.15/drivers/net/sk98lin/skxmac2.c linux-2.6.15-no1/drivers/net/sk98lin/skxmac2.c
--- linux-2.6.15/drivers/net/sk98lin/skxmac2.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/sk98lin/skxmac2.c	2006-01-26 15:41:30.435260904 +0000
@@ -41,13 +41,13 @@
 #endif
 
 #ifdef GENESIS
-BCOM_HACK BcomRegA1Hack[] = {
+static BCOM_HACK BcomRegA1Hack[] = {
  { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
  { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
  { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
  { 0, 0 }
 };
-BCOM_HACK BcomRegC0Hack[] = {
+static BCOM_HACK BcomRegC0Hack[] = {
  { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, { 0x17, 0x0013 },
  { 0x15, 0x0A04 }, { 0x18, 0x0420 },
  { 0, 0 }
@@ -790,7 +790,7 @@
  * Returns:
  *	nothing
  */
-void SkMacFlushRxFifo(
+static void SkMacFlushRxFifo(
 SK_AC	*pAC,	/* adapter context */
 SK_IOC	IoC,	/* IO context */
 int		Port)	/* Port Index (MAC_1 + n) */
@@ -1231,38 +1231,6 @@
 }	/* SkMacHardRst */
 
 
-/******************************************************************************
- *
- *	SkMacClearRst() - Clear the MAC reset
- *
- * Description:	calls a clear MAC reset routine dep. on board type
- *
- * Returns:
- *	nothing
- */
-void SkMacClearRst(
-SK_AC	*pAC,	/* adapter context */
-SK_IOC	IoC,	/* IO context */
-int		Port)	/* Port Index (MAC_1 + n) */
-{
-	
-#ifdef GENESIS
-	if (pAC->GIni.GIGenesis) {
-		
-		SkXmClearRst(pAC, IoC, Port);
-	}
-#endif /* GENESIS */
-	
-#ifdef YUKON
-	if (pAC->GIni.GIYukon) {
-		
-		SkGmClearRst(pAC, IoC, Port);
-	}
-#endif /* YUKON */
-
-}	/* SkMacClearRst */
-
-
 #ifdef GENESIS
 /******************************************************************************
  *
@@ -1713,7 +1681,7 @@
  * Returns:
  *	nothing
  */
-void SkXmInitDupMd(
+static void SkXmInitDupMd(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC,		/* IO context */
 int		Port)		/* Port Index (MAC_1 + n) */
@@ -1761,7 +1729,7 @@
  * Returns:
  *	nothing
  */
-void SkXmInitPauseMd(
+static void SkXmInitPauseMd(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC,		/* IO context */
 int		Port)		/* Port Index (MAC_1 + n) */
@@ -2076,283 +2044,7 @@
 }	/* SkXmInitPhyBcom */
 #endif /* GENESIS */
 
-
 #ifdef YUKON
-#ifndef SK_SLIM
-/******************************************************************************
- *
- *	SkGmEnterLowPowerMode()
- *
- * Description:	
- *	This function sets the Marvell Alaska PHY to the low power mode
- *	given by parameter mode.
- *	The following low power modes are available:
- *		
- *		- Coma Mode (Deep Sleep):
- *			Power consumption: ~15 - 30 mW
- *			The PHY cannot wake up on its own.
- *
- *		- IEEE 22.2.4.1.5 compatible power down mode
- *			Power consumption: ~240 mW
- *			The PHY cannot wake up on its own.
- *
- *		- energy detect mode
- *			Power consumption: ~160 mW
- *			The PHY can wake up on its own by detecting activity
- *			on the CAT 5 cable.
- *
- *		- energy detect plus mode
- *			Power consumption: ~150 mW
- *			The PHY can wake up on its own by detecting activity
- *			on the CAT 5 cable.
- *			Connected devices can be woken up by sending normal link
- *			pulses every one second.
- *
- * Note:
- *
- * Returns:
- *		0: ok
- *		1: error
- */
-int SkGmEnterLowPowerMode(
-SK_AC	*pAC,		/* adapter context */
-SK_IOC	IoC,		/* IO context */
-int		Port,		/* Port Index (e.g. MAC_1) */
-SK_U8	Mode)		/* low power mode */
-{
-	SK_U16	Word;
-	SK_U32	DWord;
-	SK_U8	LastMode;
-	int		Ret = 0;
-
-	if (pAC->GIni.GIYukonLite &&
-	    pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) {
-
-		/* save current power mode */
-		LastMode = pAC->GIni.GP[Port].PPhyPowerState;
-		pAC->GIni.GP[Port].PPhyPowerState = Mode;
-
-		switch (Mode) {
-			/* coma mode (deep sleep) */
-			case PHY_PM_DEEP_SLEEP:
-				/* setup General Purpose Control Register */
-				GM_OUT16(IoC, 0, GM_GP_CTRL, GM_GPCR_FL_PASS |
-					GM_GPCR_SPEED_100 | GM_GPCR_AU_ALL_DIS);
-
-				/* apply COMA mode workaround */
-				SkGmPhyWrite(pAC, IoC, Port, 29, 0x001f);
-				SkGmPhyWrite(pAC, IoC, Port, 30, 0xfff3);
-
-				SK_IN32(IoC, PCI_C(PCI_OUR_REG_1), &DWord);
-
-				SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
-				
-				/* Set PHY to Coma Mode */
-				SK_OUT32(IoC, PCI_C(PCI_OUR_REG_1), DWord | PCI_PHY_COMA);
-				
-				SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
-
-			break;
-			
-			/* IEEE 22.2.4.1.5 compatible power down mode */
-			case PHY_PM_IEEE_POWER_DOWN:
-				/*
-				 * - disable MAC 125 MHz clock
-				 * - allow MAC power down
-				 */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
-				Word |= PHY_M_PC_DIS_125CLK;
-				Word &=	~PHY_M_PC_MAC_POW_UP;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
-
-				/*
-				 * register changes must be followed by a software
-				 * reset to take effect
-				 */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word);
-				Word |= PHY_CT_RESET;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word);
-
-				/* switch IEEE compatible power down mode on */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word);
-				Word |= PHY_CT_PDOWN;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word);
-			break;
-
-			/* energy detect and energy detect plus mode */
-			case PHY_PM_ENERGY_DETECT:
-			case PHY_PM_ENERGY_DETECT_PLUS:
-				/*
-				 * - disable MAC 125 MHz clock
-				 */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
-				Word |= PHY_M_PC_DIS_125CLK;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
-				
-				/* activate energy detect mode 1 */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
-
-				/* energy detect mode */
-				if (Mode == PHY_PM_ENERGY_DETECT) {
-					Word |= PHY_M_PC_EN_DET;
-				}
-				/* energy detect plus mode */
-				else {
-					Word |= PHY_M_PC_EN_DET_PLUS;
-				}
-
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
-
-				/*
-				 * reinitialize the PHY to force a software reset
-				 * which is necessary after the register settings
-				 * for the energy detect modes.
-				 * Furthermore reinitialisation prevents that the
-				 * PHY is running out of a stable state.
-				 */
-				SkGmInitPhyMarv(pAC, IoC, Port, SK_FALSE);
-			break;
-
-			/* don't change current power mode */
-			default:
-				pAC->GIni.GP[Port].PPhyPowerState = LastMode;
-				Ret = 1;
-			break;
-		}
-	}
-	/* low power modes are not supported by this chip */
-	else {
-		Ret = 1;
-	}
-
-	return(Ret);
-
-}	/* SkGmEnterLowPowerMode */
-
-/******************************************************************************
- *
- *	SkGmLeaveLowPowerMode()
- *
- * Description:	
- *	Leave the current low power mode and switch to normal mode
- *
- * Note:
- *
- * Returns:
- *		0:	ok
- *		1:	error
- */
-int SkGmLeaveLowPowerMode(
-SK_AC	*pAC,		/* adapter context */
-SK_IOC	IoC,		/* IO context */
-int		Port)		/* Port Index (e.g. MAC_1) */
-{
-	SK_U32	DWord;
-	SK_U16	Word;
-	SK_U8	LastMode;
-	int		Ret = 0;
-
-	if (pAC->GIni.GIYukonLite &&
-		pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) {
-
-		/* save current power mode */
-		LastMode = pAC->GIni.GP[Port].PPhyPowerState;
-		pAC->GIni.GP[Port].PPhyPowerState = PHY_PM_OPERATIONAL_MODE;
-
-		switch (LastMode) {
-			/* coma mode (deep sleep) */
-			case PHY_PM_DEEP_SLEEP:
-				SK_IN32(IoC, PCI_C(PCI_OUR_REG_1), &DWord);
-
-				SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
-				
-				/* Release PHY from Coma Mode */
-				SK_OUT32(IoC, PCI_C(PCI_OUR_REG_1), DWord & ~PCI_PHY_COMA);
-				
-				SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
-				
-				SK_IN32(IoC, B2_GP_IO, &DWord);
-
-				/* set to output */
-				DWord |= (GP_DIR_9 | GP_IO_9);
-
-				/* set PHY reset */
-				SK_OUT32(IoC, B2_GP_IO, DWord);
-
-				DWord &= ~GP_IO_9; /* clear PHY reset (active high) */
-
-				/* clear PHY reset */
-				SK_OUT32(IoC, B2_GP_IO, DWord);
-			break;
-			
-			/* IEEE 22.2.4.1.5 compatible power down mode */
-			case PHY_PM_IEEE_POWER_DOWN:
-				/*
-				 * - enable MAC 125 MHz clock
-				 * - set MAC power up
-				 */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
-				Word &= ~PHY_M_PC_DIS_125CLK;
-				Word |=	PHY_M_PC_MAC_POW_UP;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
-
-				/*
-				 * register changes must be followed by a software
-				 * reset to take effect
-				 */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word);
-				Word |= PHY_CT_RESET;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word);
-
-				/* switch IEEE compatible power down mode off */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &Word);
-				Word &= ~PHY_CT_PDOWN;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, Word);
-			break;
-
-			/* energy detect and energy detect plus mode */
-			case PHY_PM_ENERGY_DETECT:
-			case PHY_PM_ENERGY_DETECT_PLUS:
-				/*
-				 * - enable MAC 125 MHz clock
-				 */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
-				Word &= ~PHY_M_PC_DIS_125CLK;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
-				
-				/* disable energy detect mode */
-				SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_CTRL, &Word);
-				Word &= ~PHY_M_PC_EN_DET_MSK;
-				SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, Word);
-
-				/*
-				 * reinitialize the PHY to force a software reset
-				 * which is necessary after the register settings
-				 * for the energy detect modes.
-				 * Furthermore reinitialisation prevents that the
-				 * PHY is running out of a stable state.
-				 */
-				SkGmInitPhyMarv(pAC, IoC, Port, SK_FALSE);
-			break;
-
-			/* don't change current power mode */
-			default:
-				pAC->GIni.GP[Port].PPhyPowerState = LastMode;
-				Ret = 1;
-			break;
-		}
-	}
-	/* low power modes are not supported by this chip */
-	else {
-		Ret = 1;
-	}
-
-	return(Ret);
-
-}	/* SkGmLeaveLowPowerMode */
-#endif /* !SK_SLIM */
-
-
 /******************************************************************************
  *
  *	SkGmInitPhyMarv() - Initialize the Marvell Phy registers
@@ -3420,145 +3112,6 @@
 }	/* SkMacAutoNegDone */
 
 
-#ifdef GENESIS
-/******************************************************************************
- *
- *	SkXmSetRxTxEn() - Special Set Rx/Tx Enable and some features in XMAC
- *
- * Description:
- *  sets MAC or PHY LoopBack and Duplex Mode in the MMU Command Reg.
- *  enables Rx/Tx
- *
- * Returns: N/A
- */
-static void SkXmSetRxTxEn(
-SK_AC	*pAC,		/* Adapter Context */
-SK_IOC	IoC,		/* IO context */
-int		Port,		/* Port Index (MAC_1 + n) */
-int		Para)		/* Parameter to set: MAC or PHY LoopBack, Duplex Mode */
-{
-	SK_U16	Word;
-
-	XM_IN16(IoC, Port, XM_MMU_CMD, &Word);
-
-	switch (Para & (SK_MAC_LOOPB_ON | SK_MAC_LOOPB_OFF)) {
-	case SK_MAC_LOOPB_ON:
-		Word |= XM_MMU_MAC_LB;
-		break;
-	case SK_MAC_LOOPB_OFF:
-		Word &= ~XM_MMU_MAC_LB;
-		break;
-	}
-
-	switch (Para & (SK_PHY_LOOPB_ON | SK_PHY_LOOPB_OFF)) {
-	case SK_PHY_LOOPB_ON:
-		Word |= XM_MMU_GMII_LOOP;
-		break;
-	case SK_PHY_LOOPB_OFF:
-		Word &= ~XM_MMU_GMII_LOOP;
-		break;
-	}
-	
-	switch (Para & (SK_PHY_FULLD_ON | SK_PHY_FULLD_OFF)) {
-	case SK_PHY_FULLD_ON:
-		Word |= XM_MMU_GMII_FD;
-		break;
-	case SK_PHY_FULLD_OFF:
-		Word &= ~XM_MMU_GMII_FD;
-		break;
-	}
-	
-	XM_OUT16(IoC, Port, XM_MMU_CMD, Word | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
-
-	/* dummy read to ensure writing */
-	XM_IN16(IoC, Port, XM_MMU_CMD, &Word);
-
-}	/* SkXmSetRxTxEn */
-#endif /* GENESIS */
-
-
-#ifdef YUKON
-/******************************************************************************
- *
- *	SkGmSetRxTxEn() - Special Set Rx/Tx Enable and some features in GMAC
- *
- * Description:
- *  sets MAC LoopBack and Duplex Mode in the General Purpose Control Reg.
- *  enables Rx/Tx
- *
- * Returns: N/A
- */
-static void SkGmSetRxTxEn(
-SK_AC	*pAC,		/* Adapter Context */
-SK_IOC	IoC,		/* IO context */
-int		Port,		/* Port Index (MAC_1 + n) */
-int		Para)		/* Parameter to set: MAC LoopBack, Duplex Mode */
-{
-	SK_U16	Ctrl;
-	
-	GM_IN16(IoC, Port, GM_GP_CTRL, &Ctrl);
-
-	switch (Para & (SK_MAC_LOOPB_ON | SK_MAC_LOOPB_OFF)) {
-	case SK_MAC_LOOPB_ON:
-		Ctrl |= GM_GPCR_LOOP_ENA;
-		break;
-	case SK_MAC_LOOPB_OFF:
-		Ctrl &= ~GM_GPCR_LOOP_ENA;
-		break;
-	}
-
-	switch (Para & (SK_PHY_FULLD_ON | SK_PHY_FULLD_OFF)) {
-	case SK_PHY_FULLD_ON:
-		Ctrl |= GM_GPCR_DUP_FULL;
-		break;
-	case SK_PHY_FULLD_OFF:
-		Ctrl &= ~GM_GPCR_DUP_FULL;
-		break;
-	}
-	
-    GM_OUT16(IoC, Port, GM_GP_CTRL, (SK_U16)(Ctrl | GM_GPCR_RX_ENA |
-		GM_GPCR_TX_ENA));
-
-	/* dummy read to ensure writing */
-	GM_IN16(IoC, Port, GM_GP_CTRL, &Ctrl);
-
-}	/* SkGmSetRxTxEn */
-#endif /* YUKON */
-
-
-#ifndef SK_SLIM
-/******************************************************************************
- *
- *	SkMacSetRxTxEn() - Special Set Rx/Tx Enable and parameters
- *
- * Description:	calls the Special Set Rx/Tx Enable routines dep. on board type
- *
- * Returns: N/A
- */
-void SkMacSetRxTxEn(
-SK_AC	*pAC,		/* Adapter Context */
-SK_IOC	IoC,		/* IO context */
-int		Port,		/* Port Index (MAC_1 + n) */
-int		Para)
-{
-#ifdef GENESIS
-	if (pAC->GIni.GIGenesis) {
-		
-		SkXmSetRxTxEn(pAC, IoC, Port, Para);
-	}
-#endif /* GENESIS */
-	
-#ifdef YUKON
-	if (pAC->GIni.GIYukon) {
-		
-		SkGmSetRxTxEn(pAC, IoC, Port, Para);
-	}
-#endif /* YUKON */
-
-}	/* SkMacSetRxTxEn */
-#endif /* !SK_SLIM */
-
-
 /******************************************************************************
  *
  *	SkMacRxTxEnable() - Enable Rx/Tx activity if port is up
@@ -3976,7 +3529,7 @@
  * Returns:
  *	nothing
  */
-void SkXmIrq(
+static void SkXmIrq(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC,		/* IO context */
 int		Port)		/* Port Index (MAC_1 + n) */
@@ -4112,7 +3665,7 @@
  * Returns:
  *	nothing
  */
-void SkGmIrq(
+static void SkGmIrq(
 SK_AC	*pAC,		/* adapter context */
 SK_IOC	IoC,		/* IO context */
 int		Port)		/* Port Index (MAC_1 + n) */
diff -urN linux-2.6.15/drivers/net/tulip/media.c linux-2.6.15-no1/drivers/net/tulip/media.c
--- linux-2.6.15/drivers/net/tulip/media.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/tulip/media.c	2006-01-26 15:41:43.439283992 +0000
@@ -44,8 +44,10 @@
 
 /* MII transceiver control section.
    Read and write the MII registers using software-generated serial
-   MDIO protocol.  See the MII specifications or DP83840A data sheet
-   for details. */
+   MDIO protocol.
+   See IEEE 802.3-2002, Clause 22.2.4 (Management functions), or the
+   DP83840A data sheet for details.
+   */
 
 int tulip_mdio_read(struct net_device *dev, int phy_id, int location)
 {
@@ -272,13 +274,29 @@
 				int reset_length = p[2 + init_length];
 				misc_info = (u16*)(reset_sequence + reset_length);
 				if (startup) {
+					int timeout = 10;	/* max 1 ms */
 					iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12);
 					for (i = 0; i < reset_length; i++)
 						iowrite32(reset_sequence[i], ioaddr + CSR12);
+
+					/* flush posted writes */
+					ioread32(ioaddr + CSR12);
+
+					/* Sect 3.10.3 in DP83840A.pdf (p39) */
+					udelay(500);
+
+					/* Section 4.2 in DP83840A.pdf (p43) */
+					/* and IEEE 802.3 "22.2.4.1.1 Reset" */
+					while (timeout-- &&
+						(tulip_mdio_read(dev, phy_num, MII_BMCR) & BMCR_RESET))
+						udelay(100);
 				}
 				for (i = 0; i < init_length; i++)
 					iowrite32(init_sequence[i], ioaddr + CSR12);
+
+				ioread32(ioaddr + CSR12);	/* flush posted writes */
 			}
+
 			tmp_info = get_u16(&misc_info[1]);
 			if (tmp_info)
 				tp->advertising[phy_num] = tmp_info | 1;
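
The added ioread32(ioaddr + CSR12) calls are the usual idiom for PCI
posted writes: iowrite32() may complete into a bridge buffer, so a read
from the same device forces the writes out before udelay() starts
counting against real hardware state. A sketch of the idiom with
placeholder names (not tulip code):

	#include <linux/types.h>
	#include <linux/delay.h>
	#include <asm/io.h>

	#define EXAMPLE_REG	0x0c	/* placeholder register offset */

	/* illustration only */
	static void example_write_and_settle(void __iomem *ioaddr, u32 val)
	{
		iowrite32(val, ioaddr + EXAMPLE_REG);
		ioread32(ioaddr + EXAMPLE_REG);	/* flush posted writes */
		udelay(500);	/* delay now starts after the write landed */
	}
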
diff -urN linux-2.6.15/drivers/net/tulip/tulip.h linux-2.6.15-no1/drivers/net/tulip/tulip.h
--- linux-2.6.15/drivers/net/tulip/tulip.h	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/tulip/tulip.h	2006-01-26 15:41:43.441283688 +0000
@@ -474,8 +474,11 @@
 			udelay(10);
 
 		if (!i)
-			printk(KERN_DEBUG "%s: tulip_stop_rxtx() failed\n",
-					pci_name(tp->pdev));
+			printk(KERN_DEBUG "%s: tulip_stop_rxtx() failed"
+					" (CSR5 0x%x CSR6 0x%x)\n",
+					pci_name(tp->pdev),
+					ioread32(ioaddr + CSR5),
+					ioread32(ioaddr + CSR6));
 	}
 }
 
 
diff -urN linux-2.6.15/drivers/net/tulip/tulip_core.c linux-2.6.15-no1/drivers/net/tulip/tulip_core.c
--- linux-2.6.15/drivers/net/tulip/tulip_core.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/tulip/tulip_core.c	2006-01-26 15:41:43.440283840 +0000
@@ -22,7 +22,7 @@
 #else
 #define DRV_VERSION	"1.1.13"
 #endif
-#define DRV_RELDATE	"May 11, 2002"
+#define DRV_RELDATE	"December 15, 2004"
 
 
 #include <linux/module.h>
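
The ipw2200 changes below remove the per-command struct host_cmd
boilerplate: __ipw_send_cmd() keeps the old body, while
ipw_send_cmd_simple() covers commands without a payload and
ipw_send_cmd_pdu() covers commands with one. Under those helpers a
hypothetical new command (both names below are made up for illustration)
reduces to:

	static int ipw_send_example(struct ipw_priv *priv,
				    struct ipw_example_params *params)
	{
		if (!priv || !params) {
			IPW_ERROR("Invalid args\n");
			return -1;
		}

		/* IPW_CMD_EXAMPLE / ipw_example_params are illustrative */
		return ipw_send_cmd_pdu(priv, IPW_CMD_EXAMPLE,
					sizeof(*params), params);
	}
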
diff -urN linux-2.6.15/drivers/net/wireless/ipw2200.c linux-2.6.15-no1/drivers/net/wireless/ipw2200.c
--- linux-2.6.15/drivers/net/wireless/ipw2200.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/wireless/ipw2200.c	2006-01-26 15:41:13.990760848 +0000
@@ -1870,7 +1870,8 @@
 }
 
 #define HOST_COMPLETE_TIMEOUT HZ
-static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
+
+static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
 {
 	int rc = 0;
 	unsigned long flags;
@@ -1899,7 +1900,7 @@
 		     priv->status);
 	printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
 
-	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0);
+	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
 	if (rc) {
 		priv->status &= ~STATUS_HCMD_ACTIVE;
 		IPW_ERROR("Failed to send %s: Reason %d\n",
@@ -1934,7 +1935,7 @@
 		goto exit;
 	}
 
-      exit:
+exit:
 	if (priv->cmdlog) {
 		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
 		priv->cmdlog_pos %= priv->cmdlog_len;
@@ -1942,61 +1943,62 @@
 	return rc;
 }
 
-static int ipw_send_host_complete(struct ipw_priv *priv)
+static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
+{
+	struct host_cmd cmd = {
+		.cmd = command,
+	};
+
+	return __ipw_send_cmd(priv, &cmd);
+}
+
+static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
+			    void *data)
 {
 	struct host_cmd cmd = {
-		.cmd = IPW_CMD_HOST_COMPLETE,
-		.len = 0
+		.cmd = command,
+		.len = len,
+		.param = data,
 	};
 
+	return __ipw_send_cmd(priv, &cmd);
+}
+
+static int ipw_send_host_complete(struct ipw_priv *priv)
+{
 	if (!priv) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
 }
 
 static int ipw_send_system_config(struct ipw_priv *priv,
 				  struct ipw_sys_config *config)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_SYSTEM_CONFIG,
-		.len = sizeof(*config)
-	};
-
 	if (!priv || !config) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	memcpy(cmd.param, config, sizeof(*config));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG, sizeof(*config),
+					config);
 }
 
 static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_SSID,
-		.len = min(len, IW_ESSID_MAX_SIZE)
-	};
-
 	if (!priv || !ssid) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	memcpy(cmd.param, ssid, cmd.len);
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
+					ssid);
 }
 
 static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_ADAPTER_ADDRESS,
-		.len = ETH_ALEN
-	};
-
 	if (!priv || !mac) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
@@ -2005,8 +2007,8 @@
 	IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
 		       priv->net_dev->name, MAC_ARG(mac));
 
-	memcpy(cmd.param, mac, ETH_ALEN);
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN,
+					mac);
 }
 
 /*
@@ -2065,51 +2067,40 @@
 static int ipw_send_scan_request_ext(struct ipw_priv *priv,
 				     struct ipw_scan_request_ext *request)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_SCAN_REQUEST_EXT,
-		.len = sizeof(*request)
-	};
-
-	memcpy(cmd.param, request, sizeof(*request));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
+					sizeof(*request), request);
 }
 
 static int ipw_send_scan_abort(struct ipw_priv *priv)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_SCAN_ABORT,
-		.len = 0
-	};
-
 	if (!priv) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
 }
 
 static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_SENSITIVITY_CALIB,
-		.len = sizeof(struct ipw_sensitivity_calib)
+	struct ipw_sensitivity_calib calib = {
+		.beacon_rssi_raw = sens,
 	};
-	struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *)
-	    &cmd.param;
-	calib->beacon_rssi_raw = sens;
-	return ipw_send_cmd(priv, &cmd);
+
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
+					&calib);
 }
 
 static int ipw_send_associate(struct ipw_priv *priv,
 			      struct ipw_associate *associate)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_ASSOCIATE,
-		.len = sizeof(*associate)
-	};
-
 	struct ipw_associate tmp_associate;
+
+	if (!priv || !associate) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
 	memcpy(&tmp_associate, associate, sizeof(*associate));
 	tmp_associate.policy_support =
 	    cpu_to_le16(tmp_associate.policy_support);
@@ -2122,80 +2113,56 @@
 	    cpu_to_le16(tmp_associate.beacon_interval);
 	tmp_associate.atim_window = cpu_to_le16(tmp_associate.atim_window);
 
-	if (!priv || !associate) {
-		IPW_ERROR("Invalid args\n");
-		return -1;
-	}
-
-	memcpy(cmd.param, &tmp_associate, sizeof(*associate));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(tmp_associate),
+					&tmp_associate);
 }
 
 static int ipw_send_supported_rates(struct ipw_priv *priv,
 				    struct ipw_supported_rates *rates)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_SUPPORTED_RATES,
-		.len = sizeof(*rates)
-	};
-
 	if (!priv || !rates) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	memcpy(cmd.param, rates, sizeof(*rates));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
+					rates);
 }
 
 static int ipw_set_random_seed(struct ipw_priv *priv)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_SEED_NUMBER,
-		.len = sizeof(u32)
-	};
+	u32 val;
 
 	if (!priv) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	get_random_bytes(&cmd.param, sizeof(u32));
+	get_random_bytes(&val, sizeof(val));
 
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
 }
 
 static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_CARD_DISABLE,
-		.len = sizeof(u32)
-	};
-
 	if (!priv) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	*((u32 *) & cmd.param) = phy_off;
-
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
+					&phy_off);
 }
 
 static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_TX_POWER,
-		.len = sizeof(*power)
-	};
-
 	if (!priv || !power) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	memcpy(cmd.param, power, sizeof(*power));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power),
+					power);
 }
 
 static int ipw_set_tx_power(struct ipw_priv *priv)
@@ -2247,18 +2214,14 @@
 	struct ipw_rts_threshold rts_threshold = {
 		.rts_threshold = rts,
 	};
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_RTS_THRESHOLD,
-		.len = sizeof(rts_threshold)
-	};
 
 	if (!priv) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	memcpy(cmd.param, &rts_threshold, sizeof(rts_threshold));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
+				sizeof(rts_threshold), &rts_threshold);
 }
 
 static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
@@ -2266,27 +2229,19 @@
 	struct ipw_frag_threshold frag_threshold = {
 		.frag_threshold = frag,
 	};
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_FRAG_THRESHOLD,
-		.len = sizeof(frag_threshold)
-	};
 
 	if (!priv) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	memcpy(cmd.param, &frag_threshold, sizeof(frag_threshold));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
+				sizeof(frag_threshold), &frag_threshold);
 }
 
 static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_POWER_MODE,
-		.len = sizeof(u32)
-	};
-	u32 *param = (u32 *) (&cmd.param);
+	u32 param;
 
 	if (!priv) {
 		IPW_ERROR("Invalid args\n");
@@ -2297,17 +2252,18 @@
 	 * level */
 	switch (mode) {
 	case IPW_POWER_BATTERY:
-		*param = IPW_POWER_INDEX_3;
+		param = IPW_POWER_INDEX_3;
 		break;
 	case IPW_POWER_AC:
-		*param = IPW_POWER_MODE_CAM;
+		param = IPW_POWER_MODE_CAM;
 		break;
 	default:
-		*param = mode;
+		param = mode;
 		break;
 	}
 
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
+					&param);
 }
 
 static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
@@ -2316,18 +2272,14 @@
 		.short_retry_limit = slimit,
 		.long_retry_limit = llimit
 	};
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_RETRY_LIMIT,
-		.len = sizeof(retry_limit)
-	};
 
 	if (!priv) {
 		IPW_ERROR("Invalid args\n");
 		return -1;
 	}
 
-	memcpy(cmd.param, &retry_limit, sizeof(retry_limit));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
+					&retry_limit);
 }
 
 /*
@@ -5672,54 +5624,44 @@
 
 static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
 {
-	struct ipw_tgi_tx_key *key;
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_TGI_TX_KEY,
-		.len = sizeof(*key)
-	};
+	struct ipw_tgi_tx_key key;
 
 	if (!(priv->ieee->sec.flags & (1 << index)))
 		return;
 
-	key = (struct ipw_tgi_tx_key *)&cmd.param;
-	key->key_id = index;
-	memcpy(key->key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
-	key->security_type = type;
-	key->station_index = 0;	/* always 0 for BSS */
-	key->flags = 0;
+	key.key_id = index;
+	memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
+	key.security_type = type;
+	key.station_index = 0;	/* always 0 for BSS */
+	key.flags = 0;
 	/* 0 for new key; previous value of counter (after fatal error) */
-	key->tx_counter[0] = 0;
-	key->tx_counter[1] = 0;
+	key.tx_counter[0] = 0;
+	key.tx_counter[1] = 0;
 
-	ipw_send_cmd(priv, &cmd);
+	ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
 }
 
 static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
 {
-	struct ipw_wep_key *key;
+	struct ipw_wep_key key;
 	int i;
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_WEP_KEY,
-		.len = sizeof(*key)
-	};
 
-	key = (struct ipw_wep_key *)&cmd.param;
-	key->cmd_id = DINO_CMD_WEP_KEY;
-	key->seq_num = 0;
+	key.cmd_id = DINO_CMD_WEP_KEY;
+	key.seq_num = 0;
 
 	/* Note: AES keys cannot be set for multiple times.
 	 * Only set it at the first time. */
 	for (i = 0; i < 4; i++) {
-		key->key_index = i | type;
+		key.key_index = i | type;
 		if (!(priv->ieee->sec.flags & (1 << i))) {
-			key->key_size = 0;
+			key.key_size = 0;
 			continue;
 		}
 
-		key->key_size = priv->ieee->sec.key_sizes[i];
-		memcpy(key->key, priv->ieee->sec.keys[i], key->key_size);
+		key.key_size = priv->ieee->sec.key_sizes[i];
+		memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
 
-		ipw_send_cmd(priv, &cmd);
+		ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
 	}
 }
 
@@ -6216,15 +6158,10 @@
 static int ipw_set_rsn_capa(struct ipw_priv *priv,
 			    char *capabilities, int length)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_RSN_CAPABILITIES,
-		.len = length,
-	};
-
 	IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
 
-	memcpy(cmd.param, capabilities, length);
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
+					capabilities);
 }
 
 /*
@@ -7011,25 +6948,15 @@
 static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
 				       *qos_param)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_QOS_PARAMETERS,
-		.len = (sizeof(struct ieee80211_qos_parameters) * 3)
-	};
-
-	memcpy(cmd.param, qos_param, sizeof(*qos_param) * 3);
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
+					sizeof(*qos_param) * 3, qos_param);
 }
 
 static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
 				     *qos_param)
 {
-	struct host_cmd cmd = {
-		.cmd = IPW_CMD_WME_INFO,
-		.len = sizeof(*qos_param)
-	};
-
-	memcpy(cmd.param, qos_param, sizeof(*qos_param));
-	return ipw_send_cmd(priv, &cmd);
+	return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
+					qos_param);
 }
 
 #endif				/* CONFIG_IPW_QOS */
@@ -9649,11 +9576,6 @@
 	u16 remaining_bytes;
 	int fc;
 
-	/* If there isn't room in the queue, we return busy and let the
-	 * network stack requeue the packet for us */
-	if (ipw_queue_space(q) < q->high_mark)
-		return NETDEV_TX_BUSY;
-
 	switch (priv->ieee->iw_mode) {
 	case IW_MODE_ADHOC:
 		hdr_len = IEEE80211_3ADDR_LEN;
@@ -9871,7 +9793,7 @@
 
       fail_unlock:
 	spin_unlock_irqrestore(&priv->lock, flags);
-	return 1;
+	return -1;
 }
 
 static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
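
With __ipw_send_cmd() factored out, every sender above collapses onto one of two
wrappers: ipw_send_cmd_simple() for zero-length commands and ipw_send_cmd_pdu() for
commands carrying a payload. A hedged sketch of how a new fixed-size command would be
added on top of them (IPW_CMD_EXAMPLE and struct ipw_example are illustrative names,
not part of the driver):

	/* Hedged sketch only: a hypothetical sender built on the new helpers. */
	struct ipw_example {
		u16 value;		/* cpu_to_le16() if the firmware is little-endian */
	} __attribute__ ((packed));

	static int ipw_send_example(struct ipw_priv *priv, u16 value)
	{
		struct ipw_example param = {
			.value = value,
		};

		if (!priv) {
			IPW_ERROR("Invalid args\n");
			return -1;
		}

		return ipw_send_cmd_pdu(priv, IPW_CMD_EXAMPLE, sizeof(param), &param);
	}
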
diff -urN linux-2.6.15/drivers/net/wireless/ipw2200.h linux-2.6.15-no1/drivers/net/wireless/ipw2200.h
--- linux-2.6.15/drivers/net/wireless/ipw2200.h	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/net/wireless/ipw2200.h	2006-01-26 15:41:13.992760544 +0000
@@ -1860,7 +1860,7 @@
 	u8 cmd;
 	u8 len;
 	u16 reserved;
-	u32 param[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH];
+	u32 *param;
 } __attribute__ ((packed));
 
 struct ipw_cmd_log {
diff -urN linux-2.6.15/drivers/pci/quirks.c linux-2.6.15-no1/drivers/pci/quirks.c
--- linux-2.6.15/drivers/pci/quirks.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/pci/quirks.c	2006-01-26 15:41:13.993760392 +0000
@@ -1125,6 +1125,9 @@
 	case 0x27c4:
 		ich = 7;
 		break;
+	case 0x2828:	/* ICH8M */
+		ich = 8;
+		break;
 	default:
 		/* we do not handle this PCI device */
 		return;
@@ -1144,7 +1147,7 @@
 		else
 			return;			/* not in combined mode */
 	} else {
-		WARN_ON((ich != 6) && (ich != 7));
+		WARN_ON((ich != 6) && (ich != 7) && (ich != 8));
 		tmp &= 0x3;  /* interesting bits 1:0 */
 		if (tmp & (1 << 0))
 			comb = (1 << 2);	/* PATA port 0, SATA port 1 */
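
The hunk above simply teaches the existing combined-mode quirk about one more device.
For reference, the device-ID to ICH generation mapping the switch now implements (IDs
as in quirk_intel_ide_combined(), reproduced later in this patch) is:

	/* Summary table only; the driver keeps this as a switch statement. */
	static const struct { u16 devid; int ich; } ich_sata_ids[] = {
		{ 0x24d1, 5 }, { 0x24df, 5 }, { 0x25a3, 5 }, { 0x25b0, 5 },
		{ 0x2651, 6 }, { 0x2652, 6 }, { 0x2653, 6 },
		{ 0x2680, 6 },	/* ESB2 */
		{ 0x27c0, 7 }, { 0x27c4, 7 },
		{ 0x2828, 8 },	/* ICH8M, added here; decoded like ICH6/7 */
	};
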
diff -urN linux-2.6.15/drivers/pci/quirks.c.orig linux-2.6.15-no1/drivers/pci/quirks.c.orig
--- linux-2.6.15/drivers/pci/quirks.c.orig	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.15-no1/drivers/pci/quirks.c.orig	2006-01-03 03:21:10.000000000 +0000
@@ -0,0 +1,1318 @@
+/*
+ *  This file contains work-arounds for many known PCI hardware
+ *  bugs.  Devices present only on certain architectures (host
+ *  bridges et cetera) should be handled in arch-specific code.
+ *
+ *  Note: any quirks for hotpluggable devices must _NOT_ be declared __init.
+ *
+ *  Copyright (c) 1999 Martin Mares <mj@ucw.cz>
+ *
+ *  Init/reset quirks for USB host controllers should be in the
+ *  USB quirks file, where their drivers can access reuse it.
+ *
+ *  The bridge optimization stuff has been removed. If you really
+ *  have a silly BIOS which is unable to set your host bridge right,
+ *  use the PowerTweak utility (see http://powertweak.sourceforge.net).
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/acpi.h>
+#include "pci.h"
+
+/* Deal with broken BIOSes that neglect to enable passive release,
+   which can cause problems in combination with the 82441FX/PPro MTRRs */
+static void __devinit quirk_passive_release(struct pci_dev *dev)
+{
+	struct pci_dev *d = NULL;
+	unsigned char dlc;
+
+	/* We have to make sure a particular bit is set in the PIIX3
+	   ISA bridge, so we have to go out and find it. */
+	while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
+		pci_read_config_byte(d, 0x82, &dlc);
+		if (!(dlc & 1<<1)) {
+			printk(KERN_ERR "PCI: PIIX3: Enabling Passive Release on %s\n", pci_name(d));
+			dlc |= 1<<1;
+			pci_write_config_byte(d, 0x82, dlc);
+		}
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82441,	quirk_passive_release );
+
+/*  The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround
+    but VIA don't answer queries. If you happen to have good contacts at VIA
+    ask them for me please -- Alan 
+    
+    This appears to be BIOS-dependent, not version-dependent, so presumably
+    there is a chipset-level fix. */
+int isa_dma_bridge_buggy;		/* Exported */
+    
+static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev)
+{
+	if (!isa_dma_bridge_buggy) {
+		isa_dma_bridge_buggy=1;
+		printk(KERN_INFO "Activating ISA DMA hang workarounds.\n");
+	}
+}
+	/*
+	 * It's not totally clear which chipsets are the problematic ones.
+	 * We know 82C586 and 82C596 variants are affected.
+	 */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C586_0,	quirk_isa_dma_hangs );
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C596,	quirk_isa_dma_hangs );
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82371SB_0,  quirk_isa_dma_hangs );
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL,	PCI_DEVICE_ID_AL_M1533, 	quirk_isa_dma_hangs );
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,	PCI_DEVICE_ID_NEC_CBUS_1,	quirk_isa_dma_hangs );
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,	PCI_DEVICE_ID_NEC_CBUS_2,	quirk_isa_dma_hangs );
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC,	PCI_DEVICE_ID_NEC_CBUS_3,	quirk_isa_dma_hangs );
+
+int pci_pci_problems;
+
+/*
+ *	Chipsets where PCI->PCI transfers vanish or hang
+ */
+static void __devinit quirk_nopcipci(struct pci_dev *dev)
+{
+	if ((pci_pci_problems & PCIPCI_FAIL)==0) {
+		printk(KERN_INFO "Disabling direct PCI/PCI transfers.\n");
+		pci_pci_problems |= PCIPCI_FAIL;
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_5597,		quirk_nopcipci );
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_496,		quirk_nopcipci );
+
+/*
+ *	Triton requires workarounds to be used by the drivers
+ */
+static void __devinit quirk_triton(struct pci_dev *dev)
+{
+	if ((pci_pci_problems&PCIPCI_TRITON)==0) {
+		printk(KERN_INFO "Limiting direct PCI/PCI transfers.\n");
+		pci_pci_problems |= PCIPCI_TRITON;
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 	PCI_DEVICE_ID_INTEL_82437, 	quirk_triton ); 
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 	PCI_DEVICE_ID_INTEL_82437VX, 	quirk_triton ); 
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 	PCI_DEVICE_ID_INTEL_82439, 	quirk_triton ); 
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 	PCI_DEVICE_ID_INTEL_82439TX, 	quirk_triton ); 
+
+/*
+ *	VIA Apollo KT133 needs PCI latency patch
+ *	Made according to a windows driver based patch by George E. Breese
+ *	see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm
+ *      Also see http://www.au-ja.org/review-kt133a-1-en.phtml for
+ *      the info on which Mr Breese based his work.
+ *
+ *	Updated based on further information from the site and also on
+ *	information provided by VIA 
+ */
+static void __devinit quirk_vialatency(struct pci_dev *dev)
+{
+	struct pci_dev *p;
+	u8 rev;
+	u8 busarb;
+	/* Ok we have a potential problem chipset here. Now see if we have
+	   a buggy southbridge */
+	   
+	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
+	if (p!=NULL) {
+		pci_read_config_byte(p, PCI_CLASS_REVISION, &rev);
+		/* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */
+		/* Check for buggy part revisions */
+		if (rev < 0x40 || rev > 0x42)
+			goto exit;
+	} else {
+		p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
+		if (p==NULL)	/* No problem parts */
+			goto exit;
+		pci_read_config_byte(p, PCI_CLASS_REVISION, &rev);
+		/* Check for buggy part revisions */
+		if (rev < 0x10 || rev > 0x12) 
+			goto exit;
+	}
+	
+	/*
+	 *	Ok we have the problem. Now set the PCI master grant to 
+	 *	occur every master grant. The apparent bug is that under high
+	 *	PCI load (quite common in Linux of course) you can get data
+	 *	loss when the CPU is held off the bus for 3 bus master requests.
+	 *	This happens to include the IDE controllers....
+	 *
+	 *	VIA only apply this fix when an SB Live! is present but under
+	 *	both Linux and Windows this isn't enough, and we have seen
+	 *	corruption without SB Live! but with things like 3 UDMA IDE
+	 *	controllers. So we ignore that bit of the VIA recommendation..
+	 */
+
+	pci_read_config_byte(dev, 0x76, &busarb);
+	/* Set bits 5:4 of byte 0x76 to 01b:
+	   "Master priority rotation on every PCI master grant" */
+	busarb &= ~(1<<5);
+	busarb |= (1<<4);
+	pci_write_config_byte(dev, 0x76, busarb);
+	printk(KERN_INFO "Applying VIA southbridge workaround.\n");
+exit:
+	pci_dev_put(p);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8363_0,	quirk_vialatency );
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8371_1,	quirk_vialatency );
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8361,		quirk_vialatency );
+
+/*
+ *	VIA Apollo VP3 needs ETBF on BT848/878
+ */
+static void __devinit quirk_viaetbf(struct pci_dev *dev)
+{
+	if ((pci_pci_problems&PCIPCI_VIAETBF)==0) {
+		printk(KERN_INFO "Limiting direct PCI/PCI transfers.\n");
+		pci_pci_problems |= PCIPCI_VIAETBF;
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C597_0,	quirk_viaetbf );
+
+static void __devinit quirk_vsfx(struct pci_dev *dev)
+{
+	if ((pci_pci_problems&PCIPCI_VSFX)==0) {
+		printk(KERN_INFO "Limiting direct PCI/PCI transfers.\n");
+		pci_pci_problems |= PCIPCI_VSFX;
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C576,	quirk_vsfx );
+
+/*
+ *	Ali Magik requires workarounds to be used by the drivers
+ *	that DMA to AGP space. Latency must be set to 0xA and triton
+ *	workaround applied too
+ *	[Info kindly provided by ALi]
+ */	
+static void __init quirk_alimagik(struct pci_dev *dev)
+{
+	if ((pci_pci_problems&PCIPCI_ALIMAGIK)==0) {
+		printk(KERN_INFO "Limiting direct PCI/PCI transfers.\n");
+		pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, 	PCI_DEVICE_ID_AL_M1647, 	quirk_alimagik );
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, 	PCI_DEVICE_ID_AL_M1651, 	quirk_alimagik );
+
+/*
+ *	Natoma has some interesting boundary conditions with Zoran stuff
+ *	at least
+ */
+static void __devinit quirk_natoma(struct pci_dev *dev)
+{
+	if ((pci_pci_problems&PCIPCI_NATOMA)==0) {
+		printk(KERN_INFO "Limiting direct PCI/PCI transfers.\n");
+		pci_pci_problems |= PCIPCI_NATOMA;
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 	PCI_DEVICE_ID_INTEL_82441, 	quirk_natoma ); 
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 	PCI_DEVICE_ID_INTEL_82443LX_0, 	quirk_natoma ); 
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 	PCI_DEVICE_ID_INTEL_82443LX_1, 	quirk_natoma ); 
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 	PCI_DEVICE_ID_INTEL_82443BX_0, 	quirk_natoma ); 
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 	PCI_DEVICE_ID_INTEL_82443BX_1, 	quirk_natoma ); 
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 	PCI_DEVICE_ID_INTEL_82443BX_2, 	quirk_natoma );
+
+/*
+ *  This chip can cause PCI parity errors if config register 0xA0 is read
+ *  while DMAs are occurring.
+ */
+static void __devinit quirk_citrine(struct pci_dev *dev)
+{
+	dev->cfg_size = 0xA0;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM,	PCI_DEVICE_ID_IBM_CITRINE,	quirk_citrine );
+
+/*
+ *  S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
+ *  If it's needed, re-allocate the region.
+ */
+static void __devinit quirk_s3_64M(struct pci_dev *dev)
+{
+	struct resource *r = &dev->resource[0];
+
+	if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
+		r->start = 0;
+		r->end = 0x3ffffff;
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,	PCI_DEVICE_ID_S3_868,		quirk_s3_64M );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3,	PCI_DEVICE_ID_S3_968,		quirk_s3_64M );
+
+static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region,
+	unsigned size, int nr, const char *name)
+{
+	region &= ~(size-1);
+	if (region) {
+		struct pci_bus_region bus_region;
+		struct resource *res = dev->resource + nr;
+
+		res->name = pci_name(dev);
+		res->start = region;
+		res->end = region + size - 1;
+		res->flags = IORESOURCE_IO;
+
+		/* Convert from PCI bus to resource space.  */
+		bus_region.start = res->start;
+		bus_region.end = res->end;
+		pcibios_bus_to_resource(dev, res, &bus_region);
+
+		pci_claim_resource(dev, nr);
+		printk("PCI quirk: region %04x-%04x claimed by %s\n", region, region + size - 1, name);
+	}
+}	
+
+/*
+ *	The ATI northbridge raises an MCE on the processor if you even
+ *	read anywhere between 0x3b0 and 0x3bb, or read 0x3d3
+ */
+static void __devinit quirk_ati_exploding_mce(struct pci_dev *dev)
+{
+	printk(KERN_INFO "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb.\n");
+	/* We must not look at these I/O locations */
+	request_region(0x3b0, 0x0C, "RadeonIGP");
+	request_region(0x3d3, 0x01, "RadeonIGP");
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI,	PCI_DEVICE_ID_ATI_RS100,   quirk_ati_exploding_mce );
+
+/*
+ * Let's make the southbridge information explicit instead
+ * of having to worry about people probing the ACPI areas,
+ * for example.. (Yes, it happens, and if you read the wrong
+ * ACPI register it will put the machine to sleep with no
+ * way of waking it up again. Bummer).
+ *
+ * ALI M7101: Two IO regions pointed to by words at
+ *	0xE0 (64 bytes of ACPI registers)
+ *	0xE2 (32 bytes of SMB registers)
+ */
+static void __devinit quirk_ali7101_acpi(struct pci_dev *dev)
+{
+	u16 region;
+
+	pci_read_config_word(dev, 0xE0, &region);
+	quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
+	pci_read_config_word(dev, 0xE2, &region);
+	quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL,	PCI_DEVICE_ID_AL_M7101,		quirk_ali7101_acpi );
+
+static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
+{
+	u32 devres;
+	u32 mask, size, base;
+
+	pci_read_config_dword(dev, port, &devres);
+	if ((devres & enable) != enable)
+		return;
+	mask = (devres >> 16) & 15;
+	base = devres & 0xffff;
+	size = 16;
+	for (;;) {
+		unsigned bit = size >> 1;
+		if ((bit & mask) == bit)
+			break;
+		size = bit;
+	}
+	/*
+	 * For now we only print it out. Eventually we'll want to
+	 * reserve it (at least if it's in the 0x1000+ range), but
+	 * let's get enough confirmation reports first. 
+	 */
+	base &= -size;
+	printk("%s PIO at %04x-%04x\n", name, base, base + size - 1);
+}
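
The loop above shrinks size until the next-smaller power of two is fully contained in
mask, so size ends up as twice the highest set mask bit (or 1 for a zero mask). A
hedged walk-through, assuming the illustrative register value devres = 0x00670121 and
enable = 3 << 21:

	/* Worked decode of piix4_io_quirk() for one made-up value. */
	u32 devres = 0x00670121;	/* illustrative, not read from hardware */
	u32 mask = (devres >> 16) & 15;	/* = 7 */
	u32 base = devres & 0xffff;	/* = 0x0121 */
	u32 size = 16;
	/* 16 -> 8, since (8 & 7) != 8; then (4 & 7) == 4, so the loop stops */
	size = 8;
	base &= -size;			/* 0x0121 & ~7 = 0x0120 */
	/* result: "PIIX4 devres B PIO at 0120-0127" */
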
+
+static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
+{
+	u32 devres;
+	u32 mask, size, base;
+
+	pci_read_config_dword(dev, port, &devres);
+	if ((devres & enable) != enable)
+		return;
+	base = devres & 0xffff0000;
+	mask = (devres & 0x3f) << 16;
+	size = 128 << 16;
+	for (;;) {
+		unsigned bit = size >> 1;
+		if ((bit & mask) == bit)
+			break;
+		size = bit;
+	}
+	/*
+	 * For now we only print it out. Eventually we'll want to
+	 * reserve it, but let's get enough confirmation reports first. 
+	 */
+	base &= -size;
+	printk("%s MMIO at %04x-%04x\n", name, base, base + size - 1);
+}
+
+/*
+ * PIIX4 ACPI: Two IO regions pointed to by longwords at
+ *	0x40 (64 bytes of ACPI registers)
+ *	0x90 (16 bytes of SMB registers)
+ * and a few strange programmable PIIX4 device resources.
+ */
+static void __devinit quirk_piix4_acpi(struct pci_dev *dev)
+{
+	u32 region, res_a;
+
+	pci_read_config_dword(dev, 0x40, &region);
+	quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
+	pci_read_config_dword(dev, 0x90, &region);
+	quirk_io_region(dev, region, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");
+
+	/* Device resource A has enables for some of the other ones */
+	pci_read_config_dword(dev, 0x5c, &res_a);
+
+	piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
+	piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);
+
+	/* Device resource D is just bitfields for static resources */
+
+	/* Device 12 enabled? */
+	if (res_a & (1 << 29)) {
+		piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
+		piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
+	}
+	/* Device 13 enabled? */
+	if (res_a & (1 << 30)) {
+		piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
+		piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
+	}
+	piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
+	piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82371AB_3,	quirk_piix4_acpi );
+
+/*
+ * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at
+ *	0x40 (128 bytes of ACPI, GPIO & TCO registers)
+ *	0x58 (64 bytes of GPIO I/O space)
+ */
+static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev)
+{
+	u32 region;
+
+	pci_read_config_dword(dev, 0x40, &region);
+	quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO");
+
+	pci_read_config_dword(dev, 0x58, &region);
+	quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801AA_0,		quirk_ich4_lpc_acpi );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801AB_0,		quirk_ich4_lpc_acpi );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801BA_0,		quirk_ich4_lpc_acpi );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801BA_10,	quirk_ich4_lpc_acpi );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801CA_0,		quirk_ich4_lpc_acpi );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801CA_12,	quirk_ich4_lpc_acpi );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801DB_0,		quirk_ich4_lpc_acpi );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801DB_12,	quirk_ich4_lpc_acpi );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_82801EB_0,		quirk_ich4_lpc_acpi );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,    PCI_DEVICE_ID_INTEL_ESB_1,		quirk_ich4_lpc_acpi );
+
+static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev)
+{
+	u32 region;
+
+	pci_read_config_dword(dev, 0x40, &region);
+	quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH6 ACPI/GPIO/TCO");
+
+	pci_read_config_dword(dev, 0x48, &region);
+	quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi );
+
+/*
+ * VIA ACPI: One IO region pointed to by longword at
+ *	0x48 or 0x20 (256 bytes of ACPI registers)
+ */
+static void __devinit quirk_vt82c586_acpi(struct pci_dev *dev)
+{
+	u8 rev;
+	u32 region;
+
+	pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
+	if (rev & 0x10) {
+		pci_read_config_dword(dev, 0x48, &region);
+		region &= PCI_BASE_ADDRESS_IO_MASK;
+		quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES, "vt82c586 ACPI");
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C586_3,	quirk_vt82c586_acpi );
+
+/*
+ * VIA VT82C686 ACPI: Three IO regions pointed to by (long)words at
+ *	0x48 (256 bytes of ACPI registers)
+ *	0x70 (128 bytes of hardware monitoring register)
+ *	0x90 (16 bytes of SMB registers)
+ */
+static void __devinit quirk_vt82c686_acpi(struct pci_dev *dev)
+{
+	u16 hm;
+	u32 smb;
+
+	quirk_vt82c586_acpi(dev);
+
+	pci_read_config_word(dev, 0x70, &hm);
+	hm &= PCI_BASE_ADDRESS_IO_MASK;
+	quirk_io_region(dev, hm, 128, PCI_BRIDGE_RESOURCES + 1, "vt82c686 HW-mon");
+
+	pci_read_config_dword(dev, 0x90, &smb);
+	smb &= PCI_BASE_ADDRESS_IO_MASK;
+	quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 2, "vt82c686 SMB");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C686_4,	quirk_vt82c686_acpi );
+
+/*
+ * VIA VT8235 ISA Bridge: Two IO regions pointed to by words at
+ *	0x88 (128 bytes of power management registers)
+ *	0xd0 (16 bytes of SMB registers)
+ */
+static void __devinit quirk_vt8235_acpi(struct pci_dev *dev)
+{
+	u16 pm, smb;
+
+	pci_read_config_word(dev, 0x88, &pm);
+	pm &= PCI_BASE_ADDRESS_IO_MASK;
+	quirk_io_region(dev, pm, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
+
+	pci_read_config_word(dev, 0xd0, &smb);
+	smb &= PCI_BASE_ADDRESS_IO_MASK;
+	quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 1, "vt8235 SMB");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8235,	quirk_vt8235_acpi);
+
+
+#ifdef CONFIG_X86_IO_APIC 
+
+#include <asm/io_apic.h>
+
+/*
+ * VIA 686A/B: If an IO-APIC is active, we need to route all on-chip
+ * devices to the external APIC.
+ *
+ * TODO: When we have device-specific interrupt routers,
+ * this code will go away from quirks.
+ */
+static void __devinit quirk_via_ioapic(struct pci_dev *dev)
+{
+	u8 tmp;
+	
+	if (nr_ioapics < 1)
+		tmp = 0;    /* nothing routed to external APIC */
+	else
+		tmp = 0x1f; /* all known bits (4-0) routed to external APIC */
+		
+	printk(KERN_INFO "PCI: %sbling Via external APIC routing\n",
+	       tmp == 0 ? "Disa" : "Ena");
+
+	/* Offset 0x58: External APIC IRQ output control */
+	pci_write_config_byte (dev, 0x58, tmp);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C686,	quirk_via_ioapic );
+
+/*
+ * VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' bit.
+ * This leads to doubled level interrupt rates.
+ * Set this bit to get rid of cycle wastage.
+ * Otherwise harmless.
+ */
+static void __devinit quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
+{
+	u8 misc_control2;
+#define BYPASS_APIC_DEASSERT 8
+
+	pci_read_config_byte(dev, 0x5B, &misc_control2);
+	if (!(misc_control2 & BYPASS_APIC_DEASSERT)) {
+		printk(KERN_INFO "PCI: Bypassing VIA 8237 APIC De-Assert Message\n");
+		pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_8237,		quirk_via_vt8237_bypass_apic_deassert);
+
+/*
+ * The AMD IO-APIC can hang the box when an APIC IRQ is masked.
+ * We check all revs >= B0 (but not pre-production parts!) as the bug
+ * is currently marked NoFix.
+ *
+ * We have multiple reports of hangs with this chipset that went away with
+ * noapic specified. For the moment we assume it's the errata. We may be wrong
+ * of course. However the advice is demonstrably good even if so..
+ */
+static void __devinit quirk_amd_ioapic(struct pci_dev *dev)
+{
+	u8 rev;
+
+	pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
+	if (rev >= 0x02) {
+		printk(KERN_WARNING "I/O APIC: AMD Errata #22 may be present. In the event of instability try\n");
+		printk(KERN_WARNING "        : booting with the \"noapic\" option.\n");
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_VIPER_7410,	quirk_amd_ioapic );
+
+static void __init quirk_ioapic_rmw(struct pci_dev *dev)
+{
+	if (dev->devfn == 0 && dev->bus->number == 0)
+		sis_apic_bug = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI,	PCI_ANY_ID,			quirk_ioapic_rmw );
+
+int pci_msi_quirk;
+
+#define AMD8131_revA0        0x01
+#define AMD8131_revB0        0x11
+#define AMD8131_MISC         0x40
+#define AMD8131_NIOAMODE_BIT 0
+static void __init quirk_amd_8131_ioapic(struct pci_dev *dev)
+{
+	unsigned char revid, tmp;
+
+	pci_msi_quirk = 1;
+	printk(KERN_WARNING "PCI: MSI quirk detected. pci_msi_quirk set.\n");
+
+	if (nr_ioapics == 0)
+		return;
+
+	pci_read_config_byte(dev, PCI_REVISION_ID, &revid);
+	if (revid == AMD8131_revA0 || revid == AMD8131_revB0) {
+		printk(KERN_INFO "Fixing up AMD8131 IOAPIC mode\n");
+		pci_read_config_byte(dev, AMD8131_MISC, &tmp);
+		tmp &= ~(1 << AMD8131_NIOAMODE_BIT);
+		pci_write_config_byte(dev, AMD8131_MISC, tmp);
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_8131_APIC,	quirk_amd_8131_ioapic );
+
+static void __init quirk_svw_msi(struct pci_dev *dev)
+{
+	pci_msi_quirk = 1;
+	printk(KERN_WARNING "PCI: MSI quirk detected. pci_msi_quirk set.\n");
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_svw_msi );
+#endif /* CONFIG_X86_IO_APIC */
+
+
+/*
+ * FIXME: it is questionable that quirk_via_acpi
+ * is needed.  It shows up as an ISA bridge, and does not
+ * support the PCI_INTERRUPT_LINE register at all.  Therefore
+ * it seems like setting the pci_dev's 'irq' to the
+ * value of the ACPI SCI interrupt is only done for convenience.
+ *	-jgarzik
+ */
+static void __devinit quirk_via_acpi(struct pci_dev *d)
+{
+	/*
+	 * VIA ACPI device: SCI IRQ line in PCI config byte 0x42
+	 */
+	u8 irq;
+	pci_read_config_byte(d, 0x42, &irq);
+	irq &= 0xf;
+	if (irq && (irq != 2))
+		d->irq = irq;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C586_3,	quirk_via_acpi );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C686_4,	quirk_via_acpi );
+
+/*
+ * Via 686A/B:  The PCI_INTERRUPT_LINE register for the on-chip
+ * devices, USB0/1, AC97, MC97, and ACPI, has an unusual feature:
+ * when written, it makes an internal connection to the PIC.
+ * For these devices, this register is defined to be 4 bits wide.
+ * Normally this is fine.  However for IO-APIC motherboards, or
+ * non-x86 architectures (yes Via exists on PPC among other places),
+ * we must mask the PCI_INTERRUPT_LINE value versus 0xf to get
+ * interrupts delivered properly.
+ */
+static void quirk_via_irq(struct pci_dev *dev)
+{
+	u8 irq, new_irq;
+
+	new_irq = dev->irq & 0xf;
+	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
+	if (new_irq != irq) {
+		printk(KERN_INFO "PCI: Via IRQ fixup for %s, from %d to %d\n",
+			pci_name(dev), irq, new_irq);
+		udelay(15);	/* unknown if delay really needed */
+		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
+	}
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_irq);
+
+/*
+ * VIA VT82C598 has its device ID settable and many BIOSes
+ * set it to the ID of VT82C597 for backward compatibility.
+ * We need to switch it off to be able to recognize the real
+ * type of the chip.
+ */
+static void __devinit quirk_vt82c598_id(struct pci_dev *dev)
+{
+	pci_write_config_byte(dev, 0xfc, 0);
+	pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA,	PCI_DEVICE_ID_VIA_82C597_0,	quirk_vt82c598_id );
+
+/*
+ * CardBus controllers have a legacy base address that enables them
+ * to respond as i82365 pcmcia controllers.  We don't want them to
+ * do this even if the Linux CardBus driver is not loaded, because
+ * the Linux i82365 driver does not (and should not) handle CardBus.
+ */
+static void __devinit quirk_cardbus_legacy(struct pci_dev *dev)
+{
+	if ((PCI_CLASS_BRIDGE_CARDBUS << 8) ^ dev->class)
+		return;
+	pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy);
+
+/*
+ * Following the PCI ordering rules is optional on the AMD762. I'm not
+ * sure what the designers were smoking but let's not inhale...
+ *
+ * To be fair to AMD, it follows the spec by default; it's BIOS people
+ * who turn it off!
+ */
+static void __devinit quirk_amd_ordering(struct pci_dev *dev)
+{
+	u32 pcic;
+	pci_read_config_dword(dev, 0x4C, &pcic);
+	if ((pcic&6)!=6) {
+		pcic |= 6;
+		printk(KERN_WARNING "BIOS failed to enable PCI standards compliance, fixing this error.\n");
+		pci_write_config_dword(dev, 0x4C, pcic);
+		pci_read_config_dword(dev, 0x84, &pcic);
+		pcic |= (1<<23);	/* Required in this mode */
+		pci_write_config_dword(dev, 0x84, pcic);
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,	PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering );
+
+/*
+ *	DreamWorks provided workaround for Dunord I-3000 problem
+ *
+ *	This card decodes and responds to addresses not apparently
+ *	assigned to it. We force a larger allocation to ensure that
+ *	nothing gets put too close to it.
+ */
+static void __devinit quirk_dunord ( struct pci_dev * dev )
+{
+	struct resource *r = &dev->resource [1];
+	r->start = 0;
+	r->end = 0xffffff;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD,	PCI_DEVICE_ID_DUNORD_I3000,	quirk_dunord );
+
+/*
+ * i82380FB mobile docking controller: its PCI-to-PCI bridge
+ * is subtractive decoding (transparent), and does indicate this
+ * in the ProgIf. Unfortunately, the ProgIf value is wrong - 0x80
+ * instead of 0x01.
+ */
+static void __devinit quirk_transparent_bridge(struct pci_dev *dev)
+{
+	dev->transparent = 1;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82380FB,	quirk_transparent_bridge );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA,	0x605,	quirk_transparent_bridge );
+
+/*
+ * Common misconfiguration of the MediaGX/Geode PCI master that will
+ * reduce PCI bandwidth from 70MB/s to 25MB/s.  See the GXM/GXLV/GX1
+ * datasheets found at http://www.national.com/ds/GX for info on what
+ * these bits do.  <christer@weinigel.se>
+ */
+static void __init quirk_mediagx_master(struct pci_dev *dev)
+{
+	u8 reg;
+	pci_read_config_byte(dev, 0x41, &reg);
+	if (reg & 2) {
+		reg &= ~2;
+		printk(KERN_INFO "PCI: Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n", reg);
+                pci_write_config_byte(dev, 0x41, reg);
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX,	PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master );
+
+/*
+ * As per PCI spec, ignore base address registers 0-3 of the IDE controllers
+ * running in Compatible mode (bits 0 and 2 in the ProgIf for primary and
+ * secondary channels respectively). If the device reports Compatible mode
+ * but does use BAR0-3 for address decoding, we assume that firmware has
+ * programmed these BARs with standard values (0x1f0,0x3f4 and 0x170,0x374).
+ * Exceptions (if they exist) must be handled in chip/architecture specific
+ * fixups.
+ *
+ * Note for non-x86 people: you may need an arch-specific quirk to handle
+ * moving IDE devices to native mode as well. Some plug-in card devices power
+ * up in compatible mode and assume the BIOS will adjust them.
+ *
+ * Q: should we load the 0x1f0,0x3f4 into the registers or zap them as
+ * we do now? What we don't want is pci_enable_device coming along
+ * and assigning new resources. Both approaches work for that.
+ */ 
+static void __devinit quirk_ide_bases(struct pci_dev *dev)
+{
+	struct resource *res;
+	int first_bar = 2, last_bar = 0;
+
+	if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
+		return;
+
+	res = &dev->resource[0];
+
+	/* primary channel: ProgIf bit 0, BAR0, BAR1 */
+	if (!(dev->class & 1) && (res[0].flags || res[1].flags)) {
+		res[0].start = res[0].end = res[0].flags = 0;
+		res[1].start = res[1].end = res[1].flags = 0;
+		first_bar = 0;
+		last_bar = 1;
+	}
+
+	/* secondary channel: ProgIf bit 2, BAR2, BAR3 */
+	if (!(dev->class & 4) && (res[2].flags || res[3].flags)) {
+		res[2].start = res[2].end = res[2].flags = 0;
+		res[3].start = res[3].end = res[3].flags = 0;
+		last_bar = 3;
+	}
+
+	if (!last_bar)
+		return;
+
+	printk(KERN_INFO "PCI: Ignoring BAR%d-%d of IDE controller %s\n",
+	       first_bar, last_bar, pci_name(dev));
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_ide_bases);
+
+/*
+ *	Ensure C0 rev restreaming is off. This is normally done by
+ *	the BIOS, but in the odd case it is not, the result is corruption;
+ *	hence the presence of a Linux check.
+ */
+static void __init quirk_disable_pxb(struct pci_dev *pdev)
+{
+	u16 config;
+	u8 rev;
+	
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
+	if (rev != 0x04)		/* Only C0 requires this */
+		return;
+	pci_read_config_word(pdev, 0x40, &config);
+	if (config & (1<<6)) {
+		config &= ~(1<<6);
+		pci_write_config_word(pdev, 0x40, config);
+		printk(KERN_INFO "PCI: C0 revision 450NX. Disabling PCI restreaming.\n");
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82454NX,	quirk_disable_pxb );
+
+
+/*
+ *	Serverworks CSB5 IDE does not fully support native mode
+ */
+static void __devinit quirk_svwks_csb5ide(struct pci_dev *pdev)
+{
+	u8 prog;
+	pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
+	if (prog & 5) {
+		prog &= ~5;
+		pdev->class &= ~5;
+		pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
+		/* need to re-assign BARs for compat mode */
+		quirk_ide_bases(pdev);
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide );
+
+/*
+ *	Intel 82801CAM ICH3-M datasheet says IDE modes must be the same
+ */
+static void __init quirk_ide_samemode(struct pci_dev *pdev)
+{
+	u8 prog;
+
+	pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
+
+	if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) {
+		printk(KERN_INFO "PCI: IDE mode mismatch; forcing legacy mode\n");
+		prog &= ~5;
+		pdev->class &= ~5;
+		pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
+		/* need to re-assign BARs for compat mode */
+		quirk_ide_bases(pdev);
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
+
+/* This was originally an Alpha specific thing, but it really fits here.
+ * The i82375 PCI/EISA bridge appears as non-classified. Fix that.
+ */
+static void __init quirk_eisa_bridge(struct pci_dev *dev)
+{
+	dev->class = PCI_CLASS_BRIDGE_EISA << 8;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82375,	quirk_eisa_bridge );
+
+/*
+ * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge
+ * is not activated. The myth is that Asus said that they do not want the
+ * users to be irritated by just another PCI Device in the Win98 device
+ * manager. (see the file prog/hotplug/README.p4b in the lm_sensors 
+ * package 2.7.0 for details)
+ *
+ * The SMBus PCI Device can be activated by setting a bit in the ICH LPC 
+ * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it 
+ * becomes necessary to do this tweak in two steps -- I've chosen the Host
+ * bridge as trigger.
+ */
+static int __initdata asus_hides_smbus = 0;
+
+static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
+{
+	if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
+		if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
+			switch(dev->subsystem_device) {
+			case 0x8025: /* P4B-LX */
+			case 0x8070: /* P4B */
+			case 0x8088: /* P4B533 */
+			case 0x1626: /* L3C notebook */
+				asus_hides_smbus = 1;
+			}
+		if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
+			switch(dev->subsystem_device) {
+			case 0x80b1: /* P4GE-V */
+			case 0x80b2: /* P4PE */
+			case 0x8093: /* P4B533-V */
+				asus_hides_smbus = 1;
+			}
+		if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
+			switch(dev->subsystem_device) {
+			case 0x8030: /* P4T533 */
+				asus_hides_smbus = 1;
+			}
+		if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
+			switch (dev->subsystem_device) {
+			case 0x8070: /* P4G8X Deluxe */
+				asus_hides_smbus = 1;
+			}
+		if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
+			switch (dev->subsystem_device) {
+			case 0x1751: /* M2N notebook */
+			case 0x1821: /* M5N notebook */
+				asus_hides_smbus = 1;
+			}
+		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
+			switch (dev->subsystem_device) {
+			case 0x184b: /* W1N notebook */
+			case 0x186a: /* M6Ne notebook */
+				asus_hides_smbus = 1;
+			}
+		if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) {
+			switch (dev->subsystem_device) {
+			case 0x1882: /* M6V notebook */
+				asus_hides_smbus = 1;
+			}
+		}
+	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
+		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
+			switch(dev->subsystem_device) {
+			case 0x088C: /* HP Compaq nc8000 */
+			case 0x0890: /* HP Compaq nc6000 */
+				asus_hides_smbus = 1;
+			}
+		if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
+			switch (dev->subsystem_device) {
+			case 0x12bc: /* HP D330L */
+			case 0x12bd: /* HP D530 */
+				asus_hides_smbus = 1;
+			}
+	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_TOSHIBA)) {
+		if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
+			switch(dev->subsystem_device) {
+			case 0x0001: /* Toshiba Satellite A40 */
+				asus_hides_smbus = 1;
+			}
+		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
+			switch(dev->subsystem_device) {
+			case 0x0001: /* Toshiba Tecra M2 */
+				asus_hides_smbus = 1;
+			}
+	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
+		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
+			switch(dev->subsystem_device) {
+			case 0xC00C: /* Samsung P35 notebook */
+				asus_hides_smbus = 1;
+			}
+	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
+		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
+			switch(dev->subsystem_device) {
+			case 0x0058: /* Compaq Evo N620c */
+				asus_hides_smbus = 1;
+			}
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82845_HB,	asus_hides_smbus_hostbridge );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82845G_HB,	asus_hides_smbus_hostbridge );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82850_HB,	asus_hides_smbus_hostbridge );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82865_HB,	asus_hides_smbus_hostbridge );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_7205_0,	asus_hides_smbus_hostbridge );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82855PM_HB,	asus_hides_smbus_hostbridge );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82855GM_HB,	asus_hides_smbus_hostbridge );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge );
+
+static void __init asus_hides_smbus_lpc(struct pci_dev *dev)
+{
+	u16 val;
+	
+	if (likely(!asus_hides_smbus))
+		return;
+
+	pci_read_config_word(dev, 0xF2, &val);
+	if (val & 0x8) {
+		pci_write_config_word(dev, 0xF2, val & (~0x8));
+		pci_read_config_word(dev, 0xF2, &val);
+		if (val & 0x8)
+			printk(KERN_INFO "PCI: i801 SMBus device continues to play 'hide and seek'! 0x%x\n", val);
+		else
+			printk(KERN_INFO "PCI: Enabled i801 SMBus device\n");
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801DB_0,	asus_hides_smbus_lpc );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801BA_0,	asus_hides_smbus_lpc );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801CA_12,	asus_hides_smbus_lpc );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801DB_12,	asus_hides_smbus_lpc );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_82801EB_0,	asus_hides_smbus_lpc );
+
+static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
+{
+	u32 val, rcba;
+	void __iomem *base;
+
+	if (likely(!asus_hides_smbus))
+		return;
+	pci_read_config_dword(dev, 0xF0, &rcba);
+	base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000); /* use bits 31:14, 16 kB aligned */
+	if (base == NULL) return;
+	val = readl(base + 0x3418); /* read the Function Disable register, dword mode only */
+	writel(val & 0xFFFFFFF7, base + 0x3418); /* enable the SMBus device */
+	iounmap(base);
+	printk(KERN_INFO "PCI: Enabled ICH6/i801 SMBus device\n");
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_ICH6_1,	asus_hides_smbus_lpc_ich6 );
+
+/*
+ * SiS 96x south bridge: BIOS typically hides SMBus device...
+ */
+static void __init quirk_sis_96x_smbus(struct pci_dev *dev)
+{
+	u8 val = 0;
+	printk(KERN_INFO "Enabling SiS 96x SMBus.\n");
+	pci_read_config_byte(dev, 0x77, &val);
+	pci_write_config_byte(dev, 0x77, val & ~0x10);
+	pci_read_config_byte(dev, 0x77, &val);
+}
+
+/*
+ * ... This is further complicated by the fact that some SiS96x south
+ * bridges pretend to be 85C503/5513 instead.  In that case see if we
+ * spotted a compatible north bridge to make sure.
+ * (pci_find_device doesn't work yet)
+ *
+ * We can also enable the sis96x bit in the discovery register..
+ */
+static int __devinitdata sis_96x_compatible = 0;
+
+#define SIS_DETECT_REGISTER 0x40
+
+static void __init quirk_sis_503(struct pci_dev *dev)
+{
+	u8 reg;
+	u16 devid;
+
+	pci_read_config_byte(dev, SIS_DETECT_REGISTER, &reg);
+	pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6));
+	pci_read_config_word(dev, PCI_DEVICE_ID, &devid);
+	if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) {
+		pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg);
+		return;
+	}
+
+	/* Make people aware that we changed the config.. */
+	printk(KERN_WARNING "Uncovering SIS%x that hid as a SIS503 (compatible=%d)\n", devid, sis_96x_compatible);
+
+	/*
+	 * Ok, it now shows up as a 96x.. The 96x quirks are after
+	 * the 503 quirk in the quirk table, so they'll automatically
+	 * run and enable things like the SMBus device
+	 */
+	dev->device = devid;
+}
+
+static void __init quirk_sis_96x_compatible(struct pci_dev *dev)
+{
+	sis_96x_compatible = 1;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_645,		quirk_sis_96x_compatible );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_646,		quirk_sis_96x_compatible );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_648,		quirk_sis_96x_compatible );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_650,		quirk_sis_96x_compatible );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_651,		quirk_sis_96x_compatible );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_735,		quirk_sis_96x_compatible );
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_503,		quirk_sis_503 );
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_961,		quirk_sis_96x_smbus );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_962,		quirk_sis_96x_smbus );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_963,		quirk_sis_96x_smbus );
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI,	PCI_DEVICE_ID_SI_LPC,		quirk_sis_96x_smbus );
+
+#ifdef CONFIG_X86_IO_APIC
+static void __init quirk_alder_ioapic(struct pci_dev *pdev)
+{
+	int i;
+
+	if ((pdev->class >> 8) != 0xff00)
+		return;
+
+	/* the first BAR is the location of the IO APIC...we must
+	 * not touch this (and it's already covered by the fixmap), so
+	 * forcibly insert it into the resource tree */
+	if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
+		insert_resource(&iomem_resource, &pdev->resource[0]);
+
+	/* The next five BARs all seem to be rubbish, so just clean
+	 * them out */
+	for (i=1; i < 6; i++) {
+		memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
+	}
+
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_EESSC,	quirk_alder_ioapic );
+#endif
+
+#ifdef CONFIG_SCSI_SATA_INTEL_COMBINED
+static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
+{
+	u8 prog, comb, tmp;
+	int ich = 0;
+
+	/*
+	 * Narrow down to Intel SATA PCI devices.
+	 */
+	switch (pdev->device) {
+	/* PCI ids taken from drivers/scsi/ata_piix.c */
+	case 0x24d1:
+	case 0x24df:
+	case 0x25a3:
+	case 0x25b0:
+		ich = 5;
+		break;
+	case 0x2651:
+	case 0x2652:
+	case 0x2653:
+	case 0x2680:	/* ESB2 */
+		ich = 6;
+		break;
+	case 0x27c0:
+	case 0x27c4:
+		ich = 7;
+		break;
+	default:
+		/* we do not handle this PCI device */
+		return;
+	}
+
+	/*
+	 * Read combined mode register.
+	 */
+	pci_read_config_byte(pdev, 0x90, &tmp);	/* combined mode reg */
+
+	if (ich == 5) {
+		tmp &= 0x6;  /* interesting bits 2:1, PATA primary/secondary */
+		if (tmp == 0x4)		/* bits 10x */
+			comb = (1 << 0);	/* SATA port 0, PATA port 1 */
+		else if (tmp == 0x6)	/* bits 11x */
+			comb = (1 << 2);	/* PATA port 0, SATA port 1 */
+		else
+			return;			/* not in combined mode */
+	} else {
+		WARN_ON((ich != 6) && (ich != 7));
+		tmp &= 0x3;  /* interesting bits 1:0 */
+		if (tmp & (1 << 0))
+			comb = (1 << 2);	/* PATA port 0, SATA port 1 */
+		else if (tmp & (1 << 1))
+			comb = (1 << 0);	/* SATA port 0, PATA port 1 */
+		else
+			return;			/* not in combined mode */
+	}
+
+	/*
+	 * Read programming interface register.
+	 * (Tells us if it's legacy or native mode)
+	 */
+	pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
+
+	/* if SATA port is in native mode, we're ok. */
+	if (prog & comb)
+		return;
+
+	/* SATA port is in legacy mode.  Reserve port so that
+	 * IDE driver does not attempt to use it.  If request_region
+	 * fails, it will be obvious at boot time, so we don't bother
+	 * checking return values.
+	 */
+	if (comb == (1 << 0))
+		request_region(0x1f0, 8, "libata");	/* port 0 */
+	else
+		request_region(0x170, 8, "libata");	/* port 1 */
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,    PCI_ANY_ID,	  quirk_intel_ide_combined );
+#endif /* CONFIG_SCSI_SATA_INTEL_COMBINED */
+
+
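+/*
+ * Set when an Intel E7320/E7520/E7525 MCH is present; other code in
+ * the tree (the PCI Express hot-plug driver, for instance) tests this
+ * flag to apply workarounds for these chipsets.
+ */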
+int pcie_mch_quirk;
+
+static void __devinit quirk_pcie_mch(struct pci_dev *pdev)
+{
+	pcie_mch_quirk = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7520_MCH,	quirk_pcie_mch );
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7320_MCH,	quirk_pcie_mch );
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_E7525_MCH,	quirk_pcie_mch );
+
+
+/*
+ * It's possible for the MSI to get corrupted if SHPC and ACPI
+ * are used together on certain PXH-based systems.
+ */
+static void __devinit quirk_pcie_pxh(struct pci_dev *dev)
+{
+	disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI),
+					PCI_CAP_ID_MSI);
+	dev->no_msi = 1;
+
+	printk(KERN_WARNING "PCI: PXH quirk detected, "
+		"disabling MSI for SHPC device\n");
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXHD_0,	quirk_pcie_pxh);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXHD_1,	quirk_pcie_pxh);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXH_0,	quirk_pcie_pxh);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXH_1,	quirk_pcie_pxh);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL,	PCI_DEVICE_ID_INTEL_PXHV,	quirk_pcie_pxh);
+
+
+static void __devinit quirk_netmos(struct pci_dev *dev)
+{
+	unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
+	unsigned int num_serial = dev->subsystem_device & 0xf;
+
+	/*
+	 * These Netmos parts are multiport serial devices with optional
+	 * parallel ports.  Even when parallel ports are present, they
+	 * are identified as class SERIAL, which means the serial driver
+	 * will claim them.  To prevent this, mark them as class OTHER.
+	 * These combo devices should be claimed by parport_serial.
+	 *
+	 * The subdevice ID is of the form 0x00PS, where <P> is the number
+	 * of parallel ports and <S> is the number of serial ports.
+	 */
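+	/* Example: a subsystem device ID of 0x0014 decodes to one
+	 * parallel and four serial ports. */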
+	switch (dev->device) {
+	case PCI_DEVICE_ID_NETMOS_9735:
+	case PCI_DEVICE_ID_NETMOS_9745:
+	case PCI_DEVICE_ID_NETMOS_9835:
+	case PCI_DEVICE_ID_NETMOS_9845:
+	case PCI_DEVICE_ID_NETMOS_9855:
+		if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_SERIAL &&
+		    num_parallel) {
+			printk(KERN_INFO "PCI: Netmos %04x (%u parallel, "
+				"%u serial); changing class SERIAL to OTHER "
+				"(use parport_serial)\n",
+				dev->device, num_parallel, num_serial);
+			dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
+			    (dev->class & 0xff);
+		}
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos);
+
+
+static void __devinit fixup_rev1_53c810(struct pci_dev* dev)
+{
+	/* rev 1 ncr53c810 chips don't set the class at all, which means
+	 * they don't get their resources remapped. Fix that here.
+	 */
+
+	if (dev->class == PCI_CLASS_NOT_DEFINED) {
+		printk(KERN_INFO "NCR 53c810 rev 1 detected, setting PCI class.\n");
+		dev->class = PCI_CLASS_STORAGE_SCSI;
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
+
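+/*
+ * For reference, a new quirk is wired up by defining a hook and adding
+ * one DECLARE_PCI_FIXUP_* line per affected device.  A minimal sketch,
+ * using a made-up vendor/device ID pair:
+ *
+ *	static void __devinit quirk_example(struct pci_dev *dev)
+ *	{
+ *		dev->no_msi = 1;
+ *	}
+ *	DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_example);
+ */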
+
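+/*
+ * Walk the fixup table [f, end) and invoke every hook whose vendor and
+ * device match this device; PCI_ANY_ID (0xffff once truncated to u16)
+ * acts as a wildcard.
+ */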
+static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end)
+{
+	while (f < end) {
+		if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) &&
+		    (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) {
+			pr_debug("PCI: Calling quirk %p for %s\n", f->hook, pci_name(dev));
+			f->hook(dev);
+		}
+		f++;
+	}
+}
+
+extern struct pci_fixup __start_pci_fixups_early[];
+extern struct pci_fixup __end_pci_fixups_early[];
+extern struct pci_fixup __start_pci_fixups_header[];
+extern struct pci_fixup __end_pci_fixups_header[];
+extern struct pci_fixup __start_pci_fixups_final[];
+extern struct pci_fixup __end_pci_fixups_final[];
+extern struct pci_fixup __start_pci_fixups_enable[];
+extern struct pci_fixup __end_pci_fixups_enable[];
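+/*
+ * These range symbols are provided by the linker script, which gathers
+ * the DECLARE_PCI_FIXUP_* entries into one dedicated section per pass.
+ */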
+
+
+void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
+{
+	struct pci_fixup *start, *end;
+
+	switch(pass) {
+	case pci_fixup_early:
+		start = __start_pci_fixups_early;
+		end = __end_pci_fixups_early;
+		break;
+
+	case pci_fixup_header:
+		start = __start_pci_fixups_header;
+		end = __end_pci_fixups_header;
+		break;
+
+	case pci_fixup_final:
+		start = __start_pci_fixups_final;
+		end = __end_pci_fixups_final;
+		break;
+
+	case pci_fixup_enable:
+		start = __start_pci_fixups_enable;
+		end = __end_pci_fixups_enable;
+		break;
+
+	default:
+		/* stupid compiler warning, you would think with an enum... */
+		return;
+	}
+	pci_do_fixups(dev, start, end);
+}
+
+EXPORT_SYMBOL(pcie_mch_quirk);
+#ifdef CONFIG_HOTPLUG
+EXPORT_SYMBOL(pci_fixup_device);
+#endif
diff -urN linux-2.6.15/drivers/scsi/libata-core.c linux-2.6.15-no1/drivers/scsi/libata-core.c
--- linux-2.6.15/drivers/scsi/libata-core.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/drivers/scsi/libata-core.c	2006-01-26 15:41:13.996759936 +0000
@@ -1032,18 +1032,22 @@
 {
 	u16 modes;
 
-	/* Usual case. Word 53 indicates word 88 is valid */
-	if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) {
+	/* Usual case. Word 53 indicates word 64 is valid */
+	if (adev->id[ATA_ID_FIELD_VALID] & (1 << 1)) {
 		modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
 		modes <<= 3;
 		modes |= 0x7;
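+		/* e.g. word 64 == 0x0003 yields mask 0x1f: PIO modes 0-4 usable */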
 		return modes;
 	}
 
-	/* If word 88 isn't valid then Word 51 holds the PIO timing number
-	   for the maximum. Turn it into a mask and return it */
-	modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
+	/* If word 64 isn't valid then Word 51 high byte holds the PIO timing
+	   number for the maximum. Turn it into a mask and return it */
+	modes = (2 << ((adev->id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF)) - 1 ;
 	return modes;
+	/* But wait... there's more. Design your standards by committee and
+	   you too can get a free IORDY field to process. However it's the
+	   speeds, not the modes, that are supported... Note drivers using the
+	   timing API will get this right anyway */
 }
 
 static int ata_qc_wait_err(struct ata_queued_cmd *qc,
diff -urN linux-2.6.15/drivers/scsi/libata-core.c.orig linux-2.6.15-no1/drivers/scsi/libata-core.c.orig
--- linux-2.6.15/drivers/scsi/libata-core.c.orig	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.15-no1/drivers/scsi/libata-core.c.orig	2006-01-03 03:21:10.000000000 +0000
@@ -0,0 +1,4966 @@
+/*
+ *  libata-core.c - helper library for ATA
+ *
+ *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
+ *    		    Please ALWAYS copy linux-ide@vger.kernel.org
+ *		    on emails.
+ *
+ *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
+ *  Copyright 2003-2004 Jeff Garzik
+ *
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2, or (at your option)
+ *  any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; see the file COPYING.  If not, write to
+ *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *  libata documentation is available via 'make {ps|pdf}docs',
+ *  as Documentation/DocBook/libata.*
+ *
+ *  Hardware documentation available from http://www.t13.org/ and
+ *  http://www.sata-io.org/
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/suspend.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/scatterlist.h>
+#include <scsi/scsi.h>
+#include "scsi_priv.h"
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <linux/libata.h>
+#include <asm/io.h>
+#include <asm/semaphore.h>
+#include <asm/byteorder.h>
+
+#include "libata.h"
+
+static unsigned int ata_busy_sleep (struct ata_port *ap,
+				    unsigned long tmout_pat,
+				    unsigned long tmout);
+static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
+static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
+static void ata_set_mode(struct ata_port *ap);
+static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
+static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift);
+static int fgb(u32 bitmap);
+static int ata_choose_xfer_mode(const struct ata_port *ap,
+				u8 *xfer_mode_out,
+				unsigned int *xfer_shift_out);
+static void __ata_qc_complete(struct ata_queued_cmd *qc);
+
+static unsigned int ata_unique_id = 1;
+static struct workqueue_struct *ata_wq;
+
+int atapi_enabled = 0;
+module_param(atapi_enabled, int, 0444);
+MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_DESCRIPTION("Library module for ATA devices");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/**
+ *	ata_tf_load_pio - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Outputs ATA taskfile to standard ATA host controller.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	if (tf->ctl != ap->last_ctl) {
+		outb(tf->ctl, ioaddr->ctl_addr);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		outb(tf->hob_feature, ioaddr->feature_addr);
+		outb(tf->hob_nsect, ioaddr->nsect_addr);
+		outb(tf->hob_lbal, ioaddr->lbal_addr);
+		outb(tf->hob_lbam, ioaddr->lbam_addr);
+		outb(tf->hob_lbah, ioaddr->lbah_addr);
+		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+			tf->hob_feature,
+			tf->hob_nsect,
+			tf->hob_lbal,
+			tf->hob_lbam,
+			tf->hob_lbah);
+	}
+
+	if (is_addr) {
+		outb(tf->feature, ioaddr->feature_addr);
+		outb(tf->nsect, ioaddr->nsect_addr);
+		outb(tf->lbal, ioaddr->lbal_addr);
+		outb(tf->lbam, ioaddr->lbam_addr);
+		outb(tf->lbah, ioaddr->lbah_addr);
+		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+			tf->feature,
+			tf->nsect,
+			tf->lbal,
+			tf->lbam,
+			tf->lbah);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE) {
+		outb(tf->device, ioaddr->device_addr);
+		VPRINTK("device 0x%X\n", tf->device);
+	}
+
+	ata_wait_idle(ap);
+}
+
+/**
+ *	ata_tf_load_mmio - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Outputs ATA taskfile to standard ATA host controller using MMIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
+
+	if (tf->ctl != ap->last_ctl) {
+		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
+		ap->last_ctl = tf->ctl;
+		ata_wait_idle(ap);
+	}
+
+	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
+		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
+		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
+		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
+		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
+		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
+		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
+			tf->hob_feature,
+			tf->hob_nsect,
+			tf->hob_lbal,
+			tf->hob_lbam,
+			tf->hob_lbah);
+	}
+
+	if (is_addr) {
+		writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
+		writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
+		writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
+		writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
+		writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
+		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
+			tf->feature,
+			tf->nsect,
+			tf->lbal,
+			tf->lbam,
+			tf->lbah);
+	}
+
+	if (tf->flags & ATA_TFLAG_DEVICE) {
+		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
+		VPRINTK("device 0x%X\n", tf->device);
+	}
+
+	ata_wait_idle(ap);
+}
+
+
+/**
+ *	ata_tf_load - send taskfile registers to host controller
+ *	@ap: Port to which output is sent
+ *	@tf: ATA taskfile register set
+ *
+ *	Outputs ATA taskfile to standard ATA host controller using MMIO
+ *	or PIO as indicated by the ATA_FLAG_MMIO flag.
+ *	Writes the control, feature, nsect, lbal, lbam, and lbah registers.
+ *	Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
+ *	hob_lbal, hob_lbam, and hob_lbah.
+ *
+ *	This function waits for idle (!BUSY and !DRQ) after writing
+ *	registers.  If the control register has a new value, this
+ *	function also waits for idle after writing control and before
+ *	writing the remaining registers.
+ *
+ *	May be used as the tf_load() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		ata_tf_load_mmio(ap, tf);
+	else
+		ata_tf_load_pio(ap, tf);
+}
+
+/**
+ *	ata_exec_command_pio - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues PIO write to ATA command register, with proper
+ *	synchronization with interrupt handler / other threads.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
+
+	outb(tf->command, ap->ioaddr.command_addr);
+	ata_pause(ap);
+}
+
+
+/**
+ *	ata_exec_command_mmio - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues MMIO write to ATA command register, with proper
+ *	synchronization with interrupt handler / other threads.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
+
+	writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
+	ata_pause(ap);
+}
+
+
+/**
+ *	ata_exec_command - issue ATA command to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues PIO/MMIO write to ATA command register, with proper
+ *	synchronization with interrupt handler / other threads.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		ata_exec_command_mmio(ap, tf);
+	else
+		ata_exec_command_pio(ap, tf);
+}
+
+/**
+ *	ata_tf_to_host - issue ATA taskfile to host controller
+ *	@ap: port to which command is being issued
+ *	@tf: ATA taskfile register set
+ *
+ *	Issues ATA taskfile register set to ATA host controller,
+ *	with proper synchronization with interrupt handler and
+ *	other threads.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static inline void ata_tf_to_host(struct ata_port *ap,
+				  const struct ata_taskfile *tf)
+{
+	ap->ops->tf_load(ap, tf);
+	ap->ops->exec_command(ap, tf);
+}
+
+/**
+ *	ata_tf_read_pio - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Reads ATA taskfile registers for currently-selected device
+ *	into @tf.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	tf->command = ata_check_status(ap);
+	tf->feature = inb(ioaddr->error_addr);
+	tf->nsect = inb(ioaddr->nsect_addr);
+	tf->lbal = inb(ioaddr->lbal_addr);
+	tf->lbam = inb(ioaddr->lbam_addr);
+	tf->lbah = inb(ioaddr->lbah_addr);
+	tf->device = inb(ioaddr->device_addr);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
+		tf->hob_feature = inb(ioaddr->error_addr);
+		tf->hob_nsect = inb(ioaddr->nsect_addr);
+		tf->hob_lbal = inb(ioaddr->lbal_addr);
+		tf->hob_lbam = inb(ioaddr->lbam_addr);
+		tf->hob_lbah = inb(ioaddr->lbah_addr);
+	}
+}
+
+/**
+ *	ata_tf_read_mmio - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Reads ATA taskfile registers for currently-selected device
+ *	into @tf via MMIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	tf->command = ata_check_status(ap);
+	tf->feature = readb((void __iomem *)ioaddr->error_addr);
+	tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
+	tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
+	tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
+	tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
+	tf->device = readb((void __iomem *)ioaddr->device_addr);
+
+	if (tf->flags & ATA_TFLAG_LBA48) {
+		writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
+		tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
+		tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
+		tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
+		tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
+		tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
+	}
+}
+
+
+/**
+ *	ata_tf_read - input device's ATA taskfile shadow registers
+ *	@ap: Port from which input is read
+ *	@tf: ATA taskfile register set for storing input
+ *
+ *	Reads ATA taskfile registers for currently-selected device
+ *	into @tf.
+ *
+ *	Reads nsect, lbal, lbam, lbah, and device.  If ATA_TFLAG_LBA48
+ *	is set, also reads the hob registers.
+ *
+ *	May be used as the tf_read() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		ata_tf_read_mmio(ap, tf);
+	else
+		ata_tf_read_pio(ap, tf);
+}
+
+/**
+ *	ata_check_status_pio - Read device status reg & clear interrupt
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile status register for currently-selected device
+ *	and returns its value.  This also clears pending interrupts
+ *	from this device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static u8 ata_check_status_pio(struct ata_port *ap)
+{
+	return inb(ap->ioaddr.status_addr);
+}
+
+/**
+ *	ata_check_status_mmio - Read device status reg & clear interrupt
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile status register for currently-selected device
+ *	via MMIO and returns its value.  This also clears pending interrupts
+ *	from this device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static u8 ata_check_status_mmio(struct ata_port *ap)
+{
+	return readb((void __iomem *) ap->ioaddr.status_addr);
+}
+
+
+/**
+ *	ata_check_status - Read device status reg & clear interrupt
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile status register for currently-selected device
+ *	and returns its value.  This also clears pending interrupts
+ *	from this device.
+ *
+ *	May be used as the check_status() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+u8 ata_check_status(struct ata_port *ap)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		return ata_check_status_mmio(ap);
+	return ata_check_status_pio(ap);
+}
+
+
+/**
+ *	ata_altstatus - Read device alternate status reg
+ *	@ap: port where the device is
+ *
+ *	Reads ATA taskfile alternate status register for
+ *	currently-selected device and returns its value.
+ *
+ *	Note: may NOT be used as the check_altstatus() entry in
+ *	ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+u8 ata_altstatus(struct ata_port *ap)
+{
+	if (ap->ops->check_altstatus)
+		return ap->ops->check_altstatus(ap);
+
+	if (ap->flags & ATA_FLAG_MMIO)
+		return readb((void __iomem *)ap->ioaddr.altstatus_addr);
+	return inb(ap->ioaddr.altstatus_addr);
+}
+
+
+/**
+ *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
+ *	@tf: Taskfile to convert
+ *	@fis: Buffer into which data will be output
+ *	@pmp: Port multiplier port
+ *
+ *	Converts a standard ATA taskfile to a Serial ATA
+ *	FIS structure (Register - Host to Device).
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
+{
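+	/* A Register - Host to Device FIS is five Dwords (20 bytes);
+	 * byte 14 and bytes 16-19 carry no taskfile data and are zeroed. */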
+	fis[0] = 0x27;	/* Register - Host to Device FIS */
+	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
+					    bit 7 indicates Command FIS */
+	fis[2] = tf->command;
+	fis[3] = tf->feature;
+
+	fis[4] = tf->lbal;
+	fis[5] = tf->lbam;
+	fis[6] = tf->lbah;
+	fis[7] = tf->device;
+
+	fis[8] = tf->hob_lbal;
+	fis[9] = tf->hob_lbam;
+	fis[10] = tf->hob_lbah;
+	fis[11] = tf->hob_feature;
+
+	fis[12] = tf->nsect;
+	fis[13] = tf->hob_nsect;
+	fis[14] = 0;
+	fis[15] = tf->ctl;
+
+	fis[16] = 0;
+	fis[17] = 0;
+	fis[18] = 0;
+	fis[19] = 0;
+}
+
+/**
+ *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
+ *	@fis: Buffer from which data will be input
+ *	@tf: Taskfile to output
+ *
+ *	Converts a serial ATA FIS structure to a standard ATA taskfile.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
+{
+	tf->command	= fis[2];	/* status */
+	tf->feature	= fis[3];	/* error */
+
+	tf->lbal	= fis[4];
+	tf->lbam	= fis[5];
+	tf->lbah	= fis[6];
+	tf->device	= fis[7];
+
+	tf->hob_lbal	= fis[8];
+	tf->hob_lbam	= fis[9];
+	tf->hob_lbah	= fis[10];
+
+	tf->nsect	= fis[12];
+	tf->hob_nsect	= fis[13];
+}
+
+static const u8 ata_rw_cmds[] = {
+	/* pio multi */
+	ATA_CMD_READ_MULTI,
+	ATA_CMD_WRITE_MULTI,
+	ATA_CMD_READ_MULTI_EXT,
+	ATA_CMD_WRITE_MULTI_EXT,
+	/* pio */
+	ATA_CMD_PIO_READ,
+	ATA_CMD_PIO_WRITE,
+	ATA_CMD_PIO_READ_EXT,
+	ATA_CMD_PIO_WRITE_EXT,
+	/* dma */
+	ATA_CMD_READ,
+	ATA_CMD_WRITE,
+	ATA_CMD_READ_EXT,
+	ATA_CMD_WRITE_EXT
+};
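+/*
+ * The table is indexed as base (0 = PIO multi, 4 = PIO, 8 = DMA) plus
+ * 2 for LBA48 and 1 for a write; e.g. a DMA LBA48 write selects index
+ * 11, ATA_CMD_WRITE_EXT.
+ */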
+
+/**
+ *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
+ *	@qc: command to examine and configure
+ *
+ *	Examine the device configuration and tf->flags to calculate 
+ *	the proper read/write commands and protocol to use.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+void ata_rwcmd_protocol(struct ata_queued_cmd *qc)
+{
+	struct ata_taskfile *tf = &qc->tf;
+	struct ata_device *dev = qc->dev;
+
+	int index, lba48, write;
+
+	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
+	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
+
+	if (dev->flags & ATA_DFLAG_PIO) {
+		tf->protocol = ATA_PROT_PIO;
+		index = dev->multi_count ? 0 : 4;
+	} else {
+		tf->protocol = ATA_PROT_DMA;
+		index = 8;
+	}
+
+	tf->command = ata_rw_cmds[index + lba48 + write];
+}
+
+static const char * xfer_mode_str[] = {
+	"UDMA/16",
+	"UDMA/25",
+	"UDMA/33",
+	"UDMA/44",
+	"UDMA/66",
+	"UDMA/100",
+	"UDMA/133",
+	"UDMA7",
+	"MWDMA0",
+	"MWDMA1",
+	"MWDMA2",
+	"PIO0",
+	"PIO1",
+	"PIO2",
+	"PIO3",
+	"PIO4",
+};
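+/*
+ * The table order mirrors the xfer mask bit layout (UDMA modes first,
+ * then MWDMA, then PIO); ata_mode_string() below indexes it directly
+ * by bit number.
+ */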
+
+/**
+ *	ata_mode_string - convert transfer mode bitmask to string
+ *	@mask: mask of bits supported; only highest bit counts.
+ *
+ *	Determine string which represents the highest speed
+ *	(highest bit in @mask).
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	Constant C string representing highest speed listed in
+ *	@mask, or the constant C string "<n/a>".
+ */
+
+static const char *ata_mode_string(unsigned int mask)
+{
+	int i;
+
+	for (i = 7; i >= 0; i--)
+		if (mask & (1 << i))
+			goto out;
+	for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
+		if (mask & (1 << i))
+			goto out;
+	for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
+		if (mask & (1 << i))
+			goto out;
+
+	return "<n/a>";
+
+out:
+	return xfer_mode_str[i];
+}
+
+/**
+ *	ata_pio_devchk - PATA device presence detection
+ *	@ap: ATA channel to examine
+ *	@device: Device to examine (starting at zero)
+ *
+ *	This technique was originally described in
+ *	Hale Landis's ATADRVR (www.ata-atapi.com), and
+ *	later found its way into the ATA/ATAPI spec.
+ *
+ *	Write a pattern to the ATA shadow registers,
+ *	and if a device is present, it will respond by
+ *	correctly storing and echoing back the
+ *	ATA shadow register contents.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+static unsigned int ata_pio_devchk(struct ata_port *ap,
+				   unsigned int device)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	u8 nsect, lbal;
+
+	ap->ops->dev_select(ap, device);
+
+	outb(0x55, ioaddr->nsect_addr);
+	outb(0xaa, ioaddr->lbal_addr);
+
+	outb(0xaa, ioaddr->nsect_addr);
+	outb(0x55, ioaddr->lbal_addr);
+
+	outb(0x55, ioaddr->nsect_addr);
+	outb(0xaa, ioaddr->lbal_addr);
+
+	nsect = inb(ioaddr->nsect_addr);
+	lbal = inb(ioaddr->lbal_addr);
+
+	if ((nsect == 0x55) && (lbal == 0xaa))
+		return 1;	/* we found a device */
+
+	return 0;		/* nothing found */
+}
+
+/**
+ *	ata_mmio_devchk - PATA device presence detection
+ *	@ap: ATA channel to examine
+ *	@device: Device to examine (starting at zero)
+ *
+ *	This technique was originally described in
+ *	Hale Landis's ATADRVR (www.ata-atapi.com), and
+ *	later found its way into the ATA/ATAPI spec.
+ *
+ *	Write a pattern to the ATA shadow registers,
+ *	and if a device is present, it will respond by
+ *	correctly storing and echoing back the
+ *	ATA shadow register contents.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+static unsigned int ata_mmio_devchk(struct ata_port *ap,
+				    unsigned int device)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	u8 nsect, lbal;
+
+	ap->ops->dev_select(ap, device);
+
+	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
+	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
+
+	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
+	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
+
+	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
+	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
+
+	nsect = readb((void __iomem *) ioaddr->nsect_addr);
+	lbal = readb((void __iomem *) ioaddr->lbal_addr);
+
+	if ((nsect == 0x55) && (lbal == 0xaa))
+		return 1;	/* we found a device */
+
+	return 0;		/* nothing found */
+}
+
+/**
+ *	ata_devchk - PATA device presence detection
+ *	@ap: ATA channel to examine
+ *	@device: Device to examine (starting at zero)
+ *
+ *	Dispatch ATA device presence detection, depending
+ *	on whether we are using PIO or MMIO to talk to the
+ *	ATA shadow registers.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+static unsigned int ata_devchk(struct ata_port *ap,
+				    unsigned int device)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		return ata_mmio_devchk(ap, device);
+	return ata_pio_devchk(ap, device);
+}
+
+/**
+ *	ata_dev_classify - determine device type based on ATA-spec signature
+ *	@tf: ATA taskfile register set for device to be identified
+ *
+ *	Determine from taskfile register contents whether a device is
+ *	ATA or ATAPI, as per "Signature and persistence" section
+ *	of ATA/PI spec (volume 1, sect 5.14).
+ *
+ *	LOCKING:
+ *	None.
+ *
+ *	RETURNS:
+ *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
+ *	in the event of failure.
+ */
+
+unsigned int ata_dev_classify(const struct ata_taskfile *tf)
+{
+	/* Apple's open source Darwin code hints that some devices only
+	 * put a proper signature into the LBA mid/high registers,
+	 * so we check only those; it's sufficient for uniqueness.
+	 */
+
+	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
+	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
+		DPRINTK("found ATA device by sig\n");
+		return ATA_DEV_ATA;
+	}
+
+	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
+	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
+		DPRINTK("found ATAPI device by sig\n");
+		return ATA_DEV_ATAPI;
+	}
+
+	DPRINTK("unknown device\n");
+	return ATA_DEV_UNKNOWN;
+}
+
+/**
+ *	ata_dev_try_classify - Parse returned ATA device signature
+ *	@ap: ATA channel to examine
+ *	@device: Device to examine (starting at zero)
+ *
+ *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
+ *	an ATA/ATAPI-defined set of values is placed in the ATA
+ *	shadow registers, indicating the results of device detection
+ *	and diagnostics.
+ *
+ *	Select the ATA device, and read the values from the ATA shadow
+ *	registers.  Then parse according to the Error register value,
+ *	and the spec-defined values examined by ata_dev_classify().
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
+{
+	struct ata_device *dev = &ap->device[device];
+	struct ata_taskfile tf;
+	unsigned int class;
+	u8 err;
+
+	ap->ops->dev_select(ap, device);
+
+	memset(&tf, 0, sizeof(tf));
+
+	ap->ops->tf_read(ap, &tf);
+	err = tf.feature;
+
+	dev->class = ATA_DEV_NONE;
+
+	/* see if device passed diags */
+	if (err == 1)
+		/* do nothing */ ;
+	else if ((device == 0) && (err == 0x81))
+		/* do nothing */ ;
+	else
+		return err;
+
+	/* determine if device is ATA or ATAPI */
+	class = ata_dev_classify(&tf);
+	if (class == ATA_DEV_UNKNOWN)
+		return err;
+	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
+		return err;
+
+	dev->class = class;
+
+	return err;
+}
+
+/**
+ *	ata_dev_id_string - Convert IDENTIFY DEVICE page into string
+ *	@id: IDENTIFY DEVICE results we will examine
+ *	@s: string into which data is output
+ *	@ofs: offset into identify device page
+ *	@len: length of string to return.  Must be an even number.
+ *
+ *	The strings in the IDENTIFY DEVICE page are broken up into
+ *	16-bit chunks.  Run through the string, and output each
+ *	8-bit chunk linearly, regardless of platform.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
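+/*
+ * Example: an ID word of 0x4142 emits 'A' then 'B'; ATA strings store
+ * the first character of each pair in the high byte of the word.
+ */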
+void ata_dev_id_string(const u16 *id, unsigned char *s,
+		       unsigned int ofs, unsigned int len)
+{
+	unsigned int c;
+
+	while (len > 0) {
+		c = id[ofs] >> 8;
+		*s = c;
+		s++;
+
+		c = id[ofs] & 0xff;
+		*s = c;
+		s++;
+
+		ofs++;
+		len -= 2;
+	}
+}
+
+
+/**
+ *	ata_noop_dev_select - Select device 0/1 on ATA bus
+ *	@ap: ATA channel to manipulate
+ *	@device: ATA device (numbered from zero) to select
+ *
+ *	This function intentionally does nothing.
+ *
+ *	May be used as the dev_select() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
+{
+}
+
+
+/**
+ *	ata_std_dev_select - Select device 0/1 on ATA bus
+ *	@ap: ATA channel to manipulate
+ *	@device: ATA device (numbered from zero) to select
+ *
+ *	Use the method defined in the ATA specification to
+ *	make either device 0, or device 1, active on the
+ *	ATA channel.  Works with both PIO and MMIO.
+ *
+ *	May be used as the dev_select() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+void ata_std_dev_select (struct ata_port *ap, unsigned int device)
+{
+	u8 tmp;
+
+	if (device == 0)
+		tmp = ATA_DEVICE_OBS;
+	else
+		tmp = ATA_DEVICE_OBS | ATA_DEV1;
+
+	if (ap->flags & ATA_FLAG_MMIO) {
+		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
+	} else {
+		outb(tmp, ap->ioaddr.device_addr);
+	}
+	ata_pause(ap);		/* needed; also flushes, for mmio */
+}
+
+/**
+ *	ata_dev_select - Select device 0/1 on ATA bus
+ *	@ap: ATA channel to manipulate
+ *	@device: ATA device (numbered from zero) to select
+ *	@wait: non-zero to wait for Status register BSY bit to clear
+ *	@can_sleep: non-zero if context allows sleeping
+ *
+ *	Use the method defined in the ATA specification to
+ *	make either device 0, or device 1, active on the
+ *	ATA channel.
+ *
+ *	This is a high-level version of ata_std_dev_select(),
+ *	which additionally provides the services of inserting
+ *	the proper pauses and status polling, where needed.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+void ata_dev_select(struct ata_port *ap, unsigned int device,
+			   unsigned int wait, unsigned int can_sleep)
+{
+	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
+		ap->id, device, wait);
+
+	if (wait)
+		ata_wait_idle(ap);
+
+	ap->ops->dev_select(ap, device);
+
+	if (wait) {
+		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
+			msleep(150);
+		ata_wait_idle(ap);
+	}
+}
+
+/**
+ *	ata_dump_id - IDENTIFY DEVICE info debugging output
+ *	@dev: Device whose IDENTIFY DEVICE page we will dump
+ *
+ *	Dump selected 16-bit words from a detected device's
+ *	IDENTIFY DEVICE page.
+ *
+ *	LOCKING:
+ *	caller.
+ */
+
+static inline void ata_dump_id(const struct ata_device *dev)
+{
+	DPRINTK("49==0x%04x  "
+		"53==0x%04x  "
+		"63==0x%04x  "
+		"64==0x%04x  "
+		"75==0x%04x  \n",
+		dev->id[49],
+		dev->id[53],
+		dev->id[63],
+		dev->id[64],
+		dev->id[75]);
+	DPRINTK("80==0x%04x  "
+		"81==0x%04x  "
+		"82==0x%04x  "
+		"83==0x%04x  "
+		"84==0x%04x  \n",
+		dev->id[80],
+		dev->id[81],
+		dev->id[82],
+		dev->id[83],
+		dev->id[84]);
+	DPRINTK("88==0x%04x  "
+		"93==0x%04x\n",
+		dev->id[88],
+		dev->id[93]);
+}
+
+/*
+ *	Compute the PIO modes available for this device. This is not as
+ *	trivial as it seems if we must consider early devices correctly.
+ *
+ *	FIXME: pre-IDE drive timing (do we care?).
+ */
+
+static unsigned int ata_pio_modes(const struct ata_device *adev)
+{
+	u16 modes;
+
+	/* Usual case. Word 53 indicates word 88 is valid */
+	if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) {
+		modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
+		modes <<= 3;
+		modes |= 0x7;
+		return modes;
+	}
+
+	/* If word 88 isn't valid then Word 51 holds the PIO timing number
+	   for the maximum. Turn it into a mask and return it */
+	modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
+	return modes;
+}
+
+static int ata_qc_wait_err(struct ata_queued_cmd *qc,
+			   struct completion *wait)
+{
+	int rc = 0;
+
+	if (wait_for_completion_timeout(wait, 30 * HZ) < 1) {
+		/* timeout handling */
+		unsigned int err_mask = ac_err_mask(ata_chk_status(qc->ap));
+
+		if (!err_mask) {
+			printk(KERN_WARNING "ata%u: slow completion (cmd %x)\n",
+			       qc->ap->id, qc->tf.command);
+		} else {
+			printk(KERN_WARNING "ata%u: qc timeout (cmd %x)\n",
+			       qc->ap->id, qc->tf.command);
+			rc = -EIO;
+		}
+
+		ata_qc_complete(qc, err_mask);
+	}
+
+	return rc;
+}
+
+/**
+ *	ata_dev_identify - obtain IDENTIFY [PACKET] DEVICE page
+ *	@ap: port on which device we wish to probe resides
+ *	@device: device bus address, starting at zero
+ *
+ *	Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
+ *	command, and read back the 512-byte device information page.
+ *	The device information page is fed to us via the standard
+ *	PIO-IN protocol, but we hand-code it here. (TODO: investigate
+ *	using standard PIO-IN paths)
+ *
+ *	After reading the device information page, we use several
+ *	bits of information from it to initialize data structures
+ *	that will be used during the lifetime of the ata_device.
+ *	Other data from the info page is used to disqualify certain
+ *	older ATA devices we do not wish to support.
+ *
+ *	LOCKING:
+ *	Inherited from caller.  Some functions called by this function
+ *	obtain the host_set lock.
+ */
+
+static void ata_dev_identify(struct ata_port *ap, unsigned int device)
+{
+	struct ata_device *dev = &ap->device[device];
+	unsigned int major_version;
+	u16 tmp;
+	unsigned long xfer_modes;
+	unsigned int using_edd;
+	DECLARE_COMPLETION(wait);
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	int rc;
+
+	if (!ata_dev_present(dev)) {
+		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
+			ap->id, device);
+		return;
+	}
+
+	if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
+		using_edd = 0;
+	else
+		using_edd = 1;
+
+	DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);
+
+	assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
+		dev->class == ATA_DEV_NONE);
+
+	ata_dev_select(ap, device, 1, 1); /* select device 0/1 */
+
+	qc = ata_qc_new_init(ap, dev);
+	BUG_ON(qc == NULL);
+
+	ata_sg_init_one(qc, dev->id, sizeof(dev->id));
+	qc->dma_dir = DMA_FROM_DEVICE;
+	qc->tf.protocol = ATA_PROT_PIO;
+	qc->nsect = 1;
+
+retry:
+	if (dev->class == ATA_DEV_ATA) {
+		qc->tf.command = ATA_CMD_ID_ATA;
+		DPRINTK("do ATA identify\n");
+	} else {
+		qc->tf.command = ATA_CMD_ID_ATAPI;
+		DPRINTK("do ATAPI identify\n");
+	}
+
+	qc->waiting = &wait;
+	qc->complete_fn = ata_qc_complete_noop;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	rc = ata_qc_issue(qc);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	if (rc)
+		goto err_out;
+	else
+		ata_qc_wait_err(qc, &wait);
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	ap->ops->tf_read(ap, &qc->tf);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	if (qc->tf.command & ATA_ERR) {
+		/*
+		 * arg!  EDD works for all test cases, but seems to return
+		 * the ATA signature for some ATAPI devices.  Until the
+		 * reason for this is found and fixed, we fix up the mess
+		 * here.  If IDENTIFY DEVICE returns command aborted
+		 * (as ATAPI devices do), then we issue an
+		 * IDENTIFY PACKET DEVICE.
+		 *
+		 * ATA software reset (SRST, the default) does not appear
+		 * to have this problem.
+		 */
+		if ((using_edd) && (dev->class == ATA_DEV_ATA)) {
+			u8 err = qc->tf.feature;
+			if (err & ATA_ABORTED) {
+				dev->class = ATA_DEV_ATAPI;
+				qc->cursg = 0;
+				qc->cursg_ofs = 0;
+				qc->cursect = 0;
+				qc->nsect = 1;
+				goto retry;
+			}
+		}
+		goto err_out;
+	}
+
+	swap_buf_le16(dev->id, ATA_ID_WORDS);
+
+	/* print device capabilities */
+	printk(KERN_DEBUG "ata%u: dev %u cfg "
+	       "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
+	       ap->id, device, dev->id[49],
+	       dev->id[82], dev->id[83], dev->id[84],
+	       dev->id[85], dev->id[86], dev->id[87],
+	       dev->id[88]);
+
+	/*
+	 * common ATA, ATAPI feature tests
+	 */
+
+	/* we require DMA support (bit 8 of word 49) */
+	if (!ata_id_has_dma(dev->id)) {
+		printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
+		goto err_out_nosup;
+	}
+
+	/* quick-n-dirty find max transfer mode; for printk only */
+	xfer_modes = dev->id[ATA_ID_UDMA_MODES];
+	if (!xfer_modes)
+		xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
+	if (!xfer_modes)
+		xfer_modes = ata_pio_modes(dev);
+
+	ata_dump_id(dev);
+
+	/* ATA-specific feature tests */
+	if (dev->class == ATA_DEV_ATA) {
+		if (!ata_id_is_ata(dev->id))	/* sanity check */
+			goto err_out_nosup;
+
+		/* get major version */
+		tmp = dev->id[ATA_ID_MAJOR_VER];
+		for (major_version = 14; major_version >= 1; major_version--)
+			if (tmp & (1 << major_version))
+				break;
+
+		/*
+		 * The exact sequence expected by certain pre-ATA4 drives is:
+		 * SRST RESET
+		 * IDENTIFY
+		 * INITIALIZE DEVICE PARAMETERS
+		 * anything else..
+		 * Some drives were very specific about that exact sequence.
+		 */
+		if (major_version < 4 || (!ata_id_has_lba(dev->id))) {
+			ata_dev_init_params(ap, dev);
+
+			/* current CHS translation info (id[53-58]) might be
+			 * changed. reread the identify device info.
+			 */
+			ata_dev_reread_id(ap, dev);
+		}
+
+		if (ata_id_has_lba(dev->id)) {
+			dev->flags |= ATA_DFLAG_LBA;
+
+			if (ata_id_has_lba48(dev->id)) {
+				dev->flags |= ATA_DFLAG_LBA48;
+				dev->n_sectors = ata_id_u64(dev->id, 100);
+			} else {
+				dev->n_sectors = ata_id_u32(dev->id, 60);
+			}
+
+			/* print device info to dmesg */
+			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
+			       ap->id, device,
+			       major_version,
+			       ata_mode_string(xfer_modes),
+			       (unsigned long long)dev->n_sectors,
+			       dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
+		} else { 
+			/* CHS */
+
+			/* Default translation */
+			dev->cylinders	= dev->id[1];
+			dev->heads	= dev->id[3];
+			dev->sectors	= dev->id[6];
+			dev->n_sectors	= dev->cylinders * dev->heads * dev->sectors;
+
+			if (ata_id_current_chs_valid(dev->id)) {
+				/* Current CHS translation is valid. */
+				dev->cylinders = dev->id[54];
+				dev->heads     = dev->id[55];
+				dev->sectors   = dev->id[56];
+
+				dev->n_sectors = ata_id_u32(dev->id, 57);
+			}
+
+			/* print device info to dmesg */
+			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
+			       ap->id, device,
+			       major_version,
+			       ata_mode_string(xfer_modes),
+			       (unsigned long long)dev->n_sectors,
+			       (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);
+
+		}
+
+		ap->host->max_cmd_len = 16;
+	}
+
+	/* ATAPI-specific feature tests */
+	else if (dev->class == ATA_DEV_ATAPI) {
+		if (ata_id_is_ata(dev->id))		/* sanity check */
+			goto err_out_nosup;
+
+		rc = atapi_cdb_len(dev->id);
+		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
+			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
+			goto err_out_nosup;
+		}
+		ap->cdb_len = (unsigned int) rc;
+		ap->host->max_cmd_len = (unsigned char) ap->cdb_len;
+
+		/* print device info to dmesg */
+		printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
+		       ap->id, device,
+		       ata_mode_string(xfer_modes));
+	}
+
+	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
+	return;
+
+err_out_nosup:
+	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
+	       ap->id, device);
+err_out:
+	dev->class++;	/* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
+	DPRINTK("EXIT, err\n");
+}
+
+
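+/*
+ * Heuristic: a device on a SATA cable whose IDENTIFY data does not
+ * claim SATA is almost certainly sitting behind a SATA<->PATA bridge.
+ */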
+static inline u8 ata_dev_knobble(const struct ata_port *ap)
+{
+	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
+}
+
+/**
+ * 	ata_dev_config - Run device specific handlers and check for SATA->PATA bridges
+ * 	@ap: Bus
+ * 	@i:  Device
+ *
+ * 	LOCKING:
+ */
+
+void ata_dev_config(struct ata_port *ap, unsigned int i)
+{
+	/* limit bridge transfers to udma5, 200 sectors */
+	if (ata_dev_knobble(ap)) {
+		printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
+			ap->id, ap->device->devno);
+		ap->udma_mask &= ATA_UDMA5;
+		ap->host->max_sectors = ATA_MAX_SECTORS;
+		ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
+		ap->device->flags |= ATA_DFLAG_LOCK_SECTORS;
+	}
+
+	if (ap->ops->dev_config)
+		ap->ops->dev_config(ap, &ap->device[i]);
+}
+
+/**
+ *	ata_bus_probe - Reset and probe ATA bus
+ *	@ap: Bus to probe
+ *
+ *	Master ATA bus probing function.  Initiates a hardware-dependent
+ *	bus reset, then attempts to identify any devices found on
+ *	the bus.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	Zero on success, non-zero on error.
+ */
+
+static int ata_bus_probe(struct ata_port *ap)
+{
+	unsigned int i, found = 0;
+
+	ap->ops->phy_reset(ap);
+	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		goto err_out;
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		ata_dev_identify(ap, i);
+		if (ata_dev_present(&ap->device[i])) {
+			found = 1;
+			ata_dev_config(ap,i);
+		}
+	}
+
+	if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
+		goto err_out_disable;
+
+	ata_set_mode(ap);
+	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		goto err_out_disable;
+
+	return 0;
+
+err_out_disable:
+	ap->ops->port_disable(ap);
+err_out:
+	return -1;
+}
+
+/**
+ *	ata_port_probe - Mark port as enabled
+ *	@ap: Port for which we indicate enablement
+ *
+ *	Modify @ap data structure such that the system
+ *	thinks that the entire port is enabled.
+ *
+ *	LOCKING: host_set lock, or some other form of
+ *	serialization.
+ */
+
+void ata_port_probe(struct ata_port *ap)
+{
+	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
+}
+
+/**
+ *	__sata_phy_reset - Wake/reset a low-level SATA PHY
+ *	@ap: SATA port associated with target SATA PHY.
+ *
+ *	This function issues commands to standard SATA Sxxx
+ *	PHY registers, to wake up the phy (and device), and
+ *	clear any reset condition.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ */
+void __sata_phy_reset(struct ata_port *ap)
+{
+	u32 sstatus;
+	unsigned long timeout = jiffies + (HZ * 5);
+
+	if (ap->flags & ATA_FLAG_SATA_RESET) {
+		/* issue phy wake/reset */
+		scr_write_flush(ap, SCR_CONTROL, 0x301);
+		/* Couldn't find anything in SATA I/II specs, but
+		 * AHCI-1.1 10.4.2 says at least 1 ms. */
+		mdelay(1);
+	}
+	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
+
+	/* wait for phy to become ready, if necessary */
+	do {
+		msleep(200);
+		sstatus = scr_read(ap, SCR_STATUS);
+		if ((sstatus & 0xf) != 1)
+			break;
+	} while (time_before(jiffies, timeout));
+
+	/* TODO: phy layer with polling, timeouts, etc. */
+	if (sata_dev_present(ap))
+		ata_port_probe(ap);
+	else {
+		sstatus = scr_read(ap, SCR_STATUS);
+		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
+		       ap->id, sstatus);
+		ata_port_disable(ap);
+	}
+
+	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		return;
+
+	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
+		ata_port_disable(ap);
+		return;
+	}
+
+	ap->cbl = ATA_CBL_SATA;
+}
+
+/**
+ *	sata_phy_reset - Reset SATA bus.
+ *	@ap: SATA port associated with target SATA PHY.
+ *
+ *	This function resets the SATA bus, and then probes
+ *	the bus for devices.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ */
+void sata_phy_reset(struct ata_port *ap)
+{
+	__sata_phy_reset(ap);
+	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		return;
+	ata_bus_reset(ap);
+}
+
+/**
+ *	ata_port_disable - Disable port.
+ *	@ap: Port to be disabled.
+ *
+ *	Modify @ap data structure such that the system
+ *	thinks that the entire port is disabled, and should
+ *	never attempt to probe or communicate with devices
+ *	on this port.
+ *
+ *	LOCKING: host_set lock, or some other form of
+ *	serialization.
+ */
+
+void ata_port_disable(struct ata_port *ap)
+{
+	ap->device[0].class = ATA_DEV_NONE;
+	ap->device[1].class = ATA_DEV_NONE;
+	ap->flags |= ATA_FLAG_PORT_DISABLED;
+}
+
+/*
+ * This mode timing computation functionality is ported over from
+ * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
+ */
+/*
+ * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
+ * These were taken from ATA/ATAPI-6 standard, rev 0a, except
+ * for PIO 5, which is a nonstandard extension, and UDMA6, which
+ * is currently supported only by Maxtor drives. 
+ */
+
+static const struct ata_timing ata_timing[] = {
+
+	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
+	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
+	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
+	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
+
+	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
+	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
+	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
+
+/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
+
+	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
+	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
+	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
+
+	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
+	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
+	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
+
+/*	{ XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
+	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
+	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
+
+	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
+	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
+	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
+
+/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
+
+	{ 0xFF }
+};
+
+#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
+#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
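+/*
+ * ENOUGH() rounds v up to whole units (a ceiling division); EZ()
+ * additionally preserves a zero field as "not specified".
+ */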
+
+static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
+{
+	q->setup   = EZ(t->setup   * 1000,  T);
+	q->act8b   = EZ(t->act8b   * 1000,  T);
+	q->rec8b   = EZ(t->rec8b   * 1000,  T);
+	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
+	q->active  = EZ(t->active  * 1000,  T);
+	q->recover = EZ(t->recover * 1000,  T);
+	q->cycle   = EZ(t->cycle   * 1000,  T);
+	q->udma    = EZ(t->udma    * 1000, UT);
+}
+
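+/*
+ * Merging two timing sets takes the slower (numerically larger) value
+ * of each selected field, so the result satisfies both sources.
+ */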
+void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
+		      struct ata_timing *m, unsigned int what)
+{
+	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
+	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
+	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
+	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
+	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
+	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
+	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
+	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
+}
+
+static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
+{
+	const struct ata_timing *t;
+
+	for (t = ata_timing; t->mode != speed; t++)
+		if (t->mode == 0xFF)
+			return NULL;
+	return t; 
+}
+
+int ata_timing_compute(struct ata_device *adev, unsigned short speed,
+		       struct ata_timing *t, int T, int UT)
+{
+	const struct ata_timing *s;
+	struct ata_timing p;
+
+	/*
+	 * Find the mode. 
+	 */
+
+	if (!(s = ata_timing_find_mode(speed)))
+		return -EINVAL;
+
+	memcpy(t, s, sizeof(*s));
+
+	/*
+	 * If the drive is an EIDE drive, it can tell us it needs extended
+	 * PIO/MW_DMA cycle timing.
+	 */
+
+	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
+		memset(&p, 0, sizeof(p));
+		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
+			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
+					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
+		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
+			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
+		}
+		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
+	}
+
+	/*
+	 * Convert the timing to bus clock counts.
+	 */
+
+	ata_timing_quantize(t, t, T, UT);
+
+	/*
+	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
+	 * S.M.A.R.T. and some other commands. We have to ensure that the
+	 * DMA cycle timing is no faster than the fastest PIO timing.
+	 */
+
+	if (speed > XFER_PIO_4) {
+		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
+		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
+	}
+
+	/*
+	 * Lengthen active & recovery time so that cycle time is correct.
+	 */
+
+	if (t->act8b + t->rec8b < t->cyc8b) {
+		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
+		t->rec8b = t->cyc8b - t->act8b;
+	}
+
+	if (t->active + t->recover < t->cycle) {
+		t->active += (t->cycle - (t->active + t->recover)) / 2;
+		t->recover = t->cycle - t->active;
+	}
+
+	return 0;
+}
+
+static const struct {
+	unsigned int shift;
+	u8 base;
+} xfer_mode_classes[] = {
+	{ ATA_SHIFT_UDMA,	XFER_UDMA_0 },
+	{ ATA_SHIFT_MWDMA,	XFER_MW_DMA_0 },
+	{ ATA_SHIFT_PIO,	XFER_PIO_0 },
+};
+
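+/*
+ * Map a transfer-class bit shift to the XFER_* base mode value for
+ * that class.
+ */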
+static inline u8 base_from_shift(unsigned int shift)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
+		if (xfer_mode_classes[i].shift == shift)
+			return xfer_mode_classes[i].base;
+
+	return 0xff;
+}
+
+static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
+{
+	int ofs, idx;
+	u8 base;
+
+	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
+		return;
+
+	if (dev->xfer_shift == ATA_SHIFT_PIO)
+		dev->flags |= ATA_DFLAG_PIO;
+
+	ata_dev_set_xfermode(ap, dev);
+
+	base = base_from_shift(dev->xfer_shift);
+	ofs = dev->xfer_mode - base;
+	idx = ofs + dev->xfer_shift;
+	WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));
+
+	DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
+		idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);
+
+	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
+		ap->id, dev->devno, xfer_mode_str[idx]);
+}
+
+static int ata_host_set_pio(struct ata_port *ap)
+{
+	unsigned int mask;
+	int x, i;
+	u8 base, xfer_mode;
+
+	mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
+	x = fgb(mask);
+	if (x < 0) {
+		printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
+		return -1;
+	}
+
+	base = base_from_shift(ATA_SHIFT_PIO);
+	xfer_mode = base + x;
+
+	DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
+		(int)base, (int)xfer_mode, mask, x);
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
+		if (ata_dev_present(dev)) {
+			dev->pio_mode = xfer_mode;
+			dev->xfer_mode = xfer_mode;
+			dev->xfer_shift = ATA_SHIFT_PIO;
+			if (ap->ops->set_piomode)
+				ap->ops->set_piomode(ap, dev);
+		}
+	}
+
+	return 0;
+}
+
+static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
+			    unsigned int xfer_shift)
+{
+	int i;
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++) {
+		struct ata_device *dev = &ap->device[i];
+		if (ata_dev_present(dev)) {
+			dev->dma_mode = xfer_mode;
+			dev->xfer_mode = xfer_mode;
+			dev->xfer_shift = xfer_shift;
+			if (ap->ops->set_dmamode)
+				ap->ops->set_dmamode(ap, dev);
+		}
+	}
+}
+
+/**
+ *	ata_set_mode - Program timings and issue SET FEATURES - XFER
+ *	@ap: port on which timings will be programmed
+ *
+ *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ */
+static void ata_set_mode(struct ata_port *ap)
+{
+	unsigned int xfer_shift;
+	u8 xfer_mode;
+	int rc;
+
+	/* step 1: always set host PIO timings */
+	rc = ata_host_set_pio(ap);
+	if (rc)
+		goto err_out;
+
+	/* step 2: choose the best data xfer mode */
+	xfer_mode = xfer_shift = 0;
+	rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
+	if (rc)
+		goto err_out;
+
+	/* step 3: if that xfer mode isn't PIO, set host DMA timings */
+	if (xfer_shift != ATA_SHIFT_PIO)
+		ata_host_set_dma(ap, xfer_mode, xfer_shift);
+
+	/* step 4: update devices' xfer mode */
+	ata_dev_set_mode(ap, &ap->device[0]);
+	ata_dev_set_mode(ap, &ap->device[1]);
+
+	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		return;
+
+	if (ap->ops->post_set_mode)
+		ap->ops->post_set_mode(ap);
+
+	return;
+
+err_out:
+	ata_port_disable(ap);
+}
+
+/**
+ *	ata_busy_sleep - sleep until BSY clears, or timeout
+ *	@ap: port containing status register to be polled
+ *	@tmout_pat: impatience timeout
+ *	@tmout: overall timeout
+ *
+ *	Sleep until ATA Status register bit BSY clears,
+ *	or a timeout occurs.
+ *
+ *	LOCKING: None.
+ *
+ */
+
+static unsigned int ata_busy_sleep (struct ata_port *ap,
+				    unsigned long tmout_pat,
+				    unsigned long tmout)
+{
+	unsigned long timer_start, timeout;
+	u8 status;
+
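+	/* poll in two phases: warn once tmout_pat expires, give up after tmout */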
+	status = ata_busy_wait(ap, ATA_BUSY, 300);
+	timer_start = jiffies;
+	timeout = timer_start + tmout_pat;
+	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
+		msleep(50);
+		status = ata_busy_wait(ap, ATA_BUSY, 3);
+	}
+
+	if (status & ATA_BUSY)
+		printk(KERN_WARNING "ata%u is slow to respond, "
+		       "please be patient\n", ap->id);
+
+	timeout = timer_start + tmout;
+	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
+		msleep(50);
+		status = ata_chk_status(ap);
+	}
+
+	if (status & ATA_BUSY) {
+		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
+		       ap->id, tmout / HZ);
+		return 1;
+	}
+
+	return 0;
+}
+
+static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int dev0 = devmask & (1 << 0);
+	unsigned int dev1 = devmask & (1 << 1);
+	unsigned long timeout;
+
+	/* if device 0 was found in ata_devchk, wait for its
+	 * BSY bit to clear
+	 */
+	if (dev0)
+		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+
+	/* if device 1 was found in ata_devchk, wait for
+	 * register access, then wait for BSY to clear
+	 */
+	timeout = jiffies + ATA_TMOUT_BOOT;
+	while (dev1) {
+		u8 nsect, lbal;
+
+		ap->ops->dev_select(ap, 1);
+		if (ap->flags & ATA_FLAG_MMIO) {
+			nsect = readb((void __iomem *) ioaddr->nsect_addr);
+			lbal = readb((void __iomem *) ioaddr->lbal_addr);
+		} else {
+			nsect = inb(ioaddr->nsect_addr);
+			lbal = inb(ioaddr->lbal_addr);
+		}
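+		/* a present device 1 answers with the post-reset signature 0x01 in both */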
+		if ((nsect == 1) && (lbal == 1))
+			break;
+		if (time_after(jiffies, timeout)) {
+			dev1 = 0;
+			break;
+		}
+		msleep(50);	/* give drive a breather */
+	}
+	if (dev1)
+		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+
+	/* is all this really necessary? */
+	ap->ops->dev_select(ap, 0);
+	if (dev1)
+		ap->ops->dev_select(ap, 1);
+	if (dev0)
+		ap->ops->dev_select(ap, 0);
+}
+
+/**
+ *	ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
+ *	@ap: Port to reset and probe
+ *
+ *	Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
+ *	probe the bus.  Not often used these days.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *	Obtains host_set lock.
+ *
+ */
+
+static unsigned int ata_bus_edd(struct ata_port *ap)
+{
+	struct ata_taskfile tf;
+	unsigned long flags;
+
+	/* set up execute-device-diag (bus reset) taskfile */
+	/* also, take interrupts to a known state (disabled) */
+	DPRINTK("execute-device-diag\n");
+	ata_tf_init(ap, &tf, 0);
+	tf.ctl |= ATA_NIEN;
+	tf.command = ATA_CMD_EDD;
+	tf.protocol = ATA_PROT_NODATA;
+
+	/* do bus reset */
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	ata_tf_to_host(ap, &tf);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	/* spec says at least 2ms, but who knows with those
+	 * crazy ATAPI devices...
+	 */
+	msleep(150);
+
+	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
+}
+
+static unsigned int ata_bus_softreset(struct ata_port *ap,
+				      unsigned int devmask)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+
+	DPRINTK("ata%u: bus reset via SRST\n", ap->id);
+
+	/* software reset.  causes dev0 to be selected */
+	if (ap->flags & ATA_FLAG_MMIO) {
+		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
+		udelay(20);	/* FIXME: flush */
+		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
+		udelay(20);	/* FIXME: flush */
+		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
+	} else {
+		outb(ap->ctl, ioaddr->ctl_addr);
+		udelay(10);
+		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+		udelay(10);
+		outb(ap->ctl, ioaddr->ctl_addr);
+	}
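+
+	/* Note: the ATA spec requires SRST to be asserted for at least
+	 * 5 us; the 10-20 us delays above are comfortably past that
+	 * minimum.
+	 */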
+
+	/* spec mandates ">= 2ms" before checking status.
+	 * We wait 150ms, because that was the magic delay used for
+	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
+	 * between when the ATA command register is written, and then
+	 * status is checked.  Because waiting for "a while" before
+	 * checking status is fine, post SRST, we perform this magic
+	 * delay here as well.
+	 */
+	msleep(150);
+
+	ata_bus_post_reset(ap, devmask);
+
+	return 0;
+}
+
+/**
+ *	ata_bus_reset - reset host port and associated ATA channel
+ *	@ap: port to reset
+ *
+ *	This is typically the first time we actually start issuing
+ *	commands to the ATA channel.  We wait for BSY to clear, then
+ *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
+ *	result.  Determine what devices, if any, are on the channel
+ *	by looking at the device 0/1 error register.  Look at the signature
+ *	stored in each device's taskfile registers, to determine if
+ *	the device is ATA or ATAPI.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *	Obtains host_set lock.
+ *
+ *	SIDE EFFECTS:
+ *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
+ */
+
+void ata_bus_reset(struct ata_port *ap)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
+	u8 err;
+	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;
+
+	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
+
+	/* determine if device 0/1 are present */
+	if (ap->flags & ATA_FLAG_SATA_RESET)
+		dev0 = 1;
+	else {
+		dev0 = ata_devchk(ap, 0);
+		if (slave_possible)
+			dev1 = ata_devchk(ap, 1);
+	}
+
+	if (dev0)
+		devmask |= (1 << 0);
+	if (dev1)
+		devmask |= (1 << 1);
+
+	/* select device 0 again */
+	ap->ops->dev_select(ap, 0);
+
+	/* issue bus reset */
+	if (ap->flags & ATA_FLAG_SRST)
+		rc = ata_bus_softreset(ap, devmask);
+	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
+		/* set up device control */
+		if (ap->flags & ATA_FLAG_MMIO)
+			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
+		else
+			outb(ap->ctl, ioaddr->ctl_addr);
+		rc = ata_bus_edd(ap);
+	}
+
+	if (rc)
+		goto err_out;
+
+	/*
+	 * determine by signature whether we have ATA or ATAPI devices
+	 */
+	err = ata_dev_try_classify(ap, 0);
+	if ((slave_possible) && (err != 0x81))
+		ata_dev_try_classify(ap, 1);
+
+	/* re-enable interrupts */
+	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
+		ata_irq_on(ap);
+
+	/* is double-select really necessary? */
+	if (ap->device[1].class != ATA_DEV_NONE)
+		ap->ops->dev_select(ap, 1);
+	if (ap->device[0].class != ATA_DEV_NONE)
+		ap->ops->dev_select(ap, 0);
+
+	/* if no devices were detected, disable this port */
+	if ((ap->device[0].class == ATA_DEV_NONE) &&
+	    (ap->device[1].class == ATA_DEV_NONE))
+		goto err_out;
+
+	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
+		/* set up device control for ATA_FLAG_SATA_RESET */
+		if (ap->flags & ATA_FLAG_MMIO)
+			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
+		else
+			outb(ap->ctl, ioaddr->ctl_addr);
+	}
+
+	DPRINTK("EXIT\n");
+	return;
+
+err_out:
+	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
+	ap->ops->port_disable(ap);
+
+	DPRINTK("EXIT\n");
+}
+
+static void ata_pr_blacklisted(const struct ata_port *ap,
+			       const struct ata_device *dev)
+{
+	printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
+		ap->id, dev->devno);
+}
+
+static const char * ata_dma_blacklist [] = {
+	"WDC AC11000H",
+	"WDC AC22100H",
+	"WDC AC32500H",
+	"WDC AC33100H",
+	"WDC AC31600H",
+	"WDC AC32100H",
+	"WDC AC23200L",
+	"Compaq CRD-8241B",
+	"CRD-8400B",
+	"CRD-8480B",
+	"CRD-8482B",
+	"CRD-84",
+	"SanDisk SDP3B",
+	"SanDisk SDP3B-64",
+	"SANYO CD-ROM CRD",
+	"HITACHI CDR-8",
+	"HITACHI CDR-8335",
+	"HITACHI CDR-8435",
+	"Toshiba CD-ROM XM-6202B",
+	"TOSHIBA CD-ROM XM-1702BC",
+	"CD-532E-A",
+	"E-IDE CD-ROM CR-840",
+	"CD-ROM Drive/F5A",
+	"WPI CDD-820",
+	"SAMSUNG CD-ROM SC-148C",
+	"SAMSUNG CD-ROM SC",
+	"SanDisk SDP3B-64",
+	"ATAPI CD-ROM DRIVE 40X MAXIMUM",
+	"_NEC DV5800A",
+};
+
+static int ata_dma_blacklisted(const struct ata_device *dev)
+{
+	unsigned char model_num[40];
+	char *s;
+	unsigned int len;
+	int i;
+
+	ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
+			  sizeof(model_num));
+	s = &model_num[0];
+	len = strnlen(s, sizeof(model_num));
+
+	/* ATAPI specifies that empty space is blank-filled; remove blanks */
+	while ((len > 0) && (s[len - 1] == ' ')) {
+		len--;
+		s[len] = 0;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
+		if (!strncmp(ata_dma_blacklist[i], s, len))
+			return 1;
+
+	return 0;
+}
+
+static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift)
+{
+	const struct ata_device *master, *slave;
+	unsigned int mask;
+
+	master = &ap->device[0];
+	slave = &ap->device[1];
+
+	assert(ata_dev_present(master) || ata_dev_present(slave));
+
+	if (shift == ATA_SHIFT_UDMA) {
+		mask = ap->udma_mask;
+		if (ata_dev_present(master)) {
+			mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
+			if (ata_dma_blacklisted(master)) {
+				mask = 0;
+				ata_pr_blacklisted(ap, master);
+			}
+		}
+		if (ata_dev_present(slave)) {
+			mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
+			if (ata_dma_blacklisted(slave)) {
+				mask = 0;
+				ata_pr_blacklisted(ap, slave);
+			}
+		}
+	} else if (shift == ATA_SHIFT_MWDMA) {
+		mask = ap->mwdma_mask;
+		if (ata_dev_present(master)) {
+			mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
+			if (ata_dma_blacklisted(master)) {
+				mask = 0;
+				ata_pr_blacklisted(ap, master);
+			}
+		}
+		if (ata_dev_present(slave)) {
+			mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
+			if (ata_dma_blacklisted(slave)) {
+				mask = 0;
+				ata_pr_blacklisted(ap, slave);
+			}
+		}
+	} else if (shift == ATA_SHIFT_PIO) {
+		mask = ap->pio_mask;
+		if (ata_dev_present(master)) {
+			/* spec doesn't return explicit support for
+			 * PIO0-2, so we fake it
+			 */
+			u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
+			tmp_mode <<= 3;
+			tmp_mode |= 0x7;
+			mask &= tmp_mode;
+		}
+		if (ata_dev_present(slave)) {
+			/* spec doesn't return explicit support for
+			 * PIO0-2, so we fake it
+			 */
+			u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
+			tmp_mode <<= 3;
+			tmp_mode |= 0x7;
+			mask &= tmp_mode;
+		}
+	} else {
+		mask = 0xffffffff; /* shut up compiler warning */
+		BUG();
+	}
+
+	return mask;
+}
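+
+/* Worked example for the PIO case above: a device reporting
+ * id[ATA_ID_PIO_MODES] & 0x03 == 0x03 (PIO3 and PIO4) yields
+ * tmp_mode = (0x03 << 3) | 0x7 = 0x1f, i.e. PIO0-4, since PIO0-2
+ * support is implied by the spec rather than reported explicitly.
+ */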
+
+/* find greatest bit */
+static int fgb(u32 bitmap)
+{
+	unsigned int i;
+	int x = -1;
+
+	for (i = 0; i < 32; i++)
+		if (bitmap & (1 << i))
+			x = i;
+
+	return x;
+}
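+
+/* For example, fgb(0x3f) == 5: with a UDMA mask of 0x3f (UDMA0-5) the
+ * highest set bit picks UDMA5 as the best mode.
+ */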
+
+/**
+ *	ata_choose_xfer_mode - attempt to find best transfer mode
+ *	@ap: Port for which an xfer mode will be selected
+ *	@xfer_mode_out: (output) SET FEATURES - XFER MODE code
+ *	@xfer_shift_out: (output) bit shift that selects this mode
+ *
+ *	Based on host and device capabilities, determine the
+ *	maximum transfer mode that is amenable to all.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	Zero on success, negative on error.
+ */
+
+static int ata_choose_xfer_mode(const struct ata_port *ap,
+				u8 *xfer_mode_out,
+				unsigned int *xfer_shift_out)
+{
+	unsigned int mask, shift;
+	int x, i;
+
+	for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
+		shift = xfer_mode_classes[i].shift;
+		mask = ata_get_mode_mask(ap, shift);
+
+		x = fgb(mask);
+		if (x >= 0) {
+			*xfer_mode_out = xfer_mode_classes[i].base + x;
+			*xfer_shift_out = shift;
+			return 0;
+		}
+	}
+
+	return -1;
+}
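+
+/* The first transfer-mode class with a non-empty mask wins; e.g. a
+ * UDMA mask of 0x3f returns base + 5 (UDMA5) without ever consulting
+ * the MWDMA or PIO classes, assuming the class table is ordered UDMA,
+ * MWDMA, PIO as the logic above implies.
+ */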
+
+/**
+ *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
+ *	@ap: Port associated with device @dev
+ *	@dev: Device to which command will be sent
+ *
+ *	Issue SET FEATURES - XFER MODE command to device @dev
+ *	on port @ap.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ */
+
+static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
+{
+	DECLARE_COMPLETION(wait);
+	struct ata_queued_cmd *qc;
+	int rc;
+	unsigned long flags;
+
+	/* set up set-features taskfile */
+	DPRINTK("set features - xfer mode\n");
+
+	qc = ata_qc_new_init(ap, dev);
+	BUG_ON(qc == NULL);
+
+	qc->tf.command = ATA_CMD_SET_FEATURES;
+	qc->tf.feature = SETFEATURES_XFER;
+	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	qc->tf.protocol = ATA_PROT_NODATA;
+	qc->tf.nsect = dev->xfer_mode;
+
+	qc->waiting = &wait;
+	qc->complete_fn = ata_qc_complete_noop;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	rc = ata_qc_issue(qc);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	if (rc)
+		ata_port_disable(ap);
+	else
+		ata_qc_wait_err(qc, &wait);
+
+	DPRINTK("EXIT\n");
+}
+
+/**
+ *	ata_dev_reread_id - Re-read a device's IDENTIFY DEVICE information
+ *	@ap: port where the device is
+ *	@dev: device whose identify data should be re-read
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ */
+
+static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev)
+{
+	DECLARE_COMPLETION(wait);
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	int rc;
+
+	qc = ata_qc_new_init(ap, dev);
+	BUG_ON(qc == NULL);
+
+	ata_sg_init_one(qc, dev->id, sizeof(dev->id));
+	qc->dma_dir = DMA_FROM_DEVICE;
+
+	if (dev->class == ATA_DEV_ATA) {
+		qc->tf.command = ATA_CMD_ID_ATA;
+		DPRINTK("do ATA identify\n");
+	} else {
+		qc->tf.command = ATA_CMD_ID_ATAPI;
+		DPRINTK("do ATAPI identify\n");
+	}
+
+	qc->tf.flags |= ATA_TFLAG_DEVICE;
+	qc->tf.protocol = ATA_PROT_PIO;
+	qc->nsect = 1;
+
+	qc->waiting = &wait;
+	qc->complete_fn = ata_qc_complete_noop;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	rc = ata_qc_issue(qc);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	if (rc)
+		goto err_out;
+
+	ata_qc_wait_err(qc, &wait);
+
+	swap_buf_le16(dev->id, ATA_ID_WORDS);
+
+	ata_dump_id(dev);
+
+	DPRINTK("EXIT\n");
+
+	return;
+err_out:
+	ata_port_disable(ap);
+}
+
+/**
+ *	ata_dev_init_params - Issue INITIALIZE DEVICE PARAMETERS command
+ *	@ap: Port associated with device @dev
+ *	@dev: Device to which command will be sent
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ */
+
+static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
+{
+	DECLARE_COMPLETION(wait);
+	struct ata_queued_cmd *qc;
+	int rc;
+	unsigned long flags;
+	u16 sectors = dev->id[6];
+	u16 heads   = dev->id[3];
+
+	/* Number of sectors per track 1-255. Number of heads 1-16 */
+	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
+		return;
+
+	/* set up init dev params taskfile */
+	DPRINTK("init dev params \n");
+
+	qc = ata_qc_new_init(ap, dev);
+	BUG_ON(qc == NULL);
+
+	qc->tf.command = ATA_CMD_INIT_DEV_PARAMS;
+	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+	qc->tf.protocol = ATA_PROT_NODATA;
+	qc->tf.nsect = sectors;
+	qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
+
+	qc->waiting = &wait;
+	qc->complete_fn = ata_qc_complete_noop;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	rc = ata_qc_issue(qc);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	if (rc)
+		ata_port_disable(ap);
+	else
+		ata_qc_wait_err(qc, &wait);
+
+	DPRINTK("EXIT\n");
+}
+
+/**
+ *	ata_sg_clean - Unmap DMA memory associated with command
+ *	@qc: Command containing DMA memory to be released
+ *
+ *	Unmap all mapped DMA memory associated with this command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_sg_clean(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg = qc->__sg;
+	int dir = qc->dma_dir;
+	void *pad_buf = NULL;
+
+	assert(qc->flags & ATA_QCFLAG_DMAMAP);
+	assert(sg != NULL);
+
+	if (qc->flags & ATA_QCFLAG_SINGLE)
+		assert(qc->n_elem == 1);
+
+	VPRINTK("unmapping %u sg elements\n", qc->n_elem);
+
+	/* if we padded the buffer out to 32-bit bound, and data
+	 * xfer direction is from-device, we must copy from the
+	 * pad buffer back into the supplied buffer
+	 */
+	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
+		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+
+	if (qc->flags & ATA_QCFLAG_SG) {
+		if (qc->n_elem)
+			dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
+		/* restore last sg */
+		sg[qc->orig_n_elem - 1].length += qc->pad_len;
+		if (pad_buf) {
+			struct scatterlist *psg = &qc->pad_sgent;
+			void *addr = kmap_atomic(psg->page, KM_IRQ0);
+			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
+			kunmap_atomic(addr, KM_IRQ0);
+		}
+	} else {
+		if (sg_dma_len(&sg[0]) > 0)
+			dma_unmap_single(ap->host_set->dev,
+				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
+				dir);
+		/* restore sg */
+		sg->length += qc->pad_len;
+		if (pad_buf)
+			memcpy(qc->buf_virt + sg->length - qc->pad_len,
+			       pad_buf, qc->pad_len);
+	}
+
+	qc->flags &= ~ATA_QCFLAG_DMAMAP;
+	qc->__sg = NULL;
+}
+
+/**
+ *	ata_fill_sg - Fill PCI IDE PRD table
+ *	@qc: Metadata associated with taskfile to be transferred
+ *
+ *	Fill PCI IDE PRD (scatter-gather) table with segments
+ *	associated with the current disk command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ *
+ */
+static void ata_fill_sg(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg;
+	unsigned int idx;
+
+	assert(qc->__sg != NULL);
+	assert(qc->n_elem > 0);
+
+	idx = 0;
+	ata_for_each_sg(sg, qc) {
+		u32 addr, offset;
+		u32 sg_len, len;
+
+		/* determine if physical DMA addr spans 64K boundary.
+		 * Note h/w doesn't support 64-bit, so we unconditionally
+		 * truncate dma_addr_t to u32.
+		 */
+		addr = (u32) sg_dma_address(sg);
+		sg_len = sg_dma_len(sg);
+
+		while (sg_len) {
+			offset = addr & 0xffff;
+			len = sg_len;
+			if ((offset + sg_len) > 0x10000)
+				len = 0x10000 - offset;
+
+			ap->prd[idx].addr = cpu_to_le32(addr);
+			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
+			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+
+			idx++;
+			sg_len -= len;
+			addr += len;
+		}
+	}
+
+	if (idx)
+		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
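+
+/* Example of the 64K-boundary split above: a segment at bus address
+ * 0x1fff0 with length 0x5000 becomes two PRD entries, (0x1fff0, 0x10)
+ * and (0x20000, 0x4ff0), since a single PRD entry must not cross a
+ * 64 KiB boundary.
+ */
+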
+/**
+ *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
+ *	@qc: Metadata associated with taskfile to check
+ *
+ *	Allow low-level driver to filter ATA PACKET commands, returning
+ *	a status indicating whether or not it is OK to use DMA for the
+ *	supplied PACKET command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ *
+ *	RETURNS: 0 when ATAPI DMA can be used
+ *               nonzero otherwise
+ */
+int ata_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	int rc = 0; /* Assume ATAPI DMA is OK by default */
+
+	if (ap->ops->check_atapi_dma)
+		rc = ap->ops->check_atapi_dma(qc);
+
+	return rc;
+}
+
+/**
+ *	ata_qc_prep - Prepare taskfile for submission
+ *	@qc: Metadata associated with taskfile to be prepared
+ *
+ *	Prepare ATA taskfile for submission.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_qc_prep(struct ata_queued_cmd *qc)
+{
+	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+		return;
+
+	ata_fill_sg(qc);
+}
+
+/**
+ *	ata_sg_init_one - Associate command with memory buffer
+ *	@qc: Command to be associated
+ *	@buf: Memory buffer
+ *	@buflen: Length of memory buffer, in bytes.
+ *
+ *	Initialize the data-related elements of queued_cmd @qc
+ *	to point to a single memory buffer, @buf of byte length @buflen.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
+{
+	struct scatterlist *sg;
+
+	qc->flags |= ATA_QCFLAG_SINGLE;
+
+	memset(&qc->sgent, 0, sizeof(qc->sgent));
+	qc->__sg = &qc->sgent;
+	qc->n_elem = 1;
+	qc->orig_n_elem = 1;
+	qc->buf_virt = buf;
+
+	sg = qc->__sg;
+	sg_init_one(sg, buf, buflen);
+}
+
+/**
+ *	ata_sg_init - Associate command with scatter-gather table.
+ *	@qc: Command to be associated
+ *	@sg: Scatter-gather table.
+ *	@n_elem: Number of elements in s/g table.
+ *
+ *	Initialize the data-related elements of queued_cmd @qc
+ *	to point to a scatter-gather table @sg, containing @n_elem
+ *	elements.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
+		 unsigned int n_elem)
+{
+	qc->flags |= ATA_QCFLAG_SG;
+	qc->__sg = sg;
+	qc->n_elem = n_elem;
+	qc->orig_n_elem = n_elem;
+}
+
+/**
+ *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
+ *	@qc: Command with memory buffer to be mapped.
+ *
+ *	DMA-map the memory buffer associated with queued_cmd @qc.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ *
+ *	RETURNS:
+ *	Zero on success, negative on error.
+ */
+
+static int ata_sg_setup_one(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	int dir = qc->dma_dir;
+	struct scatterlist *sg = qc->__sg;
+	dma_addr_t dma_address;
+
+	/* we must lengthen transfers to end on a 32-bit boundary */
+	qc->pad_len = sg->length & 3;
+	if (qc->pad_len) {
+		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+		struct scatterlist *psg = &qc->pad_sgent;
+
+		assert(qc->dev->class == ATA_DEV_ATAPI);
+
+		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE)
+			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
+			       qc->pad_len);
+
+		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
+		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
+		/* trim sg */
+		sg->length -= qc->pad_len;
+
+		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
+			sg->length, qc->pad_len);
+	}
+
+	if (!sg->length) {
+		sg_dma_address(sg) = 0;
+		goto skip_map;
+	}
+
+	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
+				     sg->length, dir);
+	if (dma_mapping_error(dma_address)) {
+		/* restore sg */
+		sg->length += qc->pad_len;
+		return -1;
+	}
+
+	sg_dma_address(sg) = dma_address;
+skip_map:
+	sg_dma_len(sg) = sg->length;
+
+	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
+		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	return 0;
+}
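+
+/* Example of the padding above: a 510-byte ATAPI transfer is trimmed to
+ * 508 bytes and the last 2 bytes travel via the per-tag 4-byte pad
+ * buffer, so the DMA transfer still ends on a 32-bit boundary.
+ */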
+
+/**
+ *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
+ *	@qc: Command with scatter-gather table to be mapped.
+ *
+ *	DMA-map the scatter-gather table associated with queued_cmd @qc.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ *
+ *	RETURNS:
+ *	Zero on success, negative on error.
+ *
+ */
+
+static int ata_sg_setup(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scatterlist *sg = qc->__sg;
+	struct scatterlist *lsg = &sg[qc->n_elem - 1];
+	int n_elem, pre_n_elem, dir, trim_sg = 0;
+
+	VPRINTK("ENTER, ata%u\n", ap->id);
+	assert(qc->flags & ATA_QCFLAG_SG);
+
+	/* we must lengthen transfers to end on a 32-bit boundary */
+	qc->pad_len = lsg->length & 3;
+	if (qc->pad_len) {
+		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
+		struct scatterlist *psg = &qc->pad_sgent;
+		unsigned int offset;
+
+		assert(qc->dev->class == ATA_DEV_ATAPI);
+
+		memset(pad_buf, 0, ATA_DMA_PAD_SZ);
+
+		/*
+		 * psg->page/offset are used to copy to-be-written
+		 * data in this function or read data in ata_sg_clean.
+		 */
+		offset = lsg->offset + lsg->length - qc->pad_len;
+		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
+		psg->offset = offset_in_page(offset);
+
+		if (qc->tf.flags & ATA_TFLAG_WRITE) {
+			void *addr = kmap_atomic(psg->page, KM_IRQ0);
+			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
+			kunmap_atomic(addr, KM_IRQ0);
+		}
+
+		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
+		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
+		/* trim last sg */
+		lsg->length -= qc->pad_len;
+		if (lsg->length == 0)
+			trim_sg = 1;
+
+		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
+			qc->n_elem - 1, lsg->length, qc->pad_len);
+	}
+
+	pre_n_elem = qc->n_elem;
+	if (trim_sg && pre_n_elem)
+		pre_n_elem--;
+
+	if (!pre_n_elem) {
+		n_elem = 0;
+		goto skip_map;
+	}
+
+	dir = qc->dma_dir;
+	n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
+	if (n_elem < 1) {
+		/* restore last sg */
+		lsg->length += qc->pad_len;
+		return -1;
+	}
+
+	DPRINTK("%d sg elements mapped\n", n_elem);
+
+skip_map:
+	qc->n_elem = n_elem;
+
+	return 0;
+}
+
+/**
+ *	ata_poll_qc_complete - turn irq back on and finish qc
+ *	@qc: Command to complete
+ *	@err_mask: error mask to report for the completed command
+ *
+ *	LOCKING:
+ *	None.  (grabs host lock)
+ */
+
+void ata_poll_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	ap->flags &= ~ATA_FLAG_NOINTR;
+	ata_irq_on(ap);
+	ata_qc_complete(qc, err_mask);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+}
+
+/**
+ *	ata_pio_poll - poll using ata_chk_status
+ *	@ap: the target ata_port
+ *
+ *	LOCKING:
+ *	None.  (executing in kernel thread context)
+ *
+ *	RETURNS:
+ *	timeout value to use
+ */
+
+static unsigned long ata_pio_poll(struct ata_port *ap)
+{
+	u8 status;
+	unsigned int poll_state = HSM_ST_UNKNOWN;
+	unsigned int reg_state = HSM_ST_UNKNOWN;
+
+	switch (ap->hsm_task_state) {
+	case HSM_ST:
+	case HSM_ST_POLL:
+		poll_state = HSM_ST_POLL;
+		reg_state = HSM_ST;
+		break;
+	case HSM_ST_LAST:
+	case HSM_ST_LAST_POLL:
+		poll_state = HSM_ST_LAST_POLL;
+		reg_state = HSM_ST_LAST;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	status = ata_chk_status(ap);
+	if (status & ATA_BUSY) {
+		if (time_after(jiffies, ap->pio_task_timeout)) {
+			ap->hsm_task_state = HSM_ST_TMOUT;
+			return 0;
+		}
+		ap->hsm_task_state = poll_state;
+		return ATA_SHORT_PAUSE;
+	}
+
+	ap->hsm_task_state = reg_state;
+	return 0;
+}
+
+/**
+ *	ata_pio_complete - check if drive is busy or idle
+ *	@ap: the target ata_port
+ *
+ *	LOCKING:
+ *	None.  (executing in kernel thread context)
+ *
+ *	RETURNS:
+ *	Non-zero if qc completed, zero otherwise.
+ */
+
+static int ata_pio_complete (struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	u8 drv_stat;
+
+	/*
+	 * This is purely heuristic.  This is a fast path.  Sometimes when
+	 * we enter, BSY will be cleared in a chk-status or two.  If not,
+	 * the drive is probably seeking or something.  Snooze for a couple
+	 * msecs, then chk-status again.  If still busy, fall back to
+	 * HSM_ST_POLL state.
+	 */
+	drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
+	if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
+		msleep(2);
+		drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
+		if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
+			ap->hsm_task_state = HSM_ST_LAST_POLL;
+			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
+			return 0;
+		}
+	}
+
+	drv_stat = ata_wait_idle(ap);
+	if (!ata_ok(drv_stat)) {
+		ap->hsm_task_state = HSM_ST_ERR;
+		return 0;
+	}
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	assert(qc != NULL);
+
+	ap->hsm_task_state = HSM_ST_IDLE;
+
+	ata_poll_qc_complete(qc, 0);
+
+	/* another command may start at this point */
+
+	return 1;
+}
+
+
+/**
+ *	swap_buf_le16 - swap halves of 16-bit words in place
+ *	@buf:  Buffer to swap
+ *	@buf_words:  Number of 16-bit words in buffer.
+ *
+ *	Swap halves of 16-bit words if needed to convert from
+ *	little-endian byte order to native cpu byte order, or
+ *	vice-versa.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+void swap_buf_le16(u16 *buf, unsigned int buf_words)
+{
+#ifdef __BIG_ENDIAN
+	unsigned int i;
+
+	for (i = 0; i < buf_words; i++)
+		buf[i] = le16_to_cpu(buf[i]);
+#endif /* __BIG_ENDIAN */
+}
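+
+/* On little-endian machines this compiles to nothing; on big-endian
+ * machines every 16-bit word is byte-swapped, since IDENTIFY data is
+ * little-endian on the wire.
+ */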
+
+/**
+ *	ata_mmio_data_xfer - Transfer data by MMIO
+ *	@ap: port to read/write
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@write_data: read/write
+ *
+ *	Transfer data from/to the device data register by MMIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
+			       unsigned int buflen, int write_data)
+{
+	unsigned int i;
+	unsigned int words = buflen >> 1;
+	u16 *buf16 = (u16 *) buf;
+	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
+
+	/* Transfer multiple of 2 bytes */
+	if (write_data) {
+		for (i = 0; i < words; i++)
+			writew(le16_to_cpu(buf16[i]), mmio);
+	} else {
+		for (i = 0; i < words; i++)
+			buf16[i] = cpu_to_le16(readw(mmio));
+	}
+
+	/* Transfer trailing 1 byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		u16 align_buf[1] = { 0 };
+		unsigned char *trailing_buf = buf + buflen - 1;
+
+		if (write_data) {
+			memcpy(align_buf, trailing_buf, 1);
+			writew(le16_to_cpu(align_buf[0]), mmio);
+		} else {
+			align_buf[0] = cpu_to_le16(readw(mmio));
+			memcpy(trailing_buf, align_buf, 1);
+		}
+	}
+}
+
+/**
+ *	ata_pio_data_xfer - Transfer data by PIO
+ *	@ap: port to read/write
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@write_data: read/write
+ *
+ *	Transfer data from/to the device data register by PIO.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
+			      unsigned int buflen, int write_data)
+{
+	unsigned int words = buflen >> 1;
+
+	/* Transfer multiple of 2 bytes */
+	if (write_data)
+		outsw(ap->ioaddr.data_addr, buf, words);
+	else
+		insw(ap->ioaddr.data_addr, buf, words);
+
+	/* Transfer trailing 1 byte, if any. */
+	if (unlikely(buflen & 0x01)) {
+		u16 align_buf[1] = { 0 };
+		unsigned char *trailing_buf = buf + buflen - 1;
+
+		if (write_data) {
+			memcpy(align_buf, trailing_buf, 1);
+			outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
+		} else {
+			align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
+			memcpy(trailing_buf, align_buf, 1);
+		}
+	}
+}
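+
+/* Both transfer helpers above move data one 16-bit word at a time; an
+ * odd trailing byte is bounced through a 2-byte buffer so that the
+ * final device access is still a full word.
+ */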
+
+/**
+ *	ata_data_xfer - Transfer data from/to the data register.
+ *	@ap: port to read/write
+ *	@buf: data buffer
+ *	@buflen: buffer length
+ *	@do_write: read/write
+ *
+ *	Transfer data from/to the device data register.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
+			  unsigned int buflen, int do_write)
+{
+	if (ap->flags & ATA_FLAG_MMIO)
+		ata_mmio_data_xfer(ap, buf, buflen, do_write);
+	else
+		ata_pio_data_xfer(ap, buf, buflen, do_write);
+}
+
+/**
+ *	ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
+ *	@qc: Command on going
+ *
+ *	Transfer ATA_SECT_SIZE of data from/to the ATA device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_pio_sector(struct ata_queued_cmd *qc)
+{
+	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
+	struct scatterlist *sg = qc->__sg;
+	struct ata_port *ap = qc->ap;
+	struct page *page;
+	unsigned int offset;
+	unsigned char *buf;
+
+	if (qc->cursect == (qc->nsect - 1))
+		ap->hsm_task_state = HSM_ST_LAST;
+
+	page = sg[qc->cursg].page;
+	offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
+
+	/* get the current page and offset */
+	page = nth_page(page, (offset >> PAGE_SHIFT));
+	offset %= PAGE_SIZE;
+
+	buf = kmap(page) + offset;
+
+	qc->cursect++;
+	qc->cursg_ofs++;
+
+	if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg[qc->cursg].length) {
+		qc->cursg++;
+		qc->cursg_ofs = 0;
+	}
+
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	/* do the actual data transfer */
+	ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
+
+	kunmap(page);
+}
+
+/**
+ *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ *	@qc: Command on going
+ *	@bytes: number of bytes
+ *
+ *	Transfer data from/to the ATAPI device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ *
+ */
+
+static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
+{
+	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
+	struct scatterlist *sg = qc->__sg;
+	struct ata_port *ap = qc->ap;
+	struct page *page;
+	unsigned char *buf;
+	unsigned int offset, count;
+
+	if (qc->curbytes + bytes >= qc->nbytes)
+		ap->hsm_task_state = HSM_ST_LAST;
+
+next_sg:
+	if (unlikely(qc->cursg >= qc->n_elem)) {
+		/*
+		 * The end of qc->sg is reached and the device expects
+		 * more data to transfer. In order not to overrun qc->sg
+		 * and fulfill length specified in the byte count register,
+		 *    - for read case, discard trailing data from the device
+		 *    - for write case, padding zero data to the device
+		 */
+		u16 pad_buf[1] = { 0 };
+		unsigned int words = bytes >> 1;
+		unsigned int i;
+
+		if (words) /* warn if at least one whole word of trailing data */
+			printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
+			       ap->id, bytes);
+
+		for (i = 0; i < words; i++)
+			ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
+
+		ap->hsm_task_state = HSM_ST_LAST;
+		return;
+	}
+
+	sg = &qc->__sg[qc->cursg];
+
+	page = sg->page;
+	offset = sg->offset + qc->cursg_ofs;
+
+	/* get the current page and offset */
+	page = nth_page(page, (offset >> PAGE_SHIFT));
+	offset %= PAGE_SIZE;
+
+	/* don't overrun current sg */
+	count = min(sg->length - qc->cursg_ofs, bytes);
+
+	/* don't cross page boundaries */
+	count = min(count, (unsigned int)PAGE_SIZE - offset);
+
+	buf = kmap(page) + offset;
+
+	bytes -= count;
+	qc->curbytes += count;
+	qc->cursg_ofs += count;
+
+	if (qc->cursg_ofs == sg->length) {
+		qc->cursg++;
+		qc->cursg_ofs = 0;
+	}
+
+	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+
+	/* do the actual data transfer */
+	ata_data_xfer(ap, buf, count, do_write);
+
+	kunmap(page);
+
+	if (bytes)
+		goto next_sg;
+}
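+
+/* If the device asks for more data than is left in qc->sg, the code
+ * above drains excess read data into (or pads excess writes from) a
+ * zeroed 2-byte buffer one word at a time instead of overrunning the
+ * sg list.
+ */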
+
+/**
+ *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
+ *	@qc: Command on going
+ *
+ *	Transfer data from/to the ATAPI device.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void atapi_pio_bytes(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_device *dev = qc->dev;
+	unsigned int ireason, bc_lo, bc_hi, bytes;
+	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
+
+	ap->ops->tf_read(ap, &qc->tf);
+	ireason = qc->tf.nsect;
+	bc_lo = qc->tf.lbam;
+	bc_hi = qc->tf.lbah;
+	bytes = (bc_hi << 8) | bc_lo;
+
+	/* shall be cleared to zero, indicating xfer of data */
+	if (ireason & (1 << 0))
+		goto err_out;
+
+	/* make sure transfer direction matches expected */
+	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
+	if (do_write != i_write)
+		goto err_out;
+
+	__atapi_pio_bytes(qc, bytes);
+
+	return;
+
+err_out:
+	printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
+	      ap->id, dev->devno);
+	ap->hsm_task_state = HSM_ST_ERR;
+}
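+
+/* The ireason checks above follow the ATAPI phase encoding: bit 0
+ * (CoD) must be zero for a data phase, and bit 1 (IO) gives the
+ * direction, 0 when the host writes to the device and 1 when it reads.
+ */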
+
+/**
+ *	ata_pio_block - start PIO on a block
+ *	@ap: the target ata_port
+ *
+ *	LOCKING:
+ *	None.  (executing in kernel thread context)
+ */
+
+static void ata_pio_block(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	u8 status;
+
+	/*
+	 * This is purely heuristic.  This is a fast path.
+	 * Sometimes when we enter, BSY will be cleared in
+	 * a chk-status or two.  If not, the drive is probably seeking
+	 * or something.  Snooze for a couple msecs, then
+	 * chk-status again.  If still busy, fall back to
+	 * HSM_ST_POLL state.
+	 */
+	status = ata_busy_wait(ap, ATA_BUSY, 5);
+	if (status & ATA_BUSY) {
+		msleep(2);
+		status = ata_busy_wait(ap, ATA_BUSY, 10);
+		if (status & ATA_BUSY) {
+			ap->hsm_task_state = HSM_ST_POLL;
+			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
+			return;
+		}
+	}
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	assert(qc != NULL);
+
+	if (is_atapi_taskfile(&qc->tf)) {
+		/* no more data to transfer or unsupported ATAPI command */
+		if ((status & ATA_DRQ) == 0) {
+			ap->hsm_task_state = HSM_ST_LAST;
+			return;
+		}
+
+		atapi_pio_bytes(qc);
+	} else {
+		/* handle BSY=0, DRQ=0 as error */
+		if ((status & ATA_DRQ) == 0) {
+			ap->hsm_task_state = HSM_ST_ERR;
+			return;
+		}
+
+		ata_pio_sector(qc);
+	}
+}
+
+static void ata_pio_error(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+
+	printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	assert(qc != NULL);
+
+	ap->hsm_task_state = HSM_ST_IDLE;
+
+	ata_poll_qc_complete(qc, AC_ERR_ATA_BUS);
+}
+
+static void ata_pio_task(void *_data)
+{
+	struct ata_port *ap = _data;
+	unsigned long timeout;
+	int qc_completed;
+
+fsm_start:
+	timeout = 0;
+	qc_completed = 0;
+
+	switch (ap->hsm_task_state) {
+	case HSM_ST_IDLE:
+		return;
+
+	case HSM_ST:
+		ata_pio_block(ap);
+		break;
+
+	case HSM_ST_LAST:
+		qc_completed = ata_pio_complete(ap);
+		break;
+
+	case HSM_ST_POLL:
+	case HSM_ST_LAST_POLL:
+		timeout = ata_pio_poll(ap);
+		break;
+
+	case HSM_ST_TMOUT:
+	case HSM_ST_ERR:
+		ata_pio_error(ap);
+		return;
+	}
+
+	if (timeout)
+		queue_delayed_work(ata_wq, &ap->pio_task, timeout);
+	else if (!qc_completed)
+		goto fsm_start;
+}
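+
+/* The PIO state machine loops in place until either a poll interval is
+ * returned (in which case the task re-queues itself as delayed work) or
+ * the qc completes; HSM_ST_TMOUT and HSM_ST_ERR exit through
+ * ata_pio_error() above.
+ */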
+
+/**
+ *	ata_qc_timeout - Handle timeout of queued command
+ *	@qc: Command that timed out
+ *
+ *	Some part of the kernel (currently, only the SCSI layer)
+ *	has noticed that the active command on port @ap has not
+ *	completed after a specified length of time.  Handle this
+ *	condition by disabling DMA (if necessary) and completing
+ *	transactions, with error if necessary.
+ *
+ *	This also handles the case of the "lost interrupt", where
+ *	for some reason (possibly hardware bug, possibly driver bug)
+ *	an interrupt was not delivered to the driver, even though the
+ *	transaction completed successfully.
+ *
+ *	LOCKING:
+ *	Inherited from SCSI layer (none, can sleep)
+ */
+
+static void ata_qc_timeout(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_host_set *host_set = ap->host_set;
+	u8 host_stat = 0, drv_stat;
+	unsigned long flags;
+
+	DPRINTK("ENTER\n");
+
+	spin_lock_irqsave(&host_set->lock, flags);
+
+	/* hack alert!  We cannot use the supplied completion
+	 * function from inside the ->eh_strategy_handler() thread.
+	 * libata is the only user of ->eh_strategy_handler() in
+	 * any kernel, so the default scsi_done() assumes it is
+	 * not being called from the SCSI EH.
+	 */
+	qc->scsidone = scsi_finish_command;
+
+	switch (qc->tf.protocol) {
+
+	case ATA_PROT_DMA:
+	case ATA_PROT_ATAPI_DMA:
+		host_stat = ap->ops->bmdma_status(ap);
+
+		/* before we do anything else, clear DMA-Start bit */
+		ap->ops->bmdma_stop(qc);
+
+		/* fall through */
+
+	default:
+		ata_altstatus(ap);
+		drv_stat = ata_chk_status(ap);
+
+		/* ack bmdma irq events */
+		ap->ops->irq_clear(ap);
+
+		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
+		       ap->id, qc->tf.command, drv_stat, host_stat);
+
+		/* complete taskfile transaction */
+		ata_qc_complete(qc, ac_err_mask(drv_stat));
+		break;
+	}
+
+	spin_unlock_irqrestore(&host_set->lock, flags);
+
+	DPRINTK("EXIT\n");
+}
+
+/**
+ *	ata_eng_timeout - Handle timeout of queued command
+ *	@ap: Port on which timed-out command is active
+ *
+ *	Some part of the kernel (currently, only the SCSI layer)
+ *	has noticed that the active command on port @ap has not
+ *	completed after a specified length of time.  Handle this
+ *	condition by disabling DMA (if necessary) and completing
+ *	transactions, with error if necessary.
+ *
+ *	This also handles the case of the "lost interrupt", where
+ *	for some reason (possibly hardware bug, possibly driver bug)
+ *	an interrupt was not delivered to the driver, even though the
+ *	transaction completed successfully.
+ *
+ *	LOCKING:
+ *	Inherited from SCSI layer (none, can sleep)
+ */
+
+void ata_eng_timeout(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+
+	DPRINTK("ENTER\n");
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	if (qc)
+		ata_qc_timeout(qc);
+	else
+		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
+		       ap->id);
+
+	DPRINTK("EXIT\n");
+}
+
+/**
+ *	ata_qc_new - Request an available ATA command, for queueing
+ *	@ap: Port associated with device @dev
+ *	@dev: Device from whom we request an available command structure
+ *
+ *	LOCKING:
+ *	None.
+ */
+
+static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc = NULL;
+	unsigned int i;
+
+	for (i = 0; i < ATA_MAX_QUEUE; i++)
+		if (!test_and_set_bit(i, &ap->qactive)) {
+			qc = ata_qc_from_tag(ap, i);
+			break;
+		}
+
+	if (qc)
+		qc->tag = i;
+
+	return qc;
+}
+
+/**
+ *	ata_qc_new_init - Request an available ATA command, and initialize it
+ *	@ap: Port associated with device @dev
+ *	@dev: Device for which the command structure is requested
+ *
+ *	LOCKING:
+ *	None.
+ */
+
+struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
+				      struct ata_device *dev)
+{
+	struct ata_queued_cmd *qc;
+
+	qc = ata_qc_new(ap);
+	if (qc) {
+		qc->scsicmd = NULL;
+		qc->ap = ap;
+		qc->dev = dev;
+
+		ata_qc_reinit(qc);
+	}
+
+	return qc;
+}
+
+int ata_qc_complete_noop(struct ata_queued_cmd *qc, unsigned int err_mask)
+{
+	return 0;
+}
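+
+/* Returning zero tells ata_qc_complete() to go ahead and finish the
+ * command; internal commands install this no-op callback and
+ * synchronize on qc->waiting instead.
+ */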
+
+static void __ata_qc_complete(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int tag, do_clear = 0;
+
+	qc->flags = 0;
+	tag = qc->tag;
+	if (likely(ata_tag_valid(tag))) {
+		if (tag == ap->active_tag)
+			ap->active_tag = ATA_TAG_POISON;
+		qc->tag = ATA_TAG_POISON;
+		do_clear = 1;
+	}
+
+	if (qc->waiting) {
+		struct completion *waiting = qc->waiting;
+		qc->waiting = NULL;
+		complete(waiting);
+	}
+
+	if (likely(do_clear))
+		clear_bit(tag, &ap->qactive);
+}
+
+/**
+ *	ata_qc_free - free unused ata_queued_cmd
+ *	@qc: Command to complete
+ *
+ *	Designed to free unused ata_queued_cmd object
+ *	in case something prevents using it.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_qc_free(struct ata_queued_cmd *qc)
+{
+	assert(qc != NULL);	/* ata_qc_from_tag _might_ return NULL */
+	assert(qc->waiting == NULL);	/* nothing should be waiting */
+
+	__ata_qc_complete(qc);
+}
+
+/**
+ *	ata_qc_complete - Complete an active ATA command
+ *	@qc: Command to complete
+ *	@err_mask: error mask for the command (zero if it completed OK)
+ *
+ *	Indicate to the mid and upper layers that an ATA
+ *	command has completed, with either an ok or not-ok status.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+void ata_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask)
+{
+	int rc;
+
+	assert(qc != NULL);	/* ata_qc_from_tag _might_ return NULL */
+	assert(qc->flags & ATA_QCFLAG_ACTIVE);
+
+	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
+		ata_sg_clean(qc);
+
+	/* atapi: mark qc as inactive to prevent the interrupt handler
+	 * from completing the command twice later, before the error handler
+	 * is called. (when rc != 0 and atapi request sense is needed)
+	 */
+	qc->flags &= ~ATA_QCFLAG_ACTIVE;
+
+	/* call completion callback */
+	rc = qc->complete_fn(qc, err_mask);
+
+	/* if callback indicates not to complete command (non-zero),
+	 * return immediately
+	 */
+	if (rc != 0)
+		return;
+
+	__ata_qc_complete(qc);
+
+	VPRINTK("EXIT\n");
+}
+
+static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_DMA:
+	case ATA_PROT_ATAPI_DMA:
+		return 1;
+
+	case ATA_PROT_ATAPI:
+	case ATA_PROT_PIO:
+	case ATA_PROT_PIO_MULT:
+		if (ap->flags & ATA_FLAG_PIO_DMA)
+			return 1;
+
+		/* fall through */
+
+	default:
+		return 0;
+	}
+
+	/* never reached */
+}
+
+/**
+ *	ata_qc_issue - issue taskfile to device
+ *	@qc: command to issue to device
+ *
+ *	Prepare an ATA command for submission to a device.
+ *	This includes mapping the data into a DMA-able
+ *	area, filling in the S/G table, and finally
+ *	writing the taskfile to hardware, starting the command.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ *
+ *	RETURNS:
+ *	Zero on success, negative on error.
+ */
+
+int ata_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	if (ata_should_dma_map(qc)) {
+		if (qc->flags & ATA_QCFLAG_SG) {
+			if (ata_sg_setup(qc))
+				goto err_out;
+		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
+			if (ata_sg_setup_one(qc))
+				goto err_out;
+		}
+	} else {
+		qc->flags &= ~ATA_QCFLAG_DMAMAP;
+	}
+
+	ap->ops->qc_prep(qc);
+
+	qc->ap->active_tag = qc->tag;
+	qc->flags |= ATA_QCFLAG_ACTIVE;
+
+	return ap->ops->qc_issue(qc);
+
+err_out:
+	return -1;
+}
+
+
+/**
+ *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
+ *	@qc: command to issue to device
+ *
+ *	Using various libata functions and hooks, this function
+ *	starts an ATA command.  ATA commands are grouped into
+ *	classes called "protocols", and issuing each type of protocol
+ *	is slightly different.
+ *
+ *	May be used as the qc_issue() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ *
+ *	RETURNS:
+ *	Zero on success, negative on error.
+ */
+
+int ata_qc_issue_prot(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+	ata_dev_select(ap, qc->dev->devno, 1, 0);
+
+	switch (qc->tf.protocol) {
+	case ATA_PROT_NODATA:
+		ata_tf_to_host(ap, &qc->tf);
+		break;
+
+	case ATA_PROT_DMA:
+		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
+		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
+		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
+		break;
+
+	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
+		ata_qc_set_polling(qc);
+		ata_tf_to_host(ap, &qc->tf);
+		ap->hsm_task_state = HSM_ST;
+		queue_work(ata_wq, &ap->pio_task);
+		break;
+
+	case ATA_PROT_ATAPI:
+		ata_qc_set_polling(qc);
+		ata_tf_to_host(ap, &qc->tf);
+		queue_work(ata_wq, &ap->packet_task);
+		break;
+
+	case ATA_PROT_ATAPI_NODATA:
+		ap->flags |= ATA_FLAG_NOINTR;
+		ata_tf_to_host(ap, &qc->tf);
+		queue_work(ata_wq, &ap->packet_task);
+		break;
+
+	case ATA_PROT_ATAPI_DMA:
+		ap->flags |= ATA_FLAG_NOINTR;
+		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
+		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
+		queue_work(ata_wq, &ap->packet_task);
+		break;
+
+	default:
+		WARN_ON(1);
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ *	ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+
+	/* load PRD table addr. */
+	mb();	/* make sure PRD table writes are visible to controller */
+	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = readb(mmio + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	writeb(dmactl, mmio + ATA_DMA_CMD);
+
+	/* issue r/w command */
+	ap->ops->exec_command(ap, &qc->tf);
+}
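+
+/* Note the inverted sense of ATA_DMA_WR above: in the BMDMA command
+ * register, "write" means the bus master writes to memory, i.e. an ATA
+ * read, so the bit is set when the taskfile is not a write.  The PIO
+ * variant below does the same.
+ */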
+
+/**
+ *	ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+	u8 dmactl;
+
+	/* start host DMA transaction */
+	dmactl = readb(mmio + ATA_DMA_CMD);
+	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
+
+	/* Strictly, one may wish to issue a readb() here, to
+	 * flush the mmio write.  However, control also passes
+	 * to the hardware at this point, and it will interrupt
+	 * us when we are to resume control.  So, in effect,
+	 * we don't care when the mmio write flushes.
+	 * Further, a read of the DMA status register _immediately_
+	 * following the write may not be what certain flaky hardware
+	 * is expecting, so I think it is best to not add a readb()
+	 * without first checking all the MMIO ATA cards/mobos.
+	 * Or maybe I'm just being paranoid.
+	 */
+}
+
+/**
+ *	ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+	u8 dmactl;
+
+	/* load PRD table addr. */
+	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
+
+	/* specify data direction, triple-check start bit is clear */
+	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
+	if (!rw)
+		dmactl |= ATA_DMA_WR;
+	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+
+	/* issue r/w command */
+	ap->ops->exec_command(ap, &qc->tf);
+}
+
+/**
+ *	ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	u8 dmactl;
+
+	/* start host DMA transaction */
+	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	outb(dmactl | ATA_DMA_START,
+	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+}
+
+
+/**
+ *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	Writes the ATA_DMA_START flag to the DMA command register.
+ *
+ *	May be used as the bmdma_start() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_bmdma_start(struct ata_queued_cmd *qc)
+{
+	if (qc->ap->flags & ATA_FLAG_MMIO)
+		ata_bmdma_start_mmio(qc);
+	else
+		ata_bmdma_start_pio(qc);
+}
+
+
+/**
+ *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
+ *	@qc: Info associated with this ATA transaction.
+ *
+ *	Writes address of PRD table to device's PRD Table Address
+ *	register, sets the DMA control register, and calls
+ *	ops->exec_command() to start the transfer.
+ *
+ *	May be used as the bmdma_setup() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+void ata_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	if (qc->ap->flags & ATA_FLAG_MMIO)
+		ata_bmdma_setup_mmio(qc);
+	else
+		ata_bmdma_setup_pio(qc);
+}
+
+
+/**
+ *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
+ *	@ap: Port associated with this ATA transaction.
+ *
+ *	Clear interrupt and error flags in DMA status register.
+ *
+ *	May be used as the irq_clear() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+void ata_bmdma_irq_clear(struct ata_port *ap)
+{
+	if (ap->flags & ATA_FLAG_MMIO) {
+		void __iomem *mmio =
+			((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
+		writeb(readb(mmio), mmio);
+	} else {
+		unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
+		outb(inb(addr), addr);
+	}
+}
+
+
+/**
+ *	ata_bmdma_status - Read PCI IDE BMDMA status
+ *	@ap: Port associated with this ATA transaction.
+ *
+ *	Read and return BMDMA status register.
+ *
+ *	May be used as the bmdma_status() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+u8 ata_bmdma_status(struct ata_port *ap)
+{
+	u8 host_stat;
+	if (ap->flags & ATA_FLAG_MMIO) {
+		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+		host_stat = readb(mmio + ATA_DMA_STATUS);
+	} else
+		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
+	return host_stat;
+}
+
+
+/**
+ *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
+ *	@qc: Command we are ending DMA for
+ *
+ *	Clears the ATA_DMA_START flag in the dma control register
+ *
+ *	May be used as the bmdma_stop() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ */
+
+void ata_bmdma_stop(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	if (ap->flags & ATA_FLAG_MMIO) {
+		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
+
+		/* clear start/stop bit */
+		writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
+			mmio + ATA_DMA_CMD);
+	} else {
+		/* clear start/stop bit */
+		outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
+			ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+	}
+
+	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+	ata_altstatus(ap);        /* dummy read */
+}
+
+/**
+ *	ata_host_intr - Handle host interrupt for given (port, task)
+ *	@ap: Port on which interrupt arrived (possibly...)
+ *	@qc: Taskfile currently active in engine
+ *
+ *	Handle host interrupt for given queued command.  Currently,
+ *	only DMA interrupts are handled.  All other commands are
+ *	handled via polling with interrupts disabled (nIEN bit).
+ *
+ *	LOCKING:
+ *	spin_lock_irqsave(host_set lock)
+ *
+ *	RETURNS:
+ *	One if interrupt was handled, zero if not (shared irq).
+ */
+
+inline unsigned int ata_host_intr (struct ata_port *ap,
+				   struct ata_queued_cmd *qc)
+{
+	u8 status, host_stat;
+
+	switch (qc->tf.protocol) {
+
+	case ATA_PROT_DMA:
+	case ATA_PROT_ATAPI_DMA:
+	case ATA_PROT_ATAPI:
+		/* check status of DMA engine */
+		host_stat = ap->ops->bmdma_status(ap);
+		VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
+
+		/* if it's not our irq... */
+		if (!(host_stat & ATA_DMA_INTR))
+			goto idle_irq;
+
+		/* before we do anything else, clear DMA-Start bit */
+		ap->ops->bmdma_stop(qc);
+
+		/* fall through */
+
+	case ATA_PROT_ATAPI_NODATA:
+	case ATA_PROT_NODATA:
+		/* check altstatus */
+		status = ata_altstatus(ap);
+		if (status & ATA_BUSY)
+			goto idle_irq;
+
+		/* check main status, clearing INTRQ */
+		status = ata_chk_status(ap);
+		if (unlikely(status & ATA_BUSY))
+			goto idle_irq;
+		DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
+			ap->id, qc->tf.protocol, status);
+
+		/* ack bmdma irq events */
+		ap->ops->irq_clear(ap);
+
+		/* complete taskfile transaction */
+		ata_qc_complete(qc, ac_err_mask(status));
+		break;
+
+	default:
+		goto idle_irq;
+	}
+
+	return 1;	/* irq handled */
+
+idle_irq:
+	ap->stats.idle_irq++;
+
+#ifdef ATA_IRQ_TRAP
+	if ((ap->stats.idle_irq % 1000) == 0) {
+		ata_irq_ack(ap, 0); /* debug trap */
+		printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
+		return 1;	/* report the trapped irq as handled */
+	}
+#endif
+	return 0;	/* irq not handled */
+}
+
+/**
+ *	ata_interrupt - Default ATA host interrupt handler
+ *	@irq: irq line (unused)
+ *	@dev_instance: pointer to our ata_host_set information structure
+ *	@regs: unused
+ *
+ *	Default interrupt handler for PCI IDE devices.  Calls
+ *	ata_host_intr() for each port that is not disabled.
+ *
+ *	LOCKING:
+ *	Obtains host_set lock during operation.
+ *
+ *	RETURNS:
+ *	IRQ_NONE or IRQ_HANDLED.
+ */
+
+irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
+{
+	struct ata_host_set *host_set = dev_instance;
+	unsigned int i;
+	unsigned int handled = 0;
+	unsigned long flags;
+
+	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
+	spin_lock_irqsave(&host_set->lock, flags);
+
+	for (i = 0; i < host_set->n_ports; i++) {
+		struct ata_port *ap;
+
+		ap = host_set->ports[i];
+		if (ap &&
+		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
+			struct ata_queued_cmd *qc;
+
+			qc = ata_qc_from_tag(ap, ap->active_tag);
+			if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
+			    (qc->flags & ATA_QCFLAG_ACTIVE))
+				handled |= ata_host_intr(ap, qc);
+		}
+	}
+
+	spin_unlock_irqrestore(&host_set->lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+/**
+ *	atapi_packet_task - Write CDB bytes to hardware
+ *	@_data: Port to which ATAPI device is attached.
+ *
+ *	When device has indicated its readiness to accept
+ *	a CDB, this function is called.  Send the CDB.
+ *	If DMA is to be performed, exit immediately.
+ *	Otherwise, we are in polling mode, so poll
+ *	status until the operation succeeds or fails.
+ *
+ *	LOCKING:
+ *	Kernel thread context (may sleep)
+ */
+
+static void atapi_packet_task(void *_data)
+{
+	struct ata_port *ap = _data;
+	struct ata_queued_cmd *qc;
+	u8 status;
+
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	assert(qc != NULL);
+	assert(qc->flags & ATA_QCFLAG_ACTIVE);
+
+	/* sleep-wait for BSY to clear */
+	DPRINTK("busy wait\n");
+	if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
+		goto err_out_status;
+
+	/* make sure DRQ is set */
+	status = ata_chk_status(ap);
+	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
+		goto err_out;
+
+	/* send SCSI cdb */
+	DPRINTK("send cdb\n");
+	assert(ap->cdb_len >= 12);
+
+	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
+	    qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
+		unsigned long flags;
+
+		/* Once we're done issuing command and kicking bmdma,
+		 * irq handler takes over.  To not lose irq, we need
+		 * to clear NOINTR flag before sending cdb, but
+		 * interrupt handler shouldn't be invoked before we're
+		 * finished.  Hence, the following locking.
+		 */
+		spin_lock_irqsave(&ap->host_set->lock, flags);
+		ap->flags &= ~ATA_FLAG_NOINTR;
+		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
+		if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
+			ap->ops->bmdma_start(qc);	/* initiate bmdma */
+		spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	} else {
+		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
+
+		/* PIO commands are handled by polling */
+		ap->hsm_task_state = HSM_ST;
+		queue_work(ata_wq, &ap->pio_task);
+	}
+
+	return;
+
+err_out_status:
+	status = ata_chk_status(ap);
+err_out:
+	ata_poll_qc_complete(qc, __ac_err_mask(status));
+}
+
+
+/**
+ *	ata_port_start - Set port up for dma.
+ *	@ap: Port to initialize
+ *
+ *	Called just after data structures for each port are
+ *	initialized.  Allocates space for PRD table.
+ *
+ *	May be used as the port_start() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+int ata_port_start (struct ata_port *ap)
+{
+	struct device *dev = ap->host_set->dev;
+	int rc;
+
+	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
+	if (!ap->prd)
+		return -ENOMEM;
+
+	rc = ata_pad_alloc(ap, dev);
+	if (rc) {
+		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+		return rc;
+	}
+
+	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
+
+	return 0;
+}
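+
+/* A minimal port_start() thus sets up the two DMA objects the core
+ * relies on: the coherent PRD table (ATA_PRD_TBL_SZ bytes) and the
+ * per-port ATAPI pad buffer obtained via ata_pad_alloc().
+ */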
+
+
+/**
+ *	ata_port_stop - Undo ata_port_start()
+ *	@ap: Port to shut down
+ *
+ *	Frees the PRD table.
+ *
+ *	May be used as the port_stop() entry in ata_port_operations.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+void ata_port_stop (struct ata_port *ap)
+{
+	struct device *dev = ap->host_set->dev;
+
+	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
+	ata_pad_free(ap, dev);
+}
+
+void ata_host_stop (struct ata_host_set *host_set)
+{
+	if (host_set->mmio_base)
+		iounmap(host_set->mmio_base);
+}
+
+
+/**
+ *	ata_host_remove - Unregister SCSI host structure with upper layers
+ *	@ap: Port to unregister
+ *	@do_unregister: 1 if we fully unregister, 0 to just stop the port
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
+{
+	struct Scsi_Host *sh = ap->host;
+
+	DPRINTK("ENTER\n");
+
+	if (do_unregister)
+		scsi_remove_host(sh);
+
+	ap->ops->port_stop(ap);
+}
+
+/**
+ *	ata_host_init - Initialize an ata_port structure
+ *	@ap: Structure to initialize
+ *	@host: associated SCSI mid-layer structure
+ *	@host_set: Collection of hosts to which @ap belongs
+ *	@ent: Probe information provided by low-level driver
+ *	@port_no: Port number associated with this ata_port
+ *
+ *	Initialize a new ata_port structure, and its associated
+ *	scsi_host.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
+			  struct ata_host_set *host_set,
+			  const struct ata_probe_ent *ent, unsigned int port_no)
+{
+	unsigned int i;
+
+	host->max_id = 16;
+	host->max_lun = 1;
+	host->max_channel = 1;
+	host->unique_id = ata_unique_id++;
+	host->max_cmd_len = 12;
+
+	ap->flags = ATA_FLAG_PORT_DISABLED;
+	ap->id = host->unique_id;
+	ap->host = host;
+	ap->ctl = ATA_DEVCTL_OBS;
+	ap->host_set = host_set;
+	ap->port_no = port_no;
+	ap->hard_port_no =
+		ent->legacy_mode ? ent->hard_port_no : port_no;
+	ap->pio_mask = ent->pio_mask;
+	ap->mwdma_mask = ent->mwdma_mask;
+	ap->udma_mask = ent->udma_mask;
+	ap->flags |= ent->host_flags;
+	ap->ops = ent->port_ops;
+	ap->cbl = ATA_CBL_NONE;
+	ap->active_tag = ATA_TAG_POISON;
+	ap->last_ctl = 0xFF;
+
+	INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
+	INIT_WORK(&ap->pio_task, ata_pio_task, ap);
+
+	for (i = 0; i < ATA_MAX_DEVICES; i++)
+		ap->device[i].devno = i;
+
+#ifdef ATA_IRQ_TRAP
+	ap->stats.unhandled_irq = 1;
+	ap->stats.idle_irq = 1;
+#endif
+
+	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
+}
+
+/**
+ *	ata_host_add - Attach low-level ATA driver to system
+ *	@ent: Information provided by low-level driver
+ *	@host_set: Collections of ports to which we add
+ *	@port_no: Port number associated with this host
+ *
+ *	Attach low-level ATA driver to system.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	New ata_port on success, NULL on error.
+ */
+
+static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
+				      struct ata_host_set *host_set,
+				      unsigned int port_no)
+{
+	struct Scsi_Host *host;
+	struct ata_port *ap;
+	int rc;
+
+	DPRINTK("ENTER\n");
+	host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
+	if (!host)
+		return NULL;
+
+	ap = (struct ata_port *) &host->hostdata[0];
+
+	ata_host_init(ap, host, host_set, ent, port_no);
+
+	rc = ap->ops->port_start(ap);
+	if (rc)
+		goto err_out;
+
+	return ap;
+
+err_out:
+	scsi_host_put(host);
+	return NULL;
+}
+
+/**
+ *	ata_device_add - Register hardware device with ATA and SCSI layers
+ *	@ent: Probe information describing hardware device to be registered
+ *
+ *	This function processes the information provided in the probe
+ *	information struct @ent, allocates the necessary ATA and SCSI
+ *	host information structures, initializes them, and registers
+ *	everything with requisite kernel subsystems.
+ *
+ *	This function requests irqs, probes the ATA bus, and probes
+ *	the SCSI bus.
+ *
+ *	LOCKING:
+ *	PCI/etc. bus probe sem.
+ *
+ *	RETURNS:
+ *	Number of ports registered.  Zero on error (no ports registered).
+ */
+
+int ata_device_add(const struct ata_probe_ent *ent)
+{
+	unsigned int count = 0, i;
+	struct device *dev = ent->dev;
+	struct ata_host_set *host_set;
+
+	DPRINTK("ENTER\n");
+	/* alloc a container for our list of ATA ports (buses) */
+	host_set = kzalloc(sizeof(struct ata_host_set) +
+			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
+	if (!host_set)
+		return 0;
+	spin_lock_init(&host_set->lock);
+
+	host_set->dev = dev;
+	host_set->n_ports = ent->n_ports;
+	host_set->irq = ent->irq;
+	host_set->mmio_base = ent->mmio_base;
+	host_set->private_data = ent->private_data;
+	host_set->ops = ent->port_ops;
+
+	/* register each port bound to this device */
+	for (i = 0; i < ent->n_ports; i++) {
+		struct ata_port *ap;
+		unsigned long xfer_mode_mask;
+
+		ap = ata_host_add(ent, host_set, i);
+		if (!ap)
+			goto err_out;
+
+		host_set->ports[i] = ap;
+		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
+				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
+				(ap->pio_mask << ATA_SHIFT_PIO);
+
+		/* print per-port info to dmesg */
+		printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
+				 "bmdma 0x%lX irq %lu\n",
+			ap->id,
+			ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
+			ata_mode_string(xfer_mode_mask),
+	       		ap->ioaddr.cmd_addr,
+	       		ap->ioaddr.ctl_addr,
+	       		ap->ioaddr.bmdma_addr,
+	       		ent->irq);
+
+		ata_chk_status(ap);
+		host_set->ops->irq_clear(ap);
+		count++;
+	}
+
+	if (!count)
+		goto err_free_ret;
+
+	/* obtain irq, that is shared between channels */
+	if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
+			DRV_NAME, host_set))
+		goto err_out;
+
+	/* perform each probe synchronously */
+	DPRINTK("probe begin\n");
+	for (i = 0; i < count; i++) {
+		struct ata_port *ap;
+		int rc;
+
+		ap = host_set->ports[i];
+
+		DPRINTK("ata%u: probe begin\n", ap->id);
+		rc = ata_bus_probe(ap);
+		DPRINTK("ata%u: probe end\n", ap->id);
+
+		if (rc) {
+			/* FIXME: do something useful here?
+			 * Current libata behavior will
+			 * tear down everything when
+			 * the module is removed
+			 * or the h/w is unplugged.
+			 */
+		}
+
+		rc = scsi_add_host(ap->host, dev);
+		if (rc) {
+			printk(KERN_ERR "ata%u: scsi_add_host failed\n",
+			       ap->id);
+			/* FIXME: do something useful here */
+			/* FIXME: handle unconditional calls to
+			 * scsi_scan_host and ata_host_remove, below,
+			 * at the very least
+			 */
+		}
+	}
+
+	/* probes are done, now scan each port's disk(s) */
+	DPRINTK("probe begin\n");
+	for (i = 0; i < count; i++) {
+		struct ata_port *ap = host_set->ports[i];
+
+		ata_scsi_scan_host(ap);
+	}
+
+	dev_set_drvdata(dev, host_set);
+
+	VPRINTK("EXIT, returning %u\n", ent->n_ports);
+	return ent->n_ports; /* success */
+
+err_out:
+	for (i = 0; i < count; i++) {
+		ata_host_remove(host_set->ports[i], 1);
+		scsi_host_put(host_set->ports[i]->host);
+	}
+err_free_ret:
+	kfree(host_set);
+	VPRINTK("EXIT, returning 0\n");
+	return 0;
+}
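
To make the calling convention concrete, here is a hedged sketch of how a low-level driver's probe routine might feed ata_device_add(); example_sht and example_ops are hypothetical placeholders, and error handling is reduced to the minimum:

	struct ata_probe_ent *probe_ent;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent)
		return -ENOMEM;

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = pci_dev_to_dev(pdev);
	probe_ent->sht = &example_sht;		/* hypothetical template */
	probe_ent->port_ops = &example_ops;	/* hypothetical ops table */
	probe_ent->n_ports = 1;
	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->pio_mask = 0x1f;
	/* ... fill probe_ent->port[0] with taskfile addresses here ... */

	if (!ata_device_add(probe_ent)) {
		kfree(probe_ent);
		return -ENODEV;
	}
	kfree(probe_ent);	/* never consumed; the caller always frees it */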
+
+/**
+ *	ata_host_set_remove - PCI layer callback for device removal
+ *	@host_set: ATA host set that was removed
+ *
+ *	Unregister all objects associated with this host set. Free those 
+ *	objects.
+ *
+ *	LOCKING:
+ *	Inherited from calling layer (may sleep).
+ */
+
+void ata_host_set_remove(struct ata_host_set *host_set)
+{
+	struct ata_port *ap;
+	unsigned int i;
+
+	for (i = 0; i < host_set->n_ports; i++) {
+		ap = host_set->ports[i];
+		scsi_remove_host(ap->host);
+	}
+
+	free_irq(host_set->irq, host_set);
+
+	for (i = 0; i < host_set->n_ports; i++) {
+		ap = host_set->ports[i];
+
+		ata_scsi_release(ap->host);
+
+		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
+			struct ata_ioports *ioaddr = &ap->ioaddr;
+
+			if (ioaddr->cmd_addr == 0x1f0)
+				release_region(0x1f0, 8);
+			else if (ioaddr->cmd_addr == 0x170)
+				release_region(0x170, 8);
+		}
+
+		scsi_host_put(ap->host);
+	}
+
+	if (host_set->ops->host_stop)
+		host_set->ops->host_stop(host_set);
+
+	kfree(host_set);
+}
+
+/**
+ *	ata_scsi_release - SCSI layer callback hook for host unload
+ *	@host: libata host to be unloaded
+ *
+ *	Performs all duties necessary to shut down a libata port...
+ *	Kill port kthread, disable port, and release resources.
+ *
+ *	LOCKING:
+ *	Inherited from SCSI layer.
+ *
+ *	RETURNS:
+ *	One.
+ */
+
+int ata_scsi_release(struct Scsi_Host *host)
+{
+	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
+
+	DPRINTK("ENTER\n");
+
+	ap->ops->port_disable(ap);
+	ata_host_remove(ap, 0);
+
+	DPRINTK("EXIT\n");
+	return 1;
+}
+
+/**
+ *	ata_std_ports - initialize ioaddr with standard port offsets.
+ *	@ioaddr: IO address structure to be initialized
+ *
+ *	Utility function which initializes data_addr, error_addr,
+ *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
+ *	device_addr, status_addr, and command_addr to standard offsets
+ *	relative to cmd_addr.
+ *
+ *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
+ */
+
+void ata_std_ports(struct ata_ioports *ioaddr)
+{
+	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
+	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
+	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
+	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
+	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
+	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
+	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
+	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
+	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
+	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
+}
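
As a usage sketch (the addresses are the standard legacy primary channel; probe_ent is assumed to be an ata_probe_ent the driver is filling in), the driver sets cmd_addr and ctl_addr itself and lets ata_std_ports() derive the rest:

	struct ata_ioports *ioaddr = &probe_ent->port[0];

	ioaddr->cmd_addr = 0x1f0;	/* legacy primary channel base */
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = 0x3f6;
	ata_std_ports(ioaddr);		/* fills data..command from cmd_addr */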
+
+static struct ata_probe_ent *
+ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
+{
+	struct ata_probe_ent *probe_ent;
+
+	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
+	if (!probe_ent) {
+		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
+		       kobject_name(&(dev->kobj)));
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&probe_ent->node);
+	probe_ent->dev = dev;
+
+	probe_ent->sht = port->sht;
+	probe_ent->host_flags = port->host_flags;
+	probe_ent->pio_mask = port->pio_mask;
+	probe_ent->mwdma_mask = port->mwdma_mask;
+	probe_ent->udma_mask = port->udma_mask;
+	probe_ent->port_ops = port->port_ops;
+
+	return probe_ent;
+}
+
+
+
+#ifdef CONFIG_PCI
+
+void ata_pci_host_stop (struct ata_host_set *host_set)
+{
+	struct pci_dev *pdev = to_pci_dev(host_set->dev);
+
+	pci_iounmap(pdev, host_set->mmio_base);
+}
+
+/**
+ *	ata_pci_init_native_mode - Initialize native-mode driver
+ *	@pdev:  pci device to be initialized
+ *	@port:  array[2] of pointers to port info structures.
+ *	@ports: bitmap of ports present
+ *
+ *	Utility function which allocates and initializes an
+ *	ata_probe_ent structure for a standard dual-port
+ *	PIO-based IDE controller.  The returned ata_probe_ent
+ *	structure can be passed to ata_device_add(), after which it
+ *	should be freed with kfree().
+ *
+ *	The caller need only pass the address of the primary port; the
+ *	secondary will be deduced automatically. If the device has
+ *	non-standard secondary port mappings, this function can be called
+ *	twice, once for each interface.
+ */
+
+struct ata_probe_ent *
+ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
+{
+	struct ata_probe_ent *probe_ent =
+		ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
+	int p = 0;
+
+	if (!probe_ent)
+		return NULL;
+
+	probe_ent->irq = pdev->irq;
+	probe_ent->irq_flags = SA_SHIRQ;
+	probe_ent->private_data = port[0]->private_data;
+
+	if (ports & ATA_PORT_PRIMARY) {
+		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
+		probe_ent->port[p].altstatus_addr =
+		probe_ent->port[p].ctl_addr =
+			pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
+		probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
+		ata_std_ports(&probe_ent->port[p]);
+		p++;
+	}
+
+	if (ports & ATA_PORT_SECONDARY) {
+		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
+		probe_ent->port[p].altstatus_addr =
+		probe_ent->port[p].ctl_addr =
+			pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
+		probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
+		ata_std_ports(&probe_ent->port[p]);
+		p++;
+	}
+
+	probe_ent->n_ports = p;
+	return probe_ent;
+}
+
+static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num)
+{
+	struct ata_probe_ent *probe_ent;
+
+	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
+	if (!probe_ent)
+		return NULL;
+
+	probe_ent->legacy_mode = 1;
+	probe_ent->n_ports = 1;
+	probe_ent->hard_port_no = port_num;
+	probe_ent->private_data = port->private_data;
+
+	switch(port_num)
+	{
+		case 0:
+			probe_ent->irq = 14;
+			probe_ent->port[0].cmd_addr = 0x1f0;
+			probe_ent->port[0].altstatus_addr =
+			probe_ent->port[0].ctl_addr = 0x3f6;
+			break;
+		case 1:
+			probe_ent->irq = 15;
+			probe_ent->port[0].cmd_addr = 0x170;
+			probe_ent->port[0].altstatus_addr =
+			probe_ent->port[0].ctl_addr = 0x376;
+			break;
+	}
+	probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
+	ata_std_ports(&probe_ent->port[0]);
+	return probe_ent;
+}
+
+/**
+ *	ata_pci_init_one - Initialize/register PCI IDE host controller
+ *	@pdev: Controller to be initialized
+ *	@port_info: Information from low-level host driver
+ *	@n_ports: Number of ports attached to host controller
+ *
+ *	This is a helper function which can be called from a driver's
+ *	xxx_init_one() probe function if the hardware uses traditional
+ *	IDE taskfile registers.
+ *
+ *	This function calls pci_enable_device(), reserves its register
+ *	regions, sets the dma mask, enables bus master mode, and calls
+ *	ata_device_add()
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ *
+ *	RETURNS:
+ *	Zero on success, negative errno-based value on error.
+ */
+
+int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
+		      unsigned int n_ports)
+{
+	struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
+	struct ata_port_info *port[2];
+	u8 tmp8, mask;
+	unsigned int legacy_mode = 0;
+	int disable_dev_on_err = 1;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	port[0] = port_info[0];
+	if (n_ports > 1)
+		port[1] = port_info[1];
+	else
+		port[1] = port[0];
+
+	if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
+	    && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
+		/* TODO: What if one channel is in native mode ... */
+		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
+		mask = (1 << 2) | (1 << 0);
+		if ((tmp8 & mask) != mask)
+			legacy_mode = (1 << 3);
+	}
+
+	/* FIXME... */
+	if ((!legacy_mode) && (n_ports > 2)) {
+		printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
+		n_ports = 2;
+		/* For now */
+	}
+
+	/* FIXME: This really isn't safe for ATA, because the device may be
+	   multi-purpose and we want to leave it alone if it was already
+	   enabled. Secondly, for shared use, as Arjan says, we want
+	   refcounting.
+
+	   Checking dev->is_enabled is insufficient, as it is not set at
+	   boot for the primary video device, which is BIOS-enabled.
+	 */
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc) {
+		disable_dev_on_err = 0;
+		goto err_out;
+	}
+
+	/* FIXME: Should use platform specific mappers for legacy port ranges */
+	if (legacy_mode) {
+		if (!request_region(0x1f0, 8, "libata")) {
+			struct resource *conflict, res;
+			res.start = 0x1f0;
+			res.end = 0x1f0 + 8 - 1;
+			conflict = ____request_resource(&ioport_resource, &res);
+			if (!strcmp(conflict->name, "libata"))
+				legacy_mode |= (1 << 0);
+			else {
+				disable_dev_on_err = 0;
+				printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
+			}
+		} else
+			legacy_mode |= (1 << 0);
+
+		if (!request_region(0x170, 8, "libata")) {
+			struct resource *conflict, res;
+			res.start = 0x170;
+			res.end = 0x170 + 8 - 1;
+			conflict = ____request_resource(&ioport_resource, &res);
+			if (!strcmp(conflict->name, "libata"))
+				legacy_mode |= (1 << 1);
+			else {
+				disable_dev_on_err = 0;
+				printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
+			}
+		} else
+			legacy_mode |= (1 << 1);
+	}
+
+	/* we have legacy mode, but all ports are unavailable */
+	if (legacy_mode == (1 << 3)) {
+		rc = -EBUSY;
+		goto err_out_regions;
+	}
+
+	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		goto err_out_regions;
+	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
+	if (rc)
+		goto err_out_regions;
+
+	if (legacy_mode) {
+		if (legacy_mode & (1 << 0))
+			probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
+		if (legacy_mode & (1 << 1))
+			probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
+	} else {
+		if (n_ports == 2)
+			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
+		else
+			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
+	}
+	if (!probe_ent && !probe_ent2) {
+		rc = -ENOMEM;
+		goto err_out_regions;
+	}
+
+	pci_set_master(pdev);
+
+	/* FIXME: check ata_device_add return */
+	if (legacy_mode) {
+		if (legacy_mode & (1 << 0))
+			ata_device_add(probe_ent);
+		if (legacy_mode & (1 << 1))
+			ata_device_add(probe_ent2);
+	} else
+		ata_device_add(probe_ent);
+
+	kfree(probe_ent);
+	kfree(probe_ent2);
+
+	return 0;
+
+err_out_regions:
+	if (legacy_mode & (1 << 0))
+		release_region(0x1f0, 8);
+	if (legacy_mode & (1 << 1))
+		release_region(0x170, 8);
+	pci_release_regions(pdev);
+err_out:
+	if (disable_dev_on_err)
+		pci_disable_device(pdev);
+	return rc;
+}
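
A typical caller is the PCI probe routine of a low-level driver. The sketch below is hypothetical (the template, ops table and transfer-mode masks are placeholders), showing only the plumbing:

	static struct ata_port_info example_port_info = {
		.sht		= &example_sht,		/* hypothetical SCSI template */
		.host_flags	= ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
		.pio_mask	= 0x1f,			/* PIO modes 0-4 */
		.mwdma_mask	= 0x07,			/* MWDMA modes 0-2 */
		.udma_mask	= 0x3f,			/* UDMA modes 0-5 */
		.port_ops	= &example_ops,		/* hypothetical ops table */
	};

	static int example_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *id)
	{
		struct ata_port_info *port_info[2];

		port_info[0] = port_info[1] = &example_port_info;
		return ata_pci_init_one(pdev, port_info, 2);
	}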
+
+/**
+ *	ata_pci_remove_one - PCI layer callback for device removal
+ *	@pdev: PCI device that was removed
+ *
+ *	PCI layer indicates to libata via this hook that
+ *	hot-unplug or module unload event has occurred.
+ *	Handle this by unregistering all objects associated
+ *	with this PCI device.  Free those objects.  Then finally
+ *	release PCI resources and disable device.
+ *
+ *	LOCKING:
+ *	Inherited from PCI layer (may sleep).
+ */
+
+void ata_pci_remove_one (struct pci_dev *pdev)
+{
+	struct device *dev = pci_dev_to_dev(pdev);
+	struct ata_host_set *host_set = dev_get_drvdata(dev);
+
+	ata_host_set_remove(host_set);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	dev_set_drvdata(dev, NULL);
+}
+
+/* move to PCI subsystem */
+int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
+{
+	unsigned long tmp = 0;
+
+	switch (bits->width) {
+	case 1: {
+		u8 tmp8 = 0;
+		pci_read_config_byte(pdev, bits->reg, &tmp8);
+		tmp = tmp8;
+		break;
+	}
+	case 2: {
+		u16 tmp16 = 0;
+		pci_read_config_word(pdev, bits->reg, &tmp16);
+		tmp = tmp16;
+		break;
+	}
+	case 4: {
+		u32 tmp32 = 0;
+		pci_read_config_dword(pdev, bits->reg, &tmp32);
+		tmp = tmp32;
+		break;
+	}
+
+	default:
+		return -EINVAL;
+	}
+
+	tmp &= bits->mask;
+
+	return (tmp == bits->val) ? 1 : 0;
+}
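
pci_test_config_bits() returns 1 when the masked register equals the expected value, 0 when it does not, and -EINVAL for an unsupported width. A hedged sketch of the typical "is this channel enabled in hardware" check (the register offset and bit are made up):

	/* hypothetical: test whether the primary channel's enable bit is set */
	static const struct pci_bits example_enable_bits = {
		0x41,	/* reg: config space offset */
		1,	/* width: read one byte */
		0x80,	/* mask */
		0x80,	/* val: the masked bits must equal this */
	};

	if (pci_test_config_bits(pdev, &example_enable_bits) != 1)
		return -ENOENT;		/* channel disabled (or bad width) */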
+#endif /* CONFIG_PCI */
+
+
+static int __init ata_init(void)
+{
+	ata_wq = create_workqueue("ata");
+	if (!ata_wq)
+		return -ENOMEM;
+
+	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
+	return 0;
+}
+
+static void __exit ata_exit(void)
+{
+	destroy_workqueue(ata_wq);
+}
+
+module_init(ata_init);
+module_exit(ata_exit);
+
+static unsigned long ratelimit_time;
+static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
+
+int ata_ratelimit(void)
+{
+	int rc;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ata_ratelimit_lock, flags);
+
+	if (time_after(jiffies, ratelimit_time)) {
+		rc = 1;
+		ratelimit_time = jiffies + (HZ/5);
+	} else
+		rc = 0;
+
+	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
+
+	return rc;
+}
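
Because ratelimit_time is refilled HZ/5 jiffies ahead each time it expires, ata_ratelimit() grants at most roughly five messages per second. A usage sketch (the message text is illustrative):

	/* throttle warnings during an interrupt storm */
	if (ata_ratelimit())
		printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);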
+
+/*
+ * libata is essentially a library of internal helper functions for
+ * low-level ATA host controller drivers.  As such, the API/ABI is
+ * likely to change as new drivers are added and updated.
+ * Do not depend on ABI/API stability.
+ */
+
+EXPORT_SYMBOL_GPL(ata_std_bios_param);
+EXPORT_SYMBOL_GPL(ata_std_ports);
+EXPORT_SYMBOL_GPL(ata_device_add);
+EXPORT_SYMBOL_GPL(ata_host_set_remove);
+EXPORT_SYMBOL_GPL(ata_sg_init);
+EXPORT_SYMBOL_GPL(ata_sg_init_one);
+EXPORT_SYMBOL_GPL(ata_qc_complete);
+EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
+EXPORT_SYMBOL_GPL(ata_eng_timeout);
+EXPORT_SYMBOL_GPL(ata_tf_load);
+EXPORT_SYMBOL_GPL(ata_tf_read);
+EXPORT_SYMBOL_GPL(ata_noop_dev_select);
+EXPORT_SYMBOL_GPL(ata_std_dev_select);
+EXPORT_SYMBOL_GPL(ata_tf_to_fis);
+EXPORT_SYMBOL_GPL(ata_tf_from_fis);
+EXPORT_SYMBOL_GPL(ata_check_status);
+EXPORT_SYMBOL_GPL(ata_altstatus);
+EXPORT_SYMBOL_GPL(ata_exec_command);
+EXPORT_SYMBOL_GPL(ata_port_start);
+EXPORT_SYMBOL_GPL(ata_port_stop);
+EXPORT_SYMBOL_GPL(ata_host_stop);
+EXPORT_SYMBOL_GPL(ata_interrupt);
+EXPORT_SYMBOL_GPL(ata_qc_prep);
+EXPORT_SYMBOL_GPL(ata_bmdma_setup);
+EXPORT_SYMBOL_GPL(ata_bmdma_start);
+EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
+EXPORT_SYMBOL_GPL(ata_bmdma_status);
+EXPORT_SYMBOL_GPL(ata_bmdma_stop);
+EXPORT_SYMBOL_GPL(ata_port_probe);
+EXPORT_SYMBOL_GPL(sata_phy_reset);
+EXPORT_SYMBOL_GPL(__sata_phy_reset);
+EXPORT_SYMBOL_GPL(ata_bus_reset);
+EXPORT_SYMBOL_GPL(ata_port_disable);
+EXPORT_SYMBOL_GPL(ata_ratelimit);
+EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
+EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
+EXPORT_SYMBOL_GPL(ata_scsi_error);
+EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
+EXPORT_SYMBOL_GPL(ata_scsi_release);
+EXPORT_SYMBOL_GPL(ata_host_intr);
+EXPORT_SYMBOL_GPL(ata_dev_classify);
+EXPORT_SYMBOL_GPL(ata_dev_id_string);
+EXPORT_SYMBOL_GPL(ata_dev_config);
+EXPORT_SYMBOL_GPL(ata_scsi_simulate);
+
+EXPORT_SYMBOL_GPL(ata_timing_compute);
+EXPORT_SYMBOL_GPL(ata_timing_merge);
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL_GPL(pci_test_config_bits);
+EXPORT_SYMBOL_GPL(ata_pci_host_stop);
+EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
+EXPORT_SYMBOL_GPL(ata_pci_init_one);
+EXPORT_SYMBOL_GPL(ata_pci_remove_one);
+#endif /* CONFIG_PCI */
diff -urN linux-2.6.15/fs/bio.c linux-2.6.15-no1/fs/bio.c
--- linux-2.6.15/fs/bio.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/fs/bio.c	2006-01-26 15:41:13.998759632 +0000
@@ -1198,11 +1198,11 @@
 		scale = 4;
 
 	/*
-	 * scale number of entries
+	 * Limit number of entries reserved -- mempools are only used when
+	 * the system is completely unable to allocate memory, so we only
+	 * need enough to make progress.
 	 */
-	bvec_pool_entries = megabytes * 2;
-	if (bvec_pool_entries > 256)
-		bvec_pool_entries = 256;
+	bvec_pool_entries = 1 + scale;
 
 	fs_bio_set = bioset_create(BIO_POOL_SIZE, bvec_pool_entries, scale);
 	if (!fs_bio_set)
diff -urN linux-2.6.15/fs/ext3/balloc.c linux-2.6.15-no1/fs/ext3/balloc.c
--- linux-2.6.15/fs/ext3/balloc.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/fs/ext3/balloc.c	2006-01-26 15:41:14.000759328 +0000
@@ -654,9 +654,11 @@
  */
 static int
 ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
-	struct buffer_head *bitmap_bh, int goal, struct ext3_reserve_window *my_rsv)
+			struct buffer_head *bitmap_bh, int goal,
+			unsigned long *count, struct ext3_reserve_window *my_rsv)
 {
 	int group_first_block, start, end;
+	unsigned long num = 0;
 
 	/* we do allocation within the reservation window if we have a window */
 	if (my_rsv) {
@@ -714,8 +716,18 @@
 			goto fail_access;
 		goto repeat;
 	}
-	return goal;
+	num++;
+	goal++;
+	while (num < *count && goal < end
+		&& ext3_test_allocatable(goal, bitmap_bh)
+		&& claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) {
+		num++;
+		goal++;
+	}
+	*count = num;
+	return goal - num;
 fail_access:
+	*count = num;
 	return -1;
 }
 
@@ -1000,6 +1012,31 @@
 	goto retry;
 }
 
+static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
+			struct super_block *sb, int size)
+{
+	struct ext3_reserve_window_node *next_rsv;
+	struct rb_node *next;
+	spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
+
+	if (!spin_trylock(rsv_lock))
+		return;
+
+	next = rb_next(&my_rsv->rsv_node);
+
+	if (!next)
+		my_rsv->rsv_end += size;
+	else {
+		next_rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node);
+
+		if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
+			my_rsv->rsv_end += size;
+		else
+			my_rsv->rsv_end = next_rsv->rsv_start - 1;
+	}
+	spin_unlock(rsv_lock);
+}
+
 /*
  * This is the main function used to allocate a new block and its reservation
  * window.
@@ -1025,11 +1062,12 @@
 ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
 			unsigned int group, struct buffer_head *bitmap_bh,
 			int goal, struct ext3_reserve_window_node * my_rsv,
-			int *errp)
+			unsigned long *count, int *errp)
 {
 	unsigned long group_first_block;
 	int ret = 0;
 	int fatal;
+	unsigned long num = *count;
 
 	*errp = 0;
 
@@ -1052,7 +1090,8 @@
 	 * or last attempt to allocate a block with reservation turned on failed
 	 */
 	if (my_rsv == NULL ) {
-		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, NULL);
+		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
+						goal, count, NULL);
 		goto out;
 	}
 	/*
@@ -1082,6 +1121,8 @@
 	while (1) {
 		if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
 			!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) {
+			if (my_rsv->rsv_goal_size < *count)
+				my_rsv->rsv_goal_size = *count;
 			ret = alloc_new_reservation(my_rsv, goal, sb,
 							group, bitmap_bh);
 			if (ret < 0)
@@ -1089,16 +1130,21 @@
 
 			if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb))
 				goal = -1;
-		}
+		} else if (goal > 0 && (my_rsv->rsv_end-goal+1) < *count)
+			try_to_extend_reservation(my_rsv, sb,
+					*count-my_rsv->rsv_end + goal - 1);
+
 		if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb))
 		    || (my_rsv->rsv_end < group_first_block))
 			BUG();
 		ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal,
-					   &my_rsv->rsv_window);
+					   &num, &my_rsv->rsv_window);
 		if (ret >= 0) {
-			my_rsv->rsv_alloc_hit++;
+			my_rsv->rsv_alloc_hit += num;
+			*count = num;
 			break;				/* succeed */
 		}
+		num = *count;
 	}
 out:
 	if (ret >= 0) {
@@ -1155,8 +1201,8 @@
  * bitmap, and then for any free bit if that fails.
  * This function also updates quota and i_blocks field.
  */
-int ext3_new_block(handle_t *handle, struct inode *inode,
-			unsigned long goal, int *errp)
+int ext3_new_blocks(handle_t *handle, struct inode *inode,
+			unsigned long goal, unsigned long *count, int *errp)
 {
 	struct buffer_head *bitmap_bh = NULL;
 	struct buffer_head *gdp_bh;
@@ -1179,6 +1225,7 @@
 	static int goal_hits, goal_attempts;
 #endif
 	unsigned long ngroups;
+	unsigned long num = *count;
 
 	*errp = -ENOSPC;
 	sb = inode->i_sb;
@@ -1190,7 +1237,7 @@
 	/*
 	 * Check quota for allocation of this block.
 	 */
-	if (DQUOT_ALLOC_BLOCK(inode, 1)) {
+	if (DQUOT_ALLOC_BLOCK(inode, num)) {
 		*errp = -EDQUOT;
 		return 0;
 	}
@@ -1245,7 +1292,7 @@
 		if (!bitmap_bh)
 			goto io_error;
 		ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
-					bitmap_bh, ret_block, my_rsv, &fatal);
+					bitmap_bh, ret_block, my_rsv, &num, &fatal);
 		if (fatal)
 			goto out;
 		if (ret_block >= 0)
@@ -1282,7 +1329,7 @@
 		if (!bitmap_bh)
 			goto io_error;
 		ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
-					bitmap_bh, -1, my_rsv, &fatal);
+					bitmap_bh, -1, my_rsv, &num, &fatal);
 		if (fatal)
 			goto out;
 		if (ret_block >= 0) 
@@ -1317,13 +1364,15 @@
 	target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb)
 				+ le32_to_cpu(es->s_first_data_block);
 
-	if (target_block == le32_to_cpu(gdp->bg_block_bitmap) ||
-	    target_block == le32_to_cpu(gdp->bg_inode_bitmap) ||
+	if (in_range(le32_to_cpu(gdp->bg_block_bitmap), target_block, num) ||
+	    in_range(le32_to_cpu(gdp->bg_inode_bitmap), target_block, num) ||
 	    in_range(target_block, le32_to_cpu(gdp->bg_inode_table),
+		      EXT3_SB(sb)->s_itb_per_group) ||
+	    in_range(target_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
 		      EXT3_SB(sb)->s_itb_per_group))
 		ext3_error(sb, "ext3_new_block",
 			    "Allocating block in system zone - "
-			    "block = %u", target_block);
+			    "blocks from %u, length %lu", target_block, num);
 
 	performed_allocation = 1;
 
@@ -1342,10 +1391,14 @@
 	jbd_lock_bh_state(bitmap_bh);
 	spin_lock(sb_bgl_lock(sbi, group_no));
 	if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
-		if (ext3_test_bit(ret_block,
-				bh2jh(bitmap_bh)->b_committed_data)) {
-			printk("%s: block was unexpectedly set in "
-				"b_committed_data\n", __FUNCTION__);
+		int i;
+
+		for (i = 0; i < num; i++) {
+			if (ext3_test_bit(ret_block + i,
+					bh2jh(bitmap_bh)->b_committed_data)) {
+				printk("%s: block was unexpectedly set in "
+					"b_committed_data\n", __FUNCTION__);
+			}
 		}
 	}
 	ext3_debug("found bit %d\n", ret_block);
@@ -1356,7 +1409,7 @@
 	/* ret_block was blockgroup-relative.  Now it becomes fs-relative */
 	ret_block = target_block;
 
-	if (ret_block >= le32_to_cpu(es->s_blocks_count)) {
+	if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) {
 		ext3_error(sb, "ext3_new_block",
 			    "block(%d) >= blocks count(%d) - "
 			    "block_group = %d, es == %p ", ret_block,
@@ -1374,9 +1427,9 @@
 
 	spin_lock(sb_bgl_lock(sbi, group_no));
 	gdp->bg_free_blocks_count =
-			cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1);
+			cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - num);
 	spin_unlock(sb_bgl_lock(sbi, group_no));
-	percpu_counter_mod(&sbi->s_freeblocks_counter, -1);
+	percpu_counter_mod(&sbi->s_freeblocks_counter, -num);
 
 	BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
 	err = ext3_journal_dirty_metadata(handle, gdp_bh);
@@ -1389,6 +1442,8 @@
 
 	*errp = 0;
 	brelse(bitmap_bh);
+	DQUOT_FREE_BLOCK(inode, *count - num);
+	*count = num;
 	return ret_block;
 
 io_error:
@@ -1402,11 +1457,19 @@
 	 * Undo the block allocation
 	 */
 	if (!performed_allocation)
-		DQUOT_FREE_BLOCK(inode, 1);
+		DQUOT_FREE_BLOCK(inode, *count);
 	brelse(bitmap_bh);
 	return 0;
 }
 
+int ext3_new_block(handle_t *handle, struct inode *inode,
+			unsigned long goal, int *errp)
+{
+	unsigned long count = 1;
+
+	return ext3_new_blocks(handle, inode, goal, &count, errp);
+}
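
A hedged sketch of a caller that wants a contiguous extent (handle, inode and goal are assumed to be in scope; the request size is arbitrary):

	unsigned long count = 8;	/* ask for up to 8 contiguous blocks */
	unsigned long block;
	int err;

	block = ext3_new_blocks(handle, inode, goal, &count, &err);
	if (!block)
		return err;
	/* blocks block .. block + count - 1 are now allocated, and the
	 * quota charge has been trimmed to exactly 'count' blocks */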
+
 unsigned long ext3_count_free_blocks(struct super_block *sb)
 {
 	unsigned long desc_count;
diff -urN linux-2.6.15/fs/ext3/dir.c linux-2.6.15-no1/fs/ext3/dir.c
--- linux-2.6.15/fs/ext3/dir.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/fs/ext3/dir.c	2006-01-26 15:41:14.000759328 +0000
@@ -95,11 +95,10 @@
 			 void * dirent, filldir_t filldir)
 {
 	int error = 0;
-	unsigned long offset, blk;
-	int i, num, stored;
-	struct buffer_head * bh, * tmp, * bha[16];
-	struct ext3_dir_entry_2 * de;
-	struct super_block * sb;
+	unsigned long offset;
+	int i, stored;
+	struct ext3_dir_entry_2 *de;
+	struct super_block *sb;
 	int err;
 	struct inode *inode = filp->f_dentry->d_inode;
 	int ret = 0;
@@ -124,12 +123,30 @@
 	}
 #endif
 	stored = 0;
-	bh = NULL;
 	offset = filp->f_pos & (sb->s_blocksize - 1);
 
 	while (!error && !stored && filp->f_pos < inode->i_size) {
-		blk = (filp->f_pos) >> EXT3_BLOCK_SIZE_BITS(sb);
-		bh = ext3_bread(NULL, inode, blk, 0, &err);
+		unsigned long blk = filp->f_pos >> EXT3_BLOCK_SIZE_BITS(sb);
+		struct buffer_head map_bh;
+		struct buffer_head *bh = NULL;
+
+		map_bh.b_state = 0;
+		err = ext3_get_blocks_handle(NULL, inode, blk, 1,
+						&map_bh, 0, 0);
+		if (err > 0) {
+			page_cache_readahead(sb->s_bdev->bd_inode->i_mapping,
+				&filp->f_ra,
+				filp,
+				map_bh.b_blocknr >>
+					(PAGE_CACHE_SHIFT - inode->i_blkbits),
+				1);
+			bh = ext3_bread(NULL, inode, blk, 0, &err);
+		}
+
+		/*
+		 * We ignore I/O errors on directories so users have a chance
+		 * of recovering data when there's a bad sector
+		 */
 		if (!bh) {
 			ext3_error (sb, "ext3_readdir",
 				"directory #%lu contains a hole at offset %lu",
@@ -138,26 +155,6 @@
 			continue;
 		}
 
-		/*
-		 * Do the readahead
-		 */
-		if (!offset) {
-			for (i = 16 >> (EXT3_BLOCK_SIZE_BITS(sb) - 9), num = 0;
-			     i > 0; i--) {
-				tmp = ext3_getblk (NULL, inode, ++blk, 0, &err);
-				if (tmp && !buffer_uptodate(tmp) &&
-						!buffer_locked(tmp))
-					bha[num++] = tmp;
-				else
-					brelse (tmp);
-			}
-			if (num) {
-				ll_rw_block (READA, num, bha);
-				for (i = 0; i < num; i++)
-					brelse (bha[i]);
-			}
-		}
-
 revalidate:
 		/* If the dir block has changed since the last call to
 		 * readdir(2), then we might be pointing to an invalid
diff -urN linux-2.6.15/fs/ext3/inode.c linux-2.6.15-no1/fs/ext3/inode.c
--- linux-2.6.15/fs/ext3/inode.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/fs/ext3/inode.c	2006-01-26 15:41:14.003758872 +0000
@@ -235,16 +235,6 @@
 	clear_inode(inode);	/* We must guarantee clearing of inode... */
 }
 
-static int ext3_alloc_block (handle_t *handle,
-			struct inode * inode, unsigned long goal, int *err)
-{
-	unsigned long result;
-
-	result = ext3_new_block(handle, inode, goal, err);
-	return result;
-}
-
-
 typedef struct {
 	__le32	*p;
 	__le32	key;
@@ -330,7 +320,7 @@
 		ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
 	}
 	if (boundary)
-		*boundary = (i_block & (ptrs - 1)) == (final - 1);
+		*boundary = final - 1 - (i_block & (ptrs - 1));
 	return n;
 }
 
@@ -476,15 +466,115 @@
 
 	return ext3_find_near(inode, partial);
 }
+
+/**
+ *	ext3_blks_to_allocate: Look up the block map and count the number
+ *	of direct blocks that need to be allocated for the given branch.
+ *
+ *	@branch: chain of indirect blocks
+ *	@k: number of blocks needed for indirect blocks
+ *	@blks: number of data blocks to be mapped.
+ *	@blocks_to_boundary: the offset in the indirect block
+ *
+ *	return the total number of blocks to be allocated, including the
+ *	direct and indirect blocks.
+ */
+static int
+ext3_blks_to_allocate(Indirect * branch, int k, unsigned long blks,
+		int blocks_to_boundary)
+{
+	unsigned long count = 0;
+
+	/*
+	 * Simple case: the [t,d]indirect block(s) have not been allocated
+	 * yet, so clearly no blocks on that path have been allocated either.
+	 */
+	if (k > 0) {
+		/* right now we don't handle cross-boundary allocation */
+		if (blks < blocks_to_boundary + 1)
+			count += blks;
+		else
+			count += blocks_to_boundary + 1;
+		return count;
+	}
+
+	count++;
+	while (count < blks && count <= blocks_to_boundary
+		&& le32_to_cpu(*(branch[0].p + count)) == 0) {
+		count++;
+	}
+	return count;
+}
+
+/**
+ *	ext3_alloc_blocks: allocate multiple blocks needed for a branch
+ *	@indirect_blks: the number of blocks that need to be allocated for
+ *			indirect blocks
+ *	@new_blocks: on return it will store the new block numbers for
+ *	the indirect blocks (if needed) and the first direct block
+ *	@blks: on return it will store the total number of allocated
+ *		direct blocks
+ */
+static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
+			unsigned long goal, int indirect_blks, int blks,
+			unsigned long long new_blocks[4], int *err)
+{
+	int target;
+	unsigned long count;
+	int index, i;
+	unsigned long current_block = 0;
+	int ret = 0;
+
+	/*
+	 * Here we try to allocate the requested multiple blocks at once,
+	 * on a best-effort basis.
+	 * To build a branch, we need to allocate blocks for
+	 * the indirect blocks (if not allocated yet), plus at least
+	 * the first direct block of this branch.  That is the
+	 * minimum number of blocks we must allocate.
+	 */
+	target = blks + indirect_blks;
+	index = 0; count = 0;
+
+	while (1) {
+		count = target;
+		/* allocating blocks for indirect blocks and direct blocks */
+		current_block = ext3_new_blocks(handle, inode, goal, &count, err);
+		if (*err)
+			goto failed_out;
+
+		target -= count;
+		/* allocate blocks for indirect blocks */
+		while (index < indirect_blks && count) {
+			new_blocks[index++] = current_block++;
+			count--;
+		}
+
+		if (count > 0)
+			break;
+	}
+
+	/* save the new block number for the first direct block*/
+	new_blocks[index] = current_block;
+	/* total number of blocks allocated for direct blocks */
+	ret = count;
+	*err = 0;
+	return ret;
+failed_out:
+	for (i = 0; i < index; i++)
+		ext3_free_blocks(handle, inode, new_blocks[i], 1);
+	return ret;
+}
 
 /**
  *	ext3_alloc_branch - allocate and set up a chain of blocks.
  *	@inode: owner
- *	@num: depth of the chain (number of blocks to allocate)
+ *	@indirect_blks: number of allocated indirect blocks
+ *	@blks: number of allocated direct blocks
  *	@offsets: offsets (in the blocks) to store the pointers to next.
  *	@branch: place to store the chain in.
  *
- *	This function allocates @num blocks, zeroes out all but the last one,
+ *	This function allocates blocks, zeroes out all but the last one,
  *	links them into chain and (if we are synchronous) writes them to disk.
  *	In other words, it prepares a branch that can be spliced onto the
  *	inode. It stores the information about that chain in the branch[], in
@@ -503,71 +593,79 @@
  */
 
 static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
-			     int num,
-			     unsigned long goal,
-			     int *offsets,
-			     Indirect *branch)
+			int indirect_blks, int *blks, unsigned long goal,
+			int *offsets, Indirect *branch)
 {
 	int blocksize = inode->i_sb->s_blocksize;
-	int n = 0, keys = 0;
+	int i, n = 0;
 	int err = 0;
-	int i;
-	int parent = ext3_alloc_block(handle, inode, goal, &err);
-
-	branch[0].key = cpu_to_le32(parent);
-	if (parent) {
-		for (n = 1; n < num; n++) {
-			struct buffer_head *bh;
-			/* Allocate the next block */
-			int nr = ext3_alloc_block(handle, inode, parent, &err);
-			if (!nr)
-				break;
-			branch[n].key = cpu_to_le32(nr);
+	struct buffer_head *bh;
+	int num;
+	unsigned long long new_blocks[4];
+	unsigned long long current_block;
 
-			/*
-			 * Get buffer_head for parent block, zero it out
-			 * and set the pointer to new one, then send
-			 * parent to disk.  
-			 */
-			bh = sb_getblk(inode->i_sb, parent);
-			if (!bh)
-				break;
-			keys = n+1;
-			branch[n].bh = bh;
-			lock_buffer(bh);
-			BUFFER_TRACE(bh, "call get_create_access");
-			err = ext3_journal_get_create_access(handle, bh);
-			if (err) {
-				unlock_buffer(bh);
-				brelse(bh);
-				break;
-			}
+	num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
+					*blks, new_blocks, &err);
+	if (err)
+		return err;
 
-			memset(bh->b_data, 0, blocksize);
-			branch[n].p = (__le32*) bh->b_data + offsets[n];
-			*branch[n].p = branch[n].key;
-			BUFFER_TRACE(bh, "marking uptodate");
-			set_buffer_uptodate(bh);
+	branch[0].key = cpu_to_le32(new_blocks[0]);
+	/*
+	 * metadata blocks and data blocks are allocated.
+	 */
+	for (n = 1; n <= indirect_blks;  n++) {
+		/*
+		 * Get buffer_head for parent block, zero it out
+		 * and set the pointer to new one, then send
+		 * parent to disk.
+		 */
+		bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+		branch[n].bh = bh;
+		lock_buffer(bh);
+		BUFFER_TRACE(bh, "call get_create_access");
+		err = ext3_journal_get_create_access(handle, bh);
+		if (err) {
 			unlock_buffer(bh);
+			brelse(bh);
+			goto failed;
+		}
 
-			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			err = ext3_journal_dirty_metadata(handle, bh);
-			if (err)
-				break;
-
-			parent = nr;
+		memset(bh->b_data, 0, blocksize);
+		branch[n].p = (__le32*) bh->b_data + offsets[n];
+		branch[n].key = cpu_to_le32(new_blocks[n]);
+		*branch[n].p = branch[n].key;
+		if (n == indirect_blks) {
+			current_block = new_blocks[n];
+			/*
+			 * End of chain, update the last new metablock of
+			 * the chain to point to the new allocated
+			 * data blocks numbers
+			 */
+			for (i = 1; i < num; i++)
+				*(branch[n].p + i) = cpu_to_le32(++current_block);
 		}
-	}
-	if (n == num)
-		return 0;
+		BUFFER_TRACE(bh, "marking uptodate");
+		set_buffer_uptodate(bh);
+		unlock_buffer(bh);
 
+		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+		err = ext3_journal_dirty_metadata(handle, bh);
+		if (err)
+			goto failed;
+	}
+	*blks = num;
+	return err;
+failed:
 	/* Allocation failed, free what we already allocated */
-	for (i = 1; i < keys; i++) {
+	for (i = 1; i <= n; i++) {
 		BUFFER_TRACE(branch[i].bh, "call journal_forget");
 		ext3_journal_forget(handle, branch[i].bh);
 	}
-	for (i = 0; i < keys; i++)
-		ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
+	for (i = 0; i < indirect_blks; i++)
+		ext3_free_blocks(handle, inode, new_blocks[i], 1);
+
+	ext3_free_blocks(handle, inode, new_blocks[i], num);
+
 	return err;
 }
 
@@ -578,7 +676,8 @@
  *	@chain: chain of indirect blocks (with a missing link - see
  *		ext3_alloc_branch)
  *	@where: location of missing link
- *	@num:   number of blocks we are adding
+ *	@num:   number of indirect blocks we are adding
+ *	@blks:  number of direct blocks we are adding
  *
  *	This function fills the missing link and does all housekeeping needed in
  *	inode (->i_blocks, etc.). In case of success we end up with the full
@@ -586,12 +685,12 @@
  */
 
 static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
-			      Indirect chain[4], Indirect *where, int num)
+			      Indirect *where, int num, int blks)
 {
 	int i;
 	int err = 0;
 	struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;
-
+	unsigned long current_block;
 	/*
 	 * If we're splicing into a [td]indirect block (as opposed to the
 	 * inode) then we need to get write access to the [td]indirect block
@@ -606,6 +705,13 @@
 	/* That's it */
 
 	*where->p = where->key;
+	/* update the host buffer_head or inode to point to more
+	 * just-allocated direct blocks */
+	if (num == 0 && blks > 1) {
+		current_block = le32_to_cpu(where->key) + 1;
+		for (i = 1; i < blks; i++)
+			*(where->p + i) = cpu_to_le32(current_block++);
+	}
 
 	/*
 	 * update the most recently allocated logical & physical block
@@ -613,8 +719,8 @@
 	 * allocation
 	 */
 	if (block_i) {
-		block_i->last_alloc_logical_block = block;
-		block_i->last_alloc_physical_block = le32_to_cpu(where[num-1].key);
+		block_i->last_alloc_logical_block = block + blks - 1;
+		block_i->last_alloc_physical_block = le32_to_cpu(where[num].key) + blks - 1;
 	}
 
 	/* We are done with atomic stuff, now do the rest of housekeeping */
@@ -647,10 +753,13 @@
 	return err;
 
 err_out:
-	for (i = 1; i < num; i++) {
+	for (i = 1; i <= num; i++) {
 		BUFFER_TRACE(where[i].bh, "call journal_forget");
 		ext3_journal_forget(handle, where[i].bh);
+		ext3_free_blocks(handle, inode, le32_to_cpu(where[i-1].key), 1);
 	}
+	ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
+
 	return err;
 }
 
@@ -669,23 +778,30 @@
  * akpm: `handle' can be NULL if create == 0.
  *
  * The BKL may not be held on entry here.  Be sure to take it early.
+ * return > 0, # of blocks mapped or allocated
+ * return < 0, error case
  */
 
-static int
-ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
-		struct buffer_head *bh_result, int create, int extend_disksize)
+int
+ext3_get_blocks_handle(handle_t *handle, struct inode *inode, sector_t iblock,
+		unsigned long maxblocks, struct buffer_head *bh_result,
+		int create, int extend_disksize)
 {
 	int err = -EIO;
 	int offsets[4];
 	Indirect chain[4];
 	Indirect *partial;
 	unsigned long goal;
-	int left;
-	int boundary = 0;
-	const int depth = ext3_block_to_path(inode, iblock, offsets, &boundary);
+	int indirect_blks;
+	int blocks_to_boundary = 0;
+	int depth;
 	struct ext3_inode_info *ei = EXT3_I(inode);
+	int count = 0;
+	unsigned long first_block = 0;
+
 	J_ASSERT(handle != NULL || create == 0);
+	depth = ext3_block_to_path(inode, iblock, offsets, &blocks_to_boundary);
 
 	if (depth == 0)
 		goto out;
@@ -694,7 +810,29 @@
 
 	/* Simplest case - block found, no allocation needed */
 	if (!partial) {
+		first_block = chain[depth - 1].key;
 		clear_buffer_new(bh_result);
+		count++;
+		/*map more blocks*/
+		while (count < maxblocks && count <= blocks_to_boundary) {
+			if (!verify_chain(chain, partial)) {
+				/*
+				 * Indirect block might be removed by
+				 * truncate while we were reading it.
+				 * Handling of that case: forget what we've
+				 * got now, and flag the error as EAGAIN so
+				 * the caller will re-read.
+				 */
+				err = -EAGAIN;
+				count = 0;
+				break;
+			}
+			if (le32_to_cpu(*(chain[depth-1].p + count)) ==
+					(first_block + count))
+				count++;
+			else
+				break;
+		}
 		goto got_it;
 	}
 
@@ -723,6 +861,7 @@
 		}
 		partial = ext3_get_branch(inode, depth, offsets, chain, &err);
 		if (!partial) {
+			count++;
 			up(&ei->truncate_sem);
 			if (err)
 				goto cleanup;
@@ -740,12 +879,19 @@
 
 	goal = ext3_find_goal(inode, iblock, chain, partial);
 
-	left = (chain + depth) - partial;
+	/* the number of blocks need to allocate for [d,t]indirect blocks */
+	indirect_blks = (chain + depth) - partial - 1;
 
 	/*
+	 * Next look up the indirect map to count the total number of
+	 * direct blocks to allocate for this branch.
+	 */
+	count = ext3_blks_to_allocate(partial, indirect_blks,
+					maxblocks, blocks_to_boundary);
+	/*
 	 * Block out ext3_truncate while we alter the tree
 	 */
-	err = ext3_alloc_branch(handle, inode, left, goal,
+	err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
 				offsets + (partial - chain), partial);
 
 	/*
@@ -756,8 +902,8 @@
 	 * may need to return -EAGAIN upwards in the worst case.  --sct
 	 */
 	if (!err)
-		err = ext3_splice_branch(handle, inode, iblock, chain,
-					 partial, left);
+		err = ext3_splice_branch(handle, inode, iblock,
+					partial, indirect_blks, count);
 	/*
 	 * i_disksize growing is protected by truncate_sem.  Don't forget to
 	 * protect it if you're about to implement concurrent
@@ -772,8 +918,9 @@
 	set_buffer_new(bh_result);
 got_it:
 	map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
-	if (boundary)
+	if (blocks_to_boundary == 0)
 		set_buffer_boundary(bh_result);
+	err = count;
 	/* Clean up and exit */
 	partial = chain + depth - 1;	/* the whole chain */
 cleanup:
@@ -787,21 +934,6 @@
 	return err;
 }
 
-static int ext3_get_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create)
-{
-	handle_t *handle = NULL;
-	int ret;
-
-	if (create) {
-		handle = ext3_journal_current_handle();
-		J_ASSERT(handle != 0);
-	}
-	ret = ext3_get_block_handle(handle, inode, iblock,
-				bh_result, create, 1);
-	return ret;
-}
-
 #define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)
 
 static int
@@ -815,6 +947,9 @@
 	if (!handle)
 		goto get_block;		/* A read */
 
+	if (max_blocks == 1)
+		goto get_block;		/* A single block get */
+
 	if (handle->h_transaction->t_state == T_LOCKED) {
 		/*
 		 * Huge direct-io writes can hold off commits for long
@@ -841,13 +976,31 @@
 	}
 
 get_block:
-	if (ret == 0)
-		ret = ext3_get_block_handle(handle, inode, iblock,
-					bh_result, create, 0);
-	bh_result->b_size = (1 << inode->i_blkbits);
+	if (ret == 0) {
+		ret = ext3_get_blocks_handle(handle, inode, iblock,
+					max_blocks, bh_result, create, 0);
+		if (ret > 0) {
+			bh_result->b_size = (ret << inode->i_blkbits);
+			ret = 0;
+		}
+	}
 	return ret;
 }
 
+static int ext3_get_blocks(struct inode *inode, sector_t iblock,
+		unsigned long maxblocks, struct buffer_head *bh_result,
+		int create)
+{
+	return ext3_direct_io_get_blocks(inode, iblock, maxblocks,
+					bh_result, create);
+}
+
+static int ext3_get_block(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh_result, int create)
+{
+	return ext3_get_blocks(inode, iblock, 1, bh_result, create);
+}
+
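Mirroring the readdir() change earlier in this patch, a read-only lookup passes a NULL handle and create == 0, so ext3_get_blocks_handle() maps but never allocates. A minimal sketch (inode and blk are assumed to be in scope):

	struct buffer_head map_bh;
	sector_t blocknr = 0;
	int ret;

	map_bh.b_state = 0;
	ret = ext3_get_blocks_handle(NULL, inode, blk, 1, &map_bh, 0, 0);
	if (ret > 0 && buffer_mapped(&map_bh))
		blocknr = map_bh.b_blocknr;	/* on-disk block for 'blk' */
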
 /*
  * `handle' can be NULL if create is zero
  */
@@ -862,8 +1015,8 @@
 	dummy.b_state = 0;
 	dummy.b_blocknr = -1000;
 	buffer_trace_init(&dummy.b_history);
-	*errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
-	if (!*errp && buffer_mapped(&dummy)) {
+	*errp = ext3_get_blocks_handle(handle, inode, block, 1, &dummy, create, 1);
+	if ((*errp == 1) && buffer_mapped(&dummy)) {
 		struct buffer_head *bh;
 		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
 		if (!bh) {
diff -urN linux-2.6.15/fs/namespace.c linux-2.6.15-no1/fs/namespace.c
--- linux-2.6.15/fs/namespace.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/fs/namespace.c	2006-01-26 15:41:25.304040968 +0000
@@ -47,6 +47,10 @@
 static kmem_cache_t *mnt_cache;
 static struct rw_semaphore namespace_sem;
 
+/* /sys/fs */
+decl_subsys(fs, NULL, NULL);
+EXPORT_SYMBOL(fs_subsys);
+
 static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
 {
 	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
@@ -1714,6 +1718,7 @@
 		i--;
 	} while (i);
 	sysfs_init();
+	subsystem_register(&fs_subsys);
 	init_rootfs();
 	init_mount_tree();
 }
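
The exported fs_subsys kset gives filesystems a common /sys/fs parent to hang their own objects under. As a hedged sketch (the filesystem name and init function are hypothetical), a filesystem could parent its own subsystem beneath it like this:

	/* hypothetical filesystem publishing a kset as /sys/fs/examplefs */
	decl_subsys(examplefs, NULL, NULL);

	static int __init examplefs_sysfs_init(void)
	{
		kset_set_kset_s(&examplefs_subsys, fs_subsys);
		return subsystem_register(&examplefs_subsys);
	}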
diff -urN linux-2.6.15/fs/namespace.c.orig linux-2.6.15-no1/fs/namespace.c.orig
--- linux-2.6.15/fs/namespace.c.orig	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.15-no1/fs/namespace.c.orig	2006-01-03 03:21:10.000000000 +0000
@@ -0,0 +1,1734 @@
+/*
+ *  linux/fs/namespace.c
+ *
+ * (C) Copyright Al Viro 2000, 2001
+ *	Released under GPL v2.
+ *
+ * Based on code from fs/super.c, copyright Linus Torvalds and others.
+ * Heavily rewritten.
+ */
+
+#include <linux/config.h>
+#include <linux/syscalls.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/init.h>
+#include <linux/quotaops.h>
+#include <linux/acct.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/namespace.h>
+#include <linux/namei.h>
+#include <linux/security.h>
+#include <linux/mount.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include "pnode.h"
+
+extern int __init init_rootfs(void);
+
+#ifdef CONFIG_SYSFS
+extern int __init sysfs_init(void);
+#else
+static inline int sysfs_init(void)
+{
+	return 0;
+}
+#endif
+
+/* spinlock for vfsmount related operations, inplace of dcache_lock */
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
+
+static int event;
+
+static struct list_head *mount_hashtable;
+static int hash_mask __read_mostly, hash_bits __read_mostly;
+static kmem_cache_t *mnt_cache;
+static struct rw_semaphore namespace_sem;
+
+static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
+{
+	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
+	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
+	tmp = tmp + (tmp >> hash_bits);
+	return tmp & hash_mask;
+}
+
+struct vfsmount *alloc_vfsmnt(const char *name)
+{
+	struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
+	if (mnt) {
+		memset(mnt, 0, sizeof(struct vfsmount));
+		atomic_set(&mnt->mnt_count, 1);
+		INIT_LIST_HEAD(&mnt->mnt_hash);
+		INIT_LIST_HEAD(&mnt->mnt_child);
+		INIT_LIST_HEAD(&mnt->mnt_mounts);
+		INIT_LIST_HEAD(&mnt->mnt_list);
+		INIT_LIST_HEAD(&mnt->mnt_expire);
+		INIT_LIST_HEAD(&mnt->mnt_share);
+		INIT_LIST_HEAD(&mnt->mnt_slave_list);
+		INIT_LIST_HEAD(&mnt->mnt_slave);
+		if (name) {
+			int size = strlen(name) + 1;
+			char *newname = kmalloc(size, GFP_KERNEL);
+			if (newname) {
+				memcpy(newname, name, size);
+				mnt->mnt_devname = newname;
+			}
+		}
+	}
+	return mnt;
+}
+
+void free_vfsmnt(struct vfsmount *mnt)
+{
+	kfree(mnt->mnt_devname);
+	kmem_cache_free(mnt_cache, mnt);
+}
+
+/*
+ * find the first or last mount at @dentry on vfsmount @mnt depending on
+ * @dir. If @dir is set return the first mount else return the last mount.
+ */
+struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
+			      int dir)
+{
+	struct list_head *head = mount_hashtable + hash(mnt, dentry);
+	struct list_head *tmp = head;
+	struct vfsmount *p, *found = NULL;
+
+	for (;;) {
+		tmp = dir ? tmp->next : tmp->prev;
+		p = NULL;
+		if (tmp == head)
+			break;
+		p = list_entry(tmp, struct vfsmount, mnt_hash);
+		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
+			found = p;
+			break;
+		}
+	}
+	return found;
+}
+
+/*
+ * lookup_mnt increments the ref count before returning
+ * the vfsmount struct.
+ */
+struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
+{
+	struct vfsmount *child_mnt;
+	spin_lock(&vfsmount_lock);
+	if ((child_mnt = __lookup_mnt(mnt, dentry, 1)))
+		mntget(child_mnt);
+	spin_unlock(&vfsmount_lock);
+	return child_mnt;
+}
+
+static inline int check_mnt(struct vfsmount *mnt)
+{
+	return mnt->mnt_namespace == current->namespace;
+}
+
+static void touch_namespace(struct namespace *ns)
+{
+	if (ns) {
+		ns->event = ++event;
+		wake_up_interruptible(&ns->poll);
+	}
+}
+
+static void __touch_namespace(struct namespace *ns)
+{
+	if (ns && ns->event != event) {
+		ns->event = event;
+		wake_up_interruptible(&ns->poll);
+	}
+}
+
+static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
+{
+	old_nd->dentry = mnt->mnt_mountpoint;
+	old_nd->mnt = mnt->mnt_parent;
+	mnt->mnt_parent = mnt;
+	mnt->mnt_mountpoint = mnt->mnt_root;
+	list_del_init(&mnt->mnt_child);
+	list_del_init(&mnt->mnt_hash);
+	old_nd->dentry->d_mounted--;
+}
+
+void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
+			struct vfsmount *child_mnt)
+{
+	child_mnt->mnt_parent = mntget(mnt);
+	child_mnt->mnt_mountpoint = dget(dentry);
+	dentry->d_mounted++;
+}
+
+static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
+{
+	mnt_set_mountpoint(nd->mnt, nd->dentry, mnt);
+	list_add_tail(&mnt->mnt_hash, mount_hashtable +
+			hash(nd->mnt, nd->dentry));
+	list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts);
+}
+
+/*
+ * the caller must hold vfsmount_lock
+ */
+static void commit_tree(struct vfsmount *mnt)
+{
+	struct vfsmount *parent = mnt->mnt_parent;
+	struct vfsmount *m;
+	LIST_HEAD(head);
+	struct namespace *n = parent->mnt_namespace;
+
+	BUG_ON(parent == mnt);
+
+	list_add_tail(&head, &mnt->mnt_list);
+	list_for_each_entry(m, &head, mnt_list)
+		m->mnt_namespace = n;
+	list_splice(&head, n->list.prev);
+
+	list_add_tail(&mnt->mnt_hash, mount_hashtable +
+				hash(parent, mnt->mnt_mountpoint));
+	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
+	touch_namespace(n);
+}
+
+static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
+{
+	struct list_head *next = p->mnt_mounts.next;
+	if (next == &p->mnt_mounts) {
+		while (1) {
+			if (p == root)
+				return NULL;
+			next = p->mnt_child.next;
+			if (next != &p->mnt_parent->mnt_mounts)
+				break;
+			p = p->mnt_parent;
+		}
+	}
+	return list_entry(next, struct vfsmount, mnt_child);
+}
+
+static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
+{
+	struct list_head *prev = p->mnt_mounts.prev;
+	while (prev != &p->mnt_mounts) {
+		p = list_entry(prev, struct vfsmount, mnt_child);
+		prev = p->mnt_mounts.prev;
+	}
+	return p;
+}
+
+static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
+					int flag)
+{
+	struct super_block *sb = old->mnt_sb;
+	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);
+
+	if (mnt) {
+		mnt->mnt_flags = old->mnt_flags;
+		atomic_inc(&sb->s_active);
+		mnt->mnt_sb = sb;
+		mnt->mnt_root = dget(root);
+		mnt->mnt_mountpoint = mnt->mnt_root;
+		mnt->mnt_parent = mnt;
+
+		if (flag & CL_SLAVE) {
+			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
+			mnt->mnt_master = old;
+			CLEAR_MNT_SHARED(mnt);
+		} else {
+			if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
+				list_add(&mnt->mnt_share, &old->mnt_share);
+			if (IS_MNT_SLAVE(old))
+				list_add(&mnt->mnt_slave, &old->mnt_slave);
+			mnt->mnt_master = old->mnt_master;
+		}
+		if (flag & CL_MAKE_SHARED)
+			set_mnt_shared(mnt);
+
+		/* stick the duplicate mount on the same expiry list
+		 * as the original if that was on one */
+		if (flag & CL_EXPIRE) {
+			spin_lock(&vfsmount_lock);
+			if (!list_empty(&old->mnt_expire))
+				list_add(&mnt->mnt_expire, &old->mnt_expire);
+			spin_unlock(&vfsmount_lock);
+		}
+	}
+	return mnt;
+}
+
+static inline void __mntput(struct vfsmount *mnt)
+{
+	struct super_block *sb = mnt->mnt_sb;
+	dput(mnt->mnt_root);
+	free_vfsmnt(mnt);
+	deactivate_super(sb);
+}
+
+void mntput_no_expire(struct vfsmount *mnt)
+{
+repeat:
+	if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
+		if (likely(!mnt->mnt_pinned)) {
+			spin_unlock(&vfsmount_lock);
+			__mntput(mnt);
+			return;
+		}
+		atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
+		mnt->mnt_pinned = 0;
+		spin_unlock(&vfsmount_lock);
+		acct_auto_close_mnt(mnt);
+		security_sb_umount_close(mnt);
+		goto repeat;
+	}
+}
+
+EXPORT_SYMBOL(mntput_no_expire);
+
+void mnt_pin(struct vfsmount *mnt)
+{
+	spin_lock(&vfsmount_lock);
+	mnt->mnt_pinned++;
+	spin_unlock(&vfsmount_lock);
+}
+
+EXPORT_SYMBOL(mnt_pin);
+
+void mnt_unpin(struct vfsmount *mnt)
+{
+	spin_lock(&vfsmount_lock);
+	if (mnt->mnt_pinned) {
+		atomic_inc(&mnt->mnt_count);
+		mnt->mnt_pinned--;
+	}
+	spin_unlock(&vfsmount_lock);
+}
+
+EXPORT_SYMBOL(mnt_unpin);
+
+/* iterator */
+static void *m_start(struct seq_file *m, loff_t *pos)
+{
+	struct namespace *n = m->private;
+	struct list_head *p;
+	loff_t l = *pos;
+
+	down_read(&namespace_sem);
+	list_for_each(p, &n->list)
+		if (!l--)
+			return list_entry(p, struct vfsmount, mnt_list);
+	return NULL;
+}
+
+static void *m_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct namespace *n = m->private;
+	struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
+	(*pos)++;
+	return p == &n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
+}
+
+static void m_stop(struct seq_file *m, void *v)
+{
+	up_read(&namespace_sem);
+}
+
+static inline void mangle(struct seq_file *m, const char *s)
+{
+	seq_escape(m, s, " \t\n\\");
+}
+
+static int show_vfsmnt(struct seq_file *m, void *v)
+{
+	struct vfsmount *mnt = v;
+	int err = 0;
+	static struct proc_fs_info {
+		int flag;
+		char *str;
+	} fs_info[] = {
+		{ MS_SYNCHRONOUS, ",sync" },
+		{ MS_DIRSYNC, ",dirsync" },
+		{ MS_MANDLOCK, ",mand" },
+		{ MS_NOATIME, ",noatime" },
+		{ MS_NODIRATIME, ",nodiratime" },
+		{ 0, NULL }
+	};
+	static struct proc_fs_info mnt_info[] = {
+		{ MNT_NOSUID, ",nosuid" },
+		{ MNT_NODEV, ",nodev" },
+		{ MNT_NOEXEC, ",noexec" },
+		{ 0, NULL }
+	};
+	struct proc_fs_info *fs_infop;
+
+	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
+	seq_putc(m, ' ');
+	seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
+	seq_putc(m, ' ');
+	mangle(m, mnt->mnt_sb->s_type->name);
+	seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
+	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
+		if (mnt->mnt_sb->s_flags & fs_infop->flag)
+			seq_puts(m, fs_infop->str);
+	}
+	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
+		if (mnt->mnt_flags & fs_infop->flag)
+			seq_puts(m, fs_infop->str);
+	}
+	if (mnt->mnt_sb->s_op->show_options)
+		err = mnt->mnt_sb->s_op->show_options(m, mnt);
+	seq_puts(m, " 0 0\n");
+	return err;
+}
+
+struct seq_operations mounts_op = {
+	.start	= m_start,
+	.next	= m_next,
+	.stop	= m_stop,
+	.show	= show_vfsmnt
+};
+
+/**
+ * may_umount_tree - check if a mount tree is busy
+ * @mnt: root of mount tree
+ *
+ * This is called to check if a tree of mounts has any
+ * open files, pwds, chroots or sub mounts that are
+ * busy.
+ */
+int may_umount_tree(struct vfsmount *mnt)
+{
+	int actual_refs = 0;
+	int minimum_refs = 0;
+	struct vfsmount *p;
+
+	spin_lock(&vfsmount_lock);
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
+		actual_refs += atomic_read(&p->mnt_count);
+		minimum_refs += 2;
+	}
+	spin_unlock(&vfsmount_lock);
+
+	if (actual_refs > minimum_refs)
+		return -EBUSY;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(may_umount_tree);
+
+/**
+ * may_umount - check if a mount point is busy
+ * @mnt: root of mount
+ *
+ * This is called to check if a mount point has any
+ * open files, pwds, chroots or sub mounts. If the
+ * mount has sub mounts this will return busy
+ * regardless of whether the sub mounts are busy.
+ *
+ * Doesn't take quota and stuff into account. IOW, in some cases it will
+ * give false negatives. The main reason why it's here is that we need
+ * a non-destructive way to look for easily umountable filesystems.
+ */
+int may_umount(struct vfsmount *mnt)
+{
+	int ret = 0;
+	spin_lock(&vfsmount_lock);
+	if (propagate_mount_busy(mnt, 2))
+		ret = -EBUSY;
+	spin_unlock(&vfsmount_lock);
+	return ret;
+}
+
+EXPORT_SYMBOL(may_umount);
+
+void release_mounts(struct list_head *head)
+{
+	struct vfsmount *mnt;
+	while(!list_empty(head)) {
+		mnt = list_entry(head->next, struct vfsmount, mnt_hash);
+		list_del_init(&mnt->mnt_hash);
+		if (mnt->mnt_parent != mnt) {
+			struct dentry *dentry;
+			struct vfsmount *m;
+			spin_lock(&vfsmount_lock);
+			dentry = mnt->mnt_mountpoint;
+			m = mnt->mnt_parent;
+			mnt->mnt_mountpoint = mnt->mnt_root;
+			mnt->mnt_parent = mnt;
+			spin_unlock(&vfsmount_lock);
+			dput(dentry);
+			mntput(m);
+		}
+		mntput(mnt);
+	}
+}
+
+void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
+{
+	struct vfsmount *p;
+
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
+		list_del(&p->mnt_hash);
+		list_add(&p->mnt_hash, kill);
+	}
+
+	if (propagate)
+		propagate_umount(kill);
+
+	list_for_each_entry(p, kill, mnt_hash) {
+		list_del_init(&p->mnt_expire);
+		list_del_init(&p->mnt_list);
+		__touch_namespace(p->mnt_namespace);
+		p->mnt_namespace = NULL;
+		list_del_init(&p->mnt_child);
+		if (p->mnt_parent != p)
+			p->mnt_mountpoint->d_mounted--;
+		change_mnt_propagation(p, MS_PRIVATE);
+	}
+}
+
+static int do_umount(struct vfsmount *mnt, int flags)
+{
+	struct super_block *sb = mnt->mnt_sb;
+	int retval;
+	LIST_HEAD(umount_list);
+
+	retval = security_sb_umount(mnt, flags);
+	if (retval)
+		return retval;
+
+	/*
+	 * Allow userspace to request a mountpoint be expired rather than
+	 * unmounting unconditionally. Unmount only happens if:
+	 *  (1) the mark is already set (the mark is cleared by mntput())
+	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
+	 */
+	if (flags & MNT_EXPIRE) {
+		if (mnt == current->fs->rootmnt ||
+		    flags & (MNT_FORCE | MNT_DETACH))
+			return -EINVAL;
+
+		if (atomic_read(&mnt->mnt_count) != 2)
+			return -EBUSY;
+
+		if (!xchg(&mnt->mnt_expiry_mark, 1))
+			return -EAGAIN;
+	}
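+
+	/*
+	 * Illustrative userspace view of that handshake (not part of
+	 * this patch; assumes a libc exposing umount2() and MNT_EXPIRE):
+	 * the first call sets the mark and fails with EAGAIN, and a
+	 * later call succeeds only if nothing used the mount in between.
+	 *
+	 *	if (umount2(path, MNT_EXPIRE) < 0 && errno == EAGAIN) {
+	 *		sleep(interval);
+	 *		umount2(path, MNT_EXPIRE);
+	 *	}
+	 */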
+
+	/*
+	 * If we may have to abort operations to get out of this
+	 * mount, and they will themselves hold resources we must
+	 * allow the fs to do things. In the Unix tradition of
+	 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
+	 * might fail to complete on the first run through as other tasks
+	 * must return, and the like. That's for the mount program to worry
+	 * about for the moment.
+	 */
+
+	lock_kernel();
+	if ((flags & MNT_FORCE) && sb->s_op->umount_begin)
+		sb->s_op->umount_begin(sb);
+	unlock_kernel();
+
+	/*
+	 * No sense in grabbing the lock for this test, but the test itself
+	 * looks somewhat bogus. Suggestions for a better replacement?
+	 * Ho-hum... In principle, we might treat that as umount + switch
+	 * to rootfs. GC would eventually take care of the old vfsmount.
+	 * Actually it makes sense, especially if rootfs would contain a
+	 * /reboot - static binary that would close all descriptors and
+	 * call reboot(9). Then init(8) could umount root and exec /reboot.
+	 */
+	if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) {
+		/*
+		 * Special case for "unmounting" root ...
+		 * we just try to remount it readonly.
+		 */
+		down_write(&sb->s_umount);
+		if (!(sb->s_flags & MS_RDONLY)) {
+			lock_kernel();
+			DQUOT_OFF(sb);
+			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
+			unlock_kernel();
+		}
+		up_write(&sb->s_umount);
+		return retval;
+	}
+
+	down_write(&namespace_sem);
+	spin_lock(&vfsmount_lock);
+	event++;
+
+	retval = -EBUSY;
+	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
+		if (!list_empty(&mnt->mnt_list))
+			umount_tree(mnt, 1, &umount_list);
+		retval = 0;
+	}
+	spin_unlock(&vfsmount_lock);
+	if (retval)
+		security_sb_umount_busy(mnt);
+	up_write(&namespace_sem);
+	release_mounts(&umount_list);
+	return retval;
+}
+
+/*
+ * Now umount can handle mount points as well as block devices.
+ * This is important for filesystems which use unnamed block devices.
+ *
+ * We now support a flag for forced unmount like the other 'big iron'
+ * unixes. Our API is identical to OSF/1 to avoid making a mess of amd,
+ * the automounter.
+ */
+
+asmlinkage long sys_umount(char __user * name, int flags)
+{
+	struct nameidata nd;
+	int retval;
+
+	retval = __user_walk(name, LOOKUP_FOLLOW, &nd);
+	if (retval)
+		goto out;
+	retval = -EINVAL;
+	if (nd.dentry != nd.mnt->mnt_root)
+		goto dput_and_out;
+	if (!check_mnt(nd.mnt))
+		goto dput_and_out;
+
+	retval = -EPERM;
+	if (!capable(CAP_SYS_ADMIN))
+		goto dput_and_out;
+
+	retval = do_umount(nd.mnt, flags);
+dput_and_out:
+	path_release_on_umount(&nd);
+out:
+	return retval;
+}
+
+#ifdef __ARCH_WANT_SYS_OLDUMOUNT
+
+/*
+ *	The 2.0 compatible umount. No flags.
+ */
+asmlinkage long sys_oldumount(char __user * name)
+{
+	return sys_umount(name, 0);
+}
+
+#endif
+
+static int mount_is_safe(struct nameidata *nd)
+{
+	if (capable(CAP_SYS_ADMIN))
+		return 0;
+	return -EPERM;
+#ifdef notyet
+	if (S_ISLNK(nd->dentry->d_inode->i_mode))
+		return -EPERM;
+	if (nd->dentry->d_inode->i_mode & S_ISVTX) {
+		if (current->uid != nd->dentry->d_inode->i_uid)
+			return -EPERM;
+	}
+	if (vfs_permission(nd, MAY_WRITE))
+		return -EPERM;
+	return 0;
+#endif
+}
+
+static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
+{
+	while (1) {
+		if (d == dentry)
+			return 1;
+		if (d == NULL || d == d->d_parent)
+			return 0;
+		d = d->d_parent;
+	}
+}
+
+struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
+					int flag)
+{
+	struct vfsmount *res, *p, *q, *r, *s;
+	struct nameidata nd;
+
+	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
+		return NULL;
+
+	res = q = clone_mnt(mnt, dentry, flag);
+	if (!q)
+		goto Enomem;
+	q->mnt_mountpoint = mnt->mnt_mountpoint;
+
+	p = mnt;
+	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
+		if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
+			continue;
+
+		for (s = r; s; s = next_mnt(s, r)) {
+			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
+				s = skip_mnt_tree(s);
+				continue;
+			}
+			while (p != s->mnt_parent) {
+				p = p->mnt_parent;
+				q = q->mnt_parent;
+			}
+			p = s;
+			nd.mnt = q;
+			nd.dentry = p->mnt_mountpoint;
+			q = clone_mnt(p, p->mnt_root, flag);
+			if (!q)
+				goto Enomem;
+			spin_lock(&vfsmount_lock);
+			list_add_tail(&q->mnt_list, &res->mnt_list);
+			attach_mnt(q, &nd);
+			spin_unlock(&vfsmount_lock);
+		}
+	}
+	return res;
+Enomem:
+	if (res) {
+		LIST_HEAD(umount_list);
+		spin_lock(&vfsmount_lock);
+		umount_tree(res, 0, &umount_list);
+		spin_unlock(&vfsmount_lock);
+		release_mounts(&umount_list);
+	}
+	return NULL;
+}
+
+/*
+ *  @source_mnt : mount tree to be attached
+ *  @nd         : the place where the mount tree @source_mnt is attached
+ *  @parent_nd  : if non-null, detach the source_mnt from its parent and
+ *  		   store the parent mount and mountpoint dentry.
+ *  		   (done when source_mnt is moved)
+ *
+ *  NOTE: the tables below explain the semantics when a source mount
+ *  of a given type is attached to a destination mount of a given type.
+ * ---------------------------------------------------------------------------
+ * |         BIND MOUNT OPERATION                                            |
+ * |**************************************************************************
+ * | source-->| shared        |       private  |       slave    | unbindable |
+ * | dest     |               |                |                |            |
+ * |   |      |               |                |                |            |
+ * |   v      |               |                |                |            |
+ * |**************************************************************************
+ * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
+ * |          |               |                |                |            |
+ * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
+ * ***************************************************************************
+ * A bind operation clones the source mount and mounts the clone on the
+ * destination mount.
+ *
+ * (++)  the cloned mount is propagated to all the mounts in the propagation
+ * 	 tree of the destination mount and the cloned mount is added to
+ * 	 the peer group of the source mount.
+ * (+)   the cloned mount is created under the destination mount and is marked
+ *       as shared. The cloned mount is added to the peer group of the source
+ *       mount.
+ * (+++) the mount is propagated to all the mounts in the propagation tree
+ *       of the destination mount and the cloned mount is made slave
+ *       of the same master as that of the source mount. The cloned mount
+ *       is marked as 'shared and slave'.
+ * (*)   the cloned mount is made a slave of the same master as that of the
+ * 	 source mount.
+ *
+ * ---------------------------------------------------------------------------
+ * |         		MOVE MOUNT OPERATION                                 |
+ * |**************************************************************************
+ * | source-->| shared        |       private  |       slave    | unbindable |
+ * | dest     |               |                |                |            |
+ * |   |      |               |                |                |            |
+ * |   v      |               |                |                |            |
+ * |**************************************************************************
+ * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
+ * |          |               |                |                |            |
+ * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
+ * ***************************************************************************
+ *
+ * (+)  the mount is moved to the destination. And is then propagated to
+ * 	all the mounts in the propagation tree of the destination mount.
+ * (+*)  the mount is moved to the destination.
+ * (+++)  the mount is moved to the destination and is then propagated to
+ * 	all the mounts belonging to the destination mount's propagation tree.
+ * 	the mount is marked as 'shared and slave'.
+ * (*)	the mount continues to be a slave at the new location.
+ *
+ * if the source mount is a tree, the operations explained above are
+ * applied to each mount in the tree.
+ * Must be called without spinlocks held, since this function can sleep
+ * during allocations.
+ */
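+
+/*
+ * A concrete instance of the bind table (illustrative, not part of
+ * this patch), driven from userspace via mount(2) with this patch
+ * applied; /a must already be a mount point:
+ *
+ *	mount("none", "/a", NULL, MS_SHARED, NULL);
+ *	mount("/a", "/b", NULL, MS_BIND, NULL);
+ *
+ * This is the "shared source, non-shared dest" cell marked (+): the
+ * clone at /b is created shared and joins /a's peer group, so a later
+ * mount under /a also shows up under /b and vice versa.
+ */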
+static int attach_recursive_mnt(struct vfsmount *source_mnt,
+			struct nameidata *nd, struct nameidata *parent_nd)
+{
+	LIST_HEAD(tree_list);
+	struct vfsmount *dest_mnt = nd->mnt;
+	struct dentry *dest_dentry = nd->dentry;
+	struct vfsmount *child, *p;
+
+	if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list))
+		return -EINVAL;
+
+	if (IS_MNT_SHARED(dest_mnt)) {
+		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
+			set_mnt_shared(p);
+	}
+
+	spin_lock(&vfsmount_lock);
+	if (parent_nd) {
+		detach_mnt(source_mnt, parent_nd);
+		attach_mnt(source_mnt, nd);
+		touch_namespace(current->namespace);
+	} else {
+		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
+		commit_tree(source_mnt);
+	}
+
+	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
+		list_del_init(&child->mnt_hash);
+		commit_tree(child);
+	}
+	spin_unlock(&vfsmount_lock);
+	return 0;
+}
+
+static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
+{
+	int err;
+	if (mnt->mnt_sb->s_flags & MS_NOUSER)
+		return -EINVAL;
+
+	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
+	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
+		return -ENOTDIR;
+
+	err = -ENOENT;
+	down(&nd->dentry->d_inode->i_sem);
+	if (IS_DEADDIR(nd->dentry->d_inode))
+		goto out_unlock;
+
+	err = security_sb_check_sb(mnt, nd);
+	if (err)
+		goto out_unlock;
+
+	err = -ENOENT;
+	if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry))
+		err = attach_recursive_mnt(mnt, nd, NULL);
+out_unlock:
+	up(&nd->dentry->d_inode->i_sem);
+	if (!err)
+		security_sb_post_addmount(mnt, nd);
+	return err;
+}
+
+/*
+ * recursively change the type of the mountpoint.
+ */
+static int do_change_type(struct nameidata *nd, int flag)
+{
+	struct vfsmount *m, *mnt = nd->mnt;
+	int recurse = flag & MS_REC;
+	int type = flag & ~MS_REC;
+
+	if (nd->dentry != nd->mnt->mnt_root)
+		return -EINVAL;
+
+	down_write(&namespace_sem);
+	spin_lock(&vfsmount_lock);
+	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
+		change_mnt_propagation(m, type);
+	spin_unlock(&vfsmount_lock);
+	up_write(&namespace_sem);
+	return 0;
+}
+
+/*
+ * do loopback mount.
+ */
+static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
+{
+	struct nameidata old_nd;
+	struct vfsmount *mnt = NULL;
+	int err = mount_is_safe(nd);
+	if (err)
+		return err;
+	if (!old_name || !*old_name)
+		return -EINVAL;
+	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
+	if (err)
+		return err;
+
+	down_write(&namespace_sem);
+	err = -EINVAL;
+	if (IS_MNT_UNBINDABLE(old_nd.mnt))
+ 		goto out;
+
+	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
+		goto out;
+
+	err = -ENOMEM;
+	if (recurse)
+		mnt = copy_tree(old_nd.mnt, old_nd.dentry, 0);
+	else
+		mnt = clone_mnt(old_nd.mnt, old_nd.dentry, 0);
+
+	if (!mnt)
+		goto out;
+
+	err = graft_tree(mnt, nd);
+	if (err) {
+		LIST_HEAD(umount_list);
+		spin_lock(&vfsmount_lock);
+		umount_tree(mnt, 0, &umount_list);
+		spin_unlock(&vfsmount_lock);
+		release_mounts(&umount_list);
+	}
+
+out:
+	up_write(&namespace_sem);
+	path_release(&old_nd);
+	return err;
+}
+
+/*
+ * change filesystem flags. dir should be the physical root of the filesystem.
+ * If you've mounted a non-root directory somewhere and want to do remount
+ * on it - tough luck.
+ */
+static int do_remount(struct nameidata *nd, int flags, int mnt_flags,
+		      void *data)
+{
+	int err;
+	struct super_block *sb = nd->mnt->mnt_sb;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (!check_mnt(nd->mnt))
+		return -EINVAL;
+
+	if (nd->dentry != nd->mnt->mnt_root)
+		return -EINVAL;
+
+	down_write(&sb->s_umount);
+	err = do_remount_sb(sb, flags, data, 0);
+	if (!err)
+		nd->mnt->mnt_flags = mnt_flags;
+	up_write(&sb->s_umount);
+	if (!err)
+		security_sb_post_remount(nd->mnt, flags, data);
+	return err;
+}
+
+static inline int tree_contains_unbindable(struct vfsmount *mnt)
+{
+	struct vfsmount *p;
+	for (p = mnt; p; p = next_mnt(p, mnt)) {
+		if (IS_MNT_UNBINDABLE(p))
+			return 1;
+	}
+	return 0;
+}
+
+static int do_move_mount(struct nameidata *nd, char *old_name)
+{
+	struct nameidata old_nd, parent_nd;
+	struct vfsmount *p;
+	int err = 0;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (!old_name || !*old_name)
+		return -EINVAL;
+	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
+	if (err)
+		return err;
+
+	down_write(&namespace_sem);
+	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
+		;
+	err = -EINVAL;
+	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
+		goto out;
+
+	err = -ENOENT;
+	down(&nd->dentry->d_inode->i_sem);
+	if (IS_DEADDIR(nd->dentry->d_inode))
+		goto out1;
+
+	if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
+		goto out1;
+
+	err = -EINVAL;
+	if (old_nd.dentry != old_nd.mnt->mnt_root)
+		goto out1;
+
+	if (old_nd.mnt == old_nd.mnt->mnt_parent)
+		goto out1;
+
+	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
+	      S_ISDIR(old_nd.dentry->d_inode->i_mode))
+		goto out1;
+	/*
+	 * Don't move a mount residing in a shared parent.
+	 */
+	if (old_nd.mnt->mnt_parent && IS_MNT_SHARED(old_nd.mnt->mnt_parent))
+		goto out1;
+	/*
+	 * Don't move a mount tree containing unbindable mounts to a destination
+	 * mount which is shared.
+	 */
+	if (IS_MNT_SHARED(nd->mnt) && tree_contains_unbindable(old_nd.mnt))
+		goto out1;
+	err = -ELOOP;
+	for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent)
+		if (p == old_nd.mnt)
+			goto out1;
+
+	if ((err = attach_recursive_mnt(old_nd.mnt, nd, &parent_nd)))
+		goto out1;
+
+	spin_lock(&vfsmount_lock);
+	/* if the mount is moved, it should no longer expire
+	 * automatically */
+	list_del_init(&old_nd.mnt->mnt_expire);
+	spin_unlock(&vfsmount_lock);
+out1:
+	up(&nd->dentry->d_inode->i_sem);
+out:
+	up_write(&namespace_sem);
+	if (!err)
+		path_release(&parent_nd);
+	path_release(&old_nd);
+	return err;
+}
+
+/*
+ * create a new mount for userspace and request it to be added into the
+ * namespace's tree
+ */
+static int do_new_mount(struct nameidata *nd, char *type, int flags,
+			int mnt_flags, char *name, void *data)
+{
+	struct vfsmount *mnt;
+
+	if (!type || !memchr(type, 0, PAGE_SIZE))
+		return -EINVAL;
+
+	/* we need capabilities... */
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	mnt = do_kern_mount(type, flags, name, data);
+	if (IS_ERR(mnt))
+		return PTR_ERR(mnt);
+
+	return do_add_mount(mnt, nd, mnt_flags, NULL);
+}
+
+/*
+ * add a mount into a namespace's mount tree
+ * - provide the option of adding the new mount to an expiration list
+ */
+int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
+		 int mnt_flags, struct list_head *fslist)
+{
+	int err;
+
+	down_write(&namespace_sem);
+	/* Something was mounted here while we slept */
+	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
+		;
+	err = -EINVAL;
+	if (!check_mnt(nd->mnt))
+		goto unlock;
+
+	/* Refuse the same filesystem on the same mount point */
+	err = -EBUSY;
+	if (nd->mnt->mnt_sb == newmnt->mnt_sb &&
+	    nd->mnt->mnt_root == nd->dentry)
+		goto unlock;
+
+	err = -EINVAL;
+	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
+		goto unlock;
+
+	newmnt->mnt_flags = mnt_flags;
+	if ((err = graft_tree(newmnt, nd)))
+		goto unlock;
+
+	if (fslist) {
+		/* add to the specified expiration list */
+		spin_lock(&vfsmount_lock);
+		list_add_tail(&newmnt->mnt_expire, fslist);
+		spin_unlock(&vfsmount_lock);
+	}
+	up_write(&namespace_sem);
+	return 0;
+
+unlock:
+	up_write(&namespace_sem);
+	mntput(newmnt);
+	return err;
+}
+
+EXPORT_SYMBOL_GPL(do_add_mount);
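+
+/*
+ * Illustrative in-kernel caller (not part of this patch): an
+ * automounting filesystem hands a freshly created vfsmount to the
+ * namespace and parks it on a private expiration list in one step,
+ * e.g. (list name hypothetical):
+ *
+ *	static LIST_HEAD(afs_vfsmounts);
+ *
+ *	err = do_add_mount(newmnt, nd, 0, &afs_vfsmounts);
+ *
+ * Everything on that list becomes a candidate for the expiry pass
+ * done by mark_mounts_for_expiry() below.
+ */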
+
+static void expire_mount(struct vfsmount *mnt, struct list_head *mounts,
+				struct list_head *umounts)
+{
+	spin_lock(&vfsmount_lock);
+
+	/*
+	 * Check if the mount is still attached; if not, let whoever holds
+	 * it deal with the sucker
+	 */
+	if (mnt->mnt_parent == mnt) {
+		spin_unlock(&vfsmount_lock);
+		return;
+	}
+
+	/*
+	 * Check that it is still dead: the count should now be 2 - as
+	 * contributed by the vfsmount parent and the mntget above
+	 */
+	if (!propagate_mount_busy(mnt, 2)) {
+		/* delete from the namespace */
+		touch_namespace(mnt->mnt_namespace);
+		list_del_init(&mnt->mnt_list);
+		mnt->mnt_namespace = NULL;
+		umount_tree(mnt, 1, umounts);
+		spin_unlock(&vfsmount_lock);
+	} else {
+		/*
+		 * Someone brought it back to life whilst we didn't have any
+		 * locks held so return it to the expiration list
+		 */
+		list_add_tail(&mnt->mnt_expire, mounts);
+		spin_unlock(&vfsmount_lock);
+	}
+}
+
+/*
+ * process a list of expirable mountpoints with the intent of discarding any
+ * mountpoints that aren't in use and haven't been touched since we last
+ * came here
+ */
+void mark_mounts_for_expiry(struct list_head *mounts)
+{
+	struct namespace *namespace;
+	struct vfsmount *mnt, *next;
+	LIST_HEAD(graveyard);
+
+	if (list_empty(mounts))
+		return;
+
+	spin_lock(&vfsmount_lock);
+
+	/* extract from the expiration list every vfsmount that matches the
+	 * following criteria:
+	 * - only referenced by its parent vfsmount
+	 * - still marked for expiry (marked on the last call here; marks are
+	 *   cleared by mntput())
+	 */
+	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
+		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
+		    atomic_read(&mnt->mnt_count) != 1)
+			continue;
+
+		mntget(mnt);
+		list_move(&mnt->mnt_expire, &graveyard);
+	}
+
+	/*
+	 * go through the vfsmounts we've just consigned to the graveyard to
+	 * - check that they're still dead
+	 * - delete the vfsmount from the appropriate namespace under lock
+	 * - dispose of the corpse
+	 */
+	while (!list_empty(&graveyard)) {
+		LIST_HEAD(umounts);
+		mnt = list_entry(graveyard.next, struct vfsmount, mnt_expire);
+		list_del_init(&mnt->mnt_expire);
+
+		/* don't do anything if the namespace is dead - all the
+		 * vfsmounts from it are going away anyway */
+		namespace = mnt->mnt_namespace;
+		if (!namespace || !namespace->root)
+			continue;
+		get_namespace(namespace);
+
+		spin_unlock(&vfsmount_lock);
+		down_write(&namespace_sem);
+		expire_mount(mnt, mounts, &umounts);
+		up_write(&namespace_sem);
+		release_mounts(&umounts);
+		mntput(mnt);
+		put_namespace(namespace);
+		spin_lock(&vfsmount_lock);
+	}
+
+	spin_unlock(&vfsmount_lock);
+}
+
+EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);
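+
+/*
+ * Illustrative driver for the above (not part of this patch): the
+ * list owner re-runs the pass periodically, so a mount is reaped on
+ * the second consecutive pass that finds it unused, e.g.
+ *
+ *	static void expiry_work_fn(void *data)
+ *	{
+ *		mark_mounts_for_expiry(&afs_vfsmounts);
+ *		schedule_delayed_work(&expiry_work, 10 * 60 * HZ);
+ *	}
+ *
+ * where expiry_work is a DECLARE_WORK() wrapping this function
+ * (names hypothetical).
+ */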
+
+/*
+ * Some copy_from_user() implementations do not return the exact number of
+ * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
+ * Note that this function differs from copy_from_user() in that it will oops
+ * on bad values of `to', rather than returning a short copy.
+ */
+static long exact_copy_from_user(void *to, const void __user * from,
+				 unsigned long n)
+{
+	char *t = to;
+	const char __user *f = from;
+	char c;
+
+	if (!access_ok(VERIFY_READ, from, n))
+		return n;
+
+	while (n) {
+		if (__get_user(c, f)) {
+			memset(t, 0, n);
+			break;
+		}
+		*t++ = c;
+		f++;
+		n--;
+	}
+	return n;
+}
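+
+/*
+ * Worked example (illustrative): asking this helper for 100 bytes
+ * when only 60 are mapped copies 60, zeroes the remaining 40 in the
+ * kernel buffer and returns 40.  copy_mount_options() below then
+ * derives i = 100 - 40 = 60 valid bytes instead of failing the whole
+ * mount just because the options string ends near an unmapped page.
+ */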
+
+int copy_mount_options(const void __user * data, unsigned long *where)
+{
+	int i;
+	unsigned long page;
+	unsigned long size;
+
+	*where = 0;
+	if (!data)
+		return 0;
+
+	if (!(page = __get_free_page(GFP_KERNEL)))
+		return -ENOMEM;
+
+	/* We only care that *some* data at the address the user
+	 * gave us is valid.  Just in case, we'll zero
+	 * the remainder of the page.
+	 */
+	/* copy_from_user cannot cross TASK_SIZE ! */
+	size = TASK_SIZE - (unsigned long)data;
+	if (size > PAGE_SIZE)
+		size = PAGE_SIZE;
+
+	i = size - exact_copy_from_user((void *)page, data, size);
+	if (!i) {
+		free_page(page);
+		return -EFAULT;
+	}
+	if (i != PAGE_SIZE)
+		memset((char *)page + i, 0, PAGE_SIZE - i);
+	*where = page;
+	return 0;
+}
+
+/*
+ * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
+ * be given to the mount() call (i.e. read-only, nodev, nosuid, etc.).
+ *
+ * data is a (void *) that can point to any structure up to
+ * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
+ * information (or be NULL).
+ *
+ * Pre-0.97 versions of mount() didn't have a flags word.
+ * When the flags word was introduced its top half was required
+ * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
+ * Therefore, if this magic number is present, it carries no information
+ * and must be discarded.
+ */
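+
+/*
+ * Illustrative call (not part of this patch): an old-style read-only
+ * remount such as
+ *
+ *	mount("/dev/hda1", "/mnt", "ext3",
+ *	      MS_MGC_VAL | MS_REMOUNT | MS_RDONLY, "");
+ *
+ * has the 0xC0ED0000 magic stripped by the MS_MGC_MSK test below and
+ * is then routed to do_remount().
+ */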
+long do_mount(char *dev_name, char *dir_name, char *type_page,
+		  unsigned long flags, void *data_page)
+{
+	struct nameidata nd;
+	int retval = 0;
+	int mnt_flags = 0;
+
+	/* Discard magic */
+	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
+		flags &= ~MS_MGC_MSK;
+
+	/* Basic sanity checks */
+
+	if (!dir_name || !*dir_name || !memchr(dir_name, 0, PAGE_SIZE))
+		return -EINVAL;
+	if (dev_name && !memchr(dev_name, 0, PAGE_SIZE))
+		return -EINVAL;
+
+	if (data_page)
+		((char *)data_page)[PAGE_SIZE - 1] = 0;
+
+	/* Separate the per-mountpoint flags */
+	if (flags & MS_NOSUID)
+		mnt_flags |= MNT_NOSUID;
+	if (flags & MS_NODEV)
+		mnt_flags |= MNT_NODEV;
+	if (flags & MS_NOEXEC)
+		mnt_flags |= MNT_NOEXEC;
+	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE);
+
+	/* ... and get the mountpoint */
+	retval = path_lookup(dir_name, LOOKUP_FOLLOW, &nd);
+	if (retval)
+		return retval;
+
+	retval = security_sb_mount(dev_name, &nd, type_page, flags, data_page);
+	if (retval)
+		goto dput_out;
+
+	if (flags & MS_REMOUNT)
+		retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
+				    data_page);
+	else if (flags & MS_BIND)
+		retval = do_loopback(&nd, dev_name, flags & MS_REC);
+	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
+		retval = do_change_type(&nd, flags);
+	else if (flags & MS_MOVE)
+		retval = do_move_mount(&nd, dev_name);
+	else
+		retval = do_new_mount(&nd, type_page, flags, mnt_flags,
+				      dev_name, data_page);
+dput_out:
+	path_release(&nd);
+	return retval;
+}
+
+int copy_namespace(int flags, struct task_struct *tsk)
+{
+	struct namespace *namespace = tsk->namespace;
+	struct namespace *new_ns;
+	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL, *altrootmnt = NULL;
+	struct fs_struct *fs = tsk->fs;
+	struct vfsmount *p, *q;
+
+	if (!namespace)
+		return 0;
+
+	get_namespace(namespace);
+
+	if (!(flags & CLONE_NEWNS))
+		return 0;
+
+	if (!capable(CAP_SYS_ADMIN)) {
+		put_namespace(namespace);
+		return -EPERM;
+	}
+
+	new_ns = kmalloc(sizeof(struct namespace), GFP_KERNEL);
+	if (!new_ns)
+		goto out;
+
+	atomic_set(&new_ns->count, 1);
+	INIT_LIST_HEAD(&new_ns->list);
+	init_waitqueue_head(&new_ns->poll);
+	new_ns->event = 0;
+
+	down_write(&namespace_sem);
+	/* First pass: copy the tree topology */
+	new_ns->root = copy_tree(namespace->root, namespace->root->mnt_root,
+					CL_COPY_ALL | CL_EXPIRE);
+	if (!new_ns->root) {
+		up_write(&namespace_sem);
+		kfree(new_ns);
+		goto out;
+	}
+	spin_lock(&vfsmount_lock);
+	list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
+	spin_unlock(&vfsmount_lock);
+
+	/*
+	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
+	 * as belonging to new namespace.  We have already acquired a private
+	 * fs_struct, so tsk->fs->lock is not needed.
+	 */
+	p = namespace->root;
+	q = new_ns->root;
+	while (p) {
+		q->mnt_namespace = new_ns;
+		if (fs) {
+			if (p == fs->rootmnt) {
+				rootmnt = p;
+				fs->rootmnt = mntget(q);
+			}
+			if (p == fs->pwdmnt) {
+				pwdmnt = p;
+				fs->pwdmnt = mntget(q);
+			}
+			if (p == fs->altrootmnt) {
+				altrootmnt = p;
+				fs->altrootmnt = mntget(q);
+			}
+		}
+		p = next_mnt(p, namespace->root);
+		q = next_mnt(q, new_ns->root);
+	}
+	up_write(&namespace_sem);
+
+	tsk->namespace = new_ns;
+
+	if (rootmnt)
+		mntput(rootmnt);
+	if (pwdmnt)
+		mntput(pwdmnt);
+	if (altrootmnt)
+		mntput(altrootmnt);
+
+	put_namespace(namespace);
+	return 0;
+
+out:
+	put_namespace(namespace);
+	return -ENOMEM;
+}
+
+asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
+			  char __user * type, unsigned long flags,
+			  void __user * data)
+{
+	int retval;
+	unsigned long data_page;
+	unsigned long type_page;
+	unsigned long dev_page;
+	char *dir_page;
+
+	retval = copy_mount_options(type, &type_page);
+	if (retval < 0)
+		return retval;
+
+	dir_page = getname(dir_name);
+	retval = PTR_ERR(dir_page);
+	if (IS_ERR(dir_page))
+		goto out1;
+
+	retval = copy_mount_options(dev_name, &dev_page);
+	if (retval < 0)
+		goto out2;
+
+	retval = copy_mount_options(data, &data_page);
+	if (retval < 0)
+		goto out3;
+
+	lock_kernel();
+	retval = do_mount((char *)dev_page, dir_page, (char *)type_page,
+			  flags, (void *)data_page);
+	unlock_kernel();
+	free_page(data_page);
+
+out3:
+	free_page(dev_page);
+out2:
+	putname(dir_page);
+out1:
+	free_page(type_page);
+	return retval;
+}
+
+/*
+ * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
+ * It can block. Requires the big lock held.
+ */
+void set_fs_root(struct fs_struct *fs, struct vfsmount *mnt,
+		 struct dentry *dentry)
+{
+	struct dentry *old_root;
+	struct vfsmount *old_rootmnt;
+	write_lock(&fs->lock);
+	old_root = fs->root;
+	old_rootmnt = fs->rootmnt;
+	fs->rootmnt = mntget(mnt);
+	fs->root = dget(dentry);
+	write_unlock(&fs->lock);
+	if (old_root) {
+		dput(old_root);
+		mntput(old_rootmnt);
+	}
+}
+
+/*
+ * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values.
+ * It can block. Requires the big lock held.
+ */
+void set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
+		struct dentry *dentry)
+{
+	struct dentry *old_pwd;
+	struct vfsmount *old_pwdmnt;
+
+	write_lock(&fs->lock);
+	old_pwd = fs->pwd;
+	old_pwdmnt = fs->pwdmnt;
+	fs->pwdmnt = mntget(mnt);
+	fs->pwd = dget(dentry);
+	write_unlock(&fs->lock);
+
+	if (old_pwd) {
+		dput(old_pwd);
+		mntput(old_pwdmnt);
+	}
+}
+
+static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
+{
+	struct task_struct *g, *p;
+	struct fs_struct *fs;
+
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		task_lock(p);
+		fs = p->fs;
+		if (fs) {
+			atomic_inc(&fs->count);
+			task_unlock(p);
+			if (fs->root == old_nd->dentry
+			    && fs->rootmnt == old_nd->mnt)
+				set_fs_root(fs, new_nd->mnt, new_nd->dentry);
+			if (fs->pwd == old_nd->dentry
+			    && fs->pwdmnt == old_nd->mnt)
+				set_fs_pwd(fs, new_nd->mnt, new_nd->dentry);
+			put_fs_struct(fs);
+		} else
+			task_unlock(p);
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+}
+
+/*
+ * pivot_root Semantics:
+ * Moves the root file system of the current process to the directory put_old,
+ * makes new_root the new root file system of the current process, and sets
+ * root/cwd of all processes which had them on the current root to new_root.
+ *
+ * Restrictions:
+ * The new_root and put_old must be directories, and must not be on the
+ * same file system as the current process root. The put_old must be
+ * underneath new_root, i.e. adding a non-zero number of /.. to the string
+ * pointed to by put_old must yield the same directory as new_root. No other
+ * file system may be mounted on put_old. After all, new_root is a mountpoint.
+ *
+ * Notes:
+ *  - we don't move root/cwd if they are not at the root (reason: if something
+ *    cared enough to change them, it's probably wrong to force them elsewhere)
+ *  - it's okay to pick a root that isn't the root of a file system, e.g.
+ *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
+ *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
+ *    first.
+ */
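+
+/*
+ * Illustrative userspace sequence (not part of this patch): the
+ * classic root switch done by an initrd, assuming pivot_root() is
+ * reachable via syscall(2) or the libc wrapper:
+ *
+ *	chdir("/new_root");
+ *	pivot_root(".", "put_old");
+ *	chroot(".");
+ *	chdir("/");
+ *	umount2("/put_old", MNT_DETACH);
+ *
+ * where put_old is a pre-existing directory under /new_root.
+ */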
+asmlinkage long sys_pivot_root(const char __user * new_root,
+			       const char __user * put_old)
+{
+	struct vfsmount *tmp;
+	struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
+	int error;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	lock_kernel();
+
+	error = __user_walk(new_root, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
+			    &new_nd);
+	if (error)
+		goto out0;
+	error = -EINVAL;
+	if (!check_mnt(new_nd.mnt))
+		goto out1;
+
+	error = __user_walk(put_old, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old_nd);
+	if (error)
+		goto out1;
+
+	error = security_sb_pivotroot(&old_nd, &new_nd);
+	if (error) {
+		path_release(&old_nd);
+		goto out1;
+	}
+
+	read_lock(&current->fs->lock);
+	user_nd.mnt = mntget(current->fs->rootmnt);
+	user_nd.dentry = dget(current->fs->root);
+	read_unlock(&current->fs->lock);
+	down_write(&namespace_sem);
+	down(&old_nd.dentry->d_inode->i_sem);
+	error = -EINVAL;
+	if (IS_MNT_SHARED(old_nd.mnt) ||
+		IS_MNT_SHARED(new_nd.mnt->mnt_parent) ||
+		IS_MNT_SHARED(user_nd.mnt->mnt_parent))
+		goto out2;
+	if (!check_mnt(user_nd.mnt))
+		goto out2;
+	error = -ENOENT;
+	if (IS_DEADDIR(new_nd.dentry->d_inode))
+		goto out2;
+	if (d_unhashed(new_nd.dentry) && !IS_ROOT(new_nd.dentry))
+		goto out2;
+	if (d_unhashed(old_nd.dentry) && !IS_ROOT(old_nd.dentry))
+		goto out2;
+	error = -EBUSY;
+	if (new_nd.mnt == user_nd.mnt || old_nd.mnt == user_nd.mnt)
+		goto out2; /* loop, on the same file system  */
+	error = -EINVAL;
+	if (user_nd.mnt->mnt_root != user_nd.dentry)
+		goto out2; /* not a mountpoint */
+	if (user_nd.mnt->mnt_parent == user_nd.mnt)
+		goto out2; /* not attached */
+	if (new_nd.mnt->mnt_root != new_nd.dentry)
+		goto out2; /* not a mountpoint */
+	if (new_nd.mnt->mnt_parent == new_nd.mnt)
+		goto out2; /* not attached */
+	tmp = old_nd.mnt; /* make sure we can reach put_old from new_root */
+	spin_lock(&vfsmount_lock);
+	if (tmp != new_nd.mnt) {
+		for (;;) {
+			if (tmp->mnt_parent == tmp)
+				goto out3; /* already mounted on put_old */
+			if (tmp->mnt_parent == new_nd.mnt)
+				break;
+			tmp = tmp->mnt_parent;
+		}
+		if (!is_subdir(tmp->mnt_mountpoint, new_nd.dentry))
+			goto out3;
+	} else if (!is_subdir(old_nd.dentry, new_nd.dentry))
+		goto out3;
+	detach_mnt(new_nd.mnt, &parent_nd);
+	detach_mnt(user_nd.mnt, &root_parent);
+	attach_mnt(user_nd.mnt, &old_nd);     /* mount old root on put_old */
+	attach_mnt(new_nd.mnt, &root_parent); /* mount new_root on / */
+	touch_namespace(current->namespace);
+	spin_unlock(&vfsmount_lock);
+	chroot_fs_refs(&user_nd, &new_nd);
+	security_sb_post_pivotroot(&user_nd, &new_nd);
+	error = 0;
+	path_release(&root_parent);
+	path_release(&parent_nd);
+out2:
+	up(&old_nd.dentry->d_inode->i_sem);
+	up_write(&namespace_sem);
+	path_release(&user_nd);
+	path_release(&old_nd);
+out1:
+	path_release(&new_nd);
+out0:
+	unlock_kernel();
+	return error;
+out3:
+	spin_unlock(&vfsmount_lock);
+	goto out2;
+}
+
+static void __init init_mount_tree(void)
+{
+	struct vfsmount *mnt;
+	struct namespace *namespace;
+	struct task_struct *g, *p;
+
+	mnt = do_kern_mount("rootfs", 0, "rootfs", NULL);
+	if (IS_ERR(mnt))
+		panic("Can't create rootfs");
+	namespace = kmalloc(sizeof(*namespace), GFP_KERNEL);
+	if (!namespace)
+		panic("Can't allocate initial namespace");
+	atomic_set(&namespace->count, 1);
+	INIT_LIST_HEAD(&namespace->list);
+	init_waitqueue_head(&namespace->poll);
+	namespace->event = 0;
+	list_add(&mnt->mnt_list, &namespace->list);
+	namespace->root = mnt;
+	mnt->mnt_namespace = namespace;
+
+	init_task.namespace = namespace;
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		get_namespace(namespace);
+		p->namespace = namespace;
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+
+	set_fs_pwd(current->fs, namespace->root, namespace->root->mnt_root);
+	set_fs_root(current->fs, namespace->root, namespace->root->mnt_root);
+}
+
+void __init mnt_init(unsigned long mempages)
+{
+	struct list_head *d;
+	unsigned int nr_hash;
+	int i;
+
+	init_rwsem(&namespace_sem);
+
+	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct vfsmount),
+			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL, NULL);
+
+	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
+
+	if (!mount_hashtable)
+		panic("Failed to allocate mount hash table\n");
+
+	/*
+	 * Find the power-of-two list-heads that can fit into the allocation.
+	 * We don't guarantee that "sizeof(struct list_head)" is necessarily
+	 * a power-of-two.
+	 */
+	nr_hash = PAGE_SIZE / sizeof(struct list_head);
+	hash_bits = 0;
+	do {
+		hash_bits++;
+	} while ((nr_hash >> hash_bits) != 0);
+	hash_bits--;
+
+	/*
+	 * Re-calculate the actual number of entries and the mask
+	 * from the number of bits we can fit.
+	 */
+	nr_hash = 1UL << hash_bits;
+	hash_mask = nr_hash - 1;
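+
+	/*
+	 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and
+	 * a 16-byte struct list_head): nr_hash starts out as 256, the
+	 * loop above leaves hash_bits == 8, and the recomputation gives
+	 * nr_hash == 1 << 8 == 256 with hash_mask == 0xff.  A count that
+	 * isn't a power of two is rounded down the same way.
+	 */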
+
+	printk("Mount-cache hash table entries: %d\n", nr_hash);
+
+	/* And initialize the newly allocated array */
+	d = mount_hashtable;
+	i = nr_hash;
+	do {
+		INIT_LIST_HEAD(d);
+		d++;
+		i--;
+	} while (i);
+	sysfs_init();
+	init_rootfs();
+	init_mount_tree();
+}
+
+void __put_namespace(struct namespace *namespace)
+{
+	struct vfsmount *root = namespace->root;
+	LIST_HEAD(umount_list);
+	namespace->root = NULL;
+	spin_unlock(&vfsmount_lock);
+	down_write(&namespace_sem);
+	spin_lock(&vfsmount_lock);
+	umount_tree(root, 0, &umount_list);
+	spin_unlock(&vfsmount_lock);
+	up_write(&namespace_sem);
+	release_mounts(&umount_list);
+	kfree(namespace);
+}
diff -urN linux-2.6.15/fs/select.c linux-2.6.15-no1/fs/select.c
--- linux-2.6.15/fs/select.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/fs/select.c	2006-01-26 15:41:14.004758720 +0000
@@ -29,12 +29,6 @@
 #define ROUND_UP(x,y) (((x)+(y)-1)/(y))
 #define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
 
-struct poll_table_entry {
-	struct file * filp;
-	wait_queue_t wait;
-	wait_queue_head_t * wait_address;
-};
-
 struct poll_table_page {
 	struct poll_table_page * next;
 	struct poll_table_entry * entry;
@@ -64,13 +58,23 @@
 	init_poll_funcptr(&pwq->pt, __pollwait);
 	pwq->error = 0;
 	pwq->table = NULL;
+	pwq->inline_index = 0;
 }
 
 EXPORT_SYMBOL(poll_initwait);
 
+static void free_poll_entry(struct poll_table_entry *entry)
+{
+	remove_wait_queue(entry->wait_address,&entry->wait);
+	fput(entry->filp);
+}
+
 void poll_freewait(struct poll_wqueues *pwq)
 {
 	struct poll_table_page * p = pwq->table;
+	int i;
+	for (i = 0; i < pwq->inline_index; i++)
+		free_poll_entry(pwq->inline_entries + i);
 	while (p) {
 		struct poll_table_entry * entry;
 		struct poll_table_page *old;
@@ -78,8 +82,7 @@
 		entry = p->entry;
 		do {
 			entry--;
-			remove_wait_queue(entry->wait_address,&entry->wait);
-			fput(entry->filp);
+			free_poll_entry(entry);
 		} while (entry > p->entries);
 		old = p;
 		p = p->next;
@@ -89,12 +92,14 @@
 
 EXPORT_SYMBOL(poll_freewait);
 
-static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
-		       poll_table *_p)
+static struct poll_table_entry *poll_get_entry(poll_table *_p)
 {
 	struct poll_wqueues *p = container_of(_p, struct poll_wqueues, pt);
 	struct poll_table_page *table = p->table;
 
+	if (p->inline_index < N_INLINE_POLL_ENTRIES)
+		return p->inline_entries + p->inline_index++;
+
 	if (!table || POLL_TABLE_FULL(table)) {
 		struct poll_table_page *new_table;
 
@@ -102,7 +107,7 @@
 		if (!new_table) {
 			p->error = -ENOMEM;
 			__set_current_state(TASK_RUNNING);
-			return;
+			return NULL;
 		}
 		new_table->entry = new_table->entries;
 		new_table->next = table;
@@ -110,16 +115,21 @@
 		table = new_table;
 	}
 
-	/* Add a new entry */
-	{
-		struct poll_table_entry * entry = table->entry;
-		table->entry = entry+1;
-	 	get_file(filp);
-	 	entry->filp = filp;
-		entry->wait_address = wait_address;
-		init_waitqueue_entry(&entry->wait, current);
-		add_wait_queue(wait_address,&entry->wait);
-	}
+	return table->entry++;
+}
+
+/* Add a new entry */
+static void __pollwait(struct file *filp, wait_queue_head_t *wait_address,
+		       poll_table *p)
+{
+	struct poll_table_entry *entry = poll_get_entry(p);
+	if (!entry)
+		return;
+	get_file(filp);
+	entry->filp = filp;
+	entry->wait_address = wait_address;
+	init_waitqueue_entry(&entry->wait, current);
+	add_wait_queue(wait_address,&entry->wait);
 }
 
 #define FDS_IN(fds, n)		(fds->in + n)
@@ -274,16 +284,6 @@
 	return retval;
 }
 
-static void *select_bits_alloc(int size)
-{
-	return kmalloc(6 * size, GFP_KERNEL);
-}
-
-static void select_bits_free(void *bits, int size)
-{
-	kfree(bits);
-}
-
 /*
  * We can actually return ERESTARTSYS instead of EINTR, but I'd
  * like to be certain this leads to no problems. So I return
@@ -303,6 +303,8 @@
 	long timeout;
 	int ret, size, max_fdset;
 	struct fdtable *fdt;
+	/* Allocate small arguments on the stack to save memory and be faster */
+	char stack_fds[SELECT_STACK_ALLOC];
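+	/*
+	 * Illustrative sizing, assuming SELECT_STACK_ALLOC == 256: the
+	 * six bitmaps (in/out/ex plus their three result sets) fit in
+	 * stack_fds for sets of up to 320 descriptors; anything larger
+	 * takes the kmalloc() path below.
+	 */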
 
 	timeout = MAX_SCHEDULE_TIMEOUT;
 	if (tvp) {
@@ -344,7 +346,10 @@
 	 */
 	ret = -ENOMEM;
 	size = FDS_BYTES(n);
-	bits = select_bits_alloc(size);
+	if (6*size < SELECT_STACK_ALLOC)
+		bits = stack_fds;
+	else
+		bits = kmalloc(6 * size, GFP_KERNEL);
 	if (!bits)
 		goto out_nofds;
 	fds.in      = (unsigned long *)  bits;
@@ -390,7 +395,8 @@
 		ret = -EFAULT;
 
 out:
-	select_bits_free(bits, size);
+	if (bits != stack_fds)
+		kfree(bits);
 out_nofds:
 	return ret;
 }
@@ -464,6 +470,8 @@
 	return count;
 }
 
+#define N_STACK_PPS ((sizeof(stack_pps) - sizeof(struct poll_list))  / sizeof(struct pollfd))
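+/*
+ * Illustrative arithmetic, assuming POLL_STACK_ALLOC == 256, a 16-byte
+ * struct poll_list header and an 8-byte struct pollfd: N_STACK_PPS is
+ * (256 - 16) / 8 == 30, so up to 30 pollfds are handled without any
+ * kmalloc() at all.
+ */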
+
 asmlinkage long sys_poll(struct pollfd __user * ufds, unsigned int nfds, long timeout)
 {
 	struct poll_wqueues table;
@@ -473,6 +481,9 @@
  	struct poll_list *walk;
 	struct fdtable *fdt;
 	int max_fdset;
+	/* Allocate small arguments on the stack to save memory and be faster */
+	char stack_pps[POLL_STACK_ALLOC];
+	struct poll_list *stack_pp = NULL;
 
 	/* Do a sanity check on nfds ... */
 	rcu_read_lock();
@@ -498,14 +509,23 @@
 	err = -ENOMEM;
 	while(i!=0) {
 		struct poll_list *pp;
-		pp = kmalloc(sizeof(struct poll_list)+
-				sizeof(struct pollfd)*
-				(i>POLLFD_PER_PAGE?POLLFD_PER_PAGE:i),
-					GFP_KERNEL);
-		if(pp==NULL)
-			goto out_fds;
+		int num, size;
+		if (stack_pp == NULL)
+			num = N_STACK_PPS;
+		else
+			num = POLLFD_PER_PAGE;
+		if (num > i)
+			num = i;
+		size = sizeof(struct poll_list) + sizeof(struct pollfd)*num;
+		if (!stack_pp)
+			stack_pp = pp = (struct poll_list *)stack_pps;
+		else {
+			pp = kmalloc(size, GFP_KERNEL);
+			if (!pp)
+				goto out_fds;
+		}
 		pp->next=NULL;
-		pp->len = (i>POLLFD_PER_PAGE?POLLFD_PER_PAGE:i);
+		pp->len = num;
 		if (head == NULL)
 			head = pp;
 		else
@@ -513,7 +533,7 @@
 
 		walk = pp;
 		if (copy_from_user(pp->entries, ufds + nfds-i, 
-				sizeof(struct pollfd)*pp->len)) {
+				sizeof(struct pollfd)*num)) {
 			err = -EFAULT;
 			goto out_fds;
 		}
@@ -541,7 +561,8 @@
 	walk = head;
 	while(walk!=NULL) {
 		struct poll_list *pp = walk->next;
-		kfree(walk);
+		if (walk != stack_pp)
+			kfree(walk);
 		walk = pp;
 	}
 	poll_freewait(&table);
diff -urN linux-2.6.15/include/linux/ext3_fs.h linux-2.6.15-no1/include/linux/ext3_fs.h
--- linux-2.6.15/include/linux/ext3_fs.h	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/include/linux/ext3_fs.h	2006-01-26 15:41:14.005758568 +0000
@@ -36,7 +36,8 @@
  * Define EXT3_RESERVATION to reserve data blocks for expanding files
  */
 #define EXT3_DEFAULT_RESERVE_BLOCKS     8
-#define EXT3_MAX_RESERVE_BLOCKS         1024
+/* max window size: 1024 (direct blocks) + 3 ([t,d]indirect blocks) */
+#define EXT3_MAX_RESERVE_BLOCKS         1027
 #define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0
 /*
  * Always enable hashed directories
@@ -732,6 +733,8 @@
 extern int ext3_bg_has_super(struct super_block *sb, int group);
 extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group);
 extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *);
+extern int ext3_new_blocks (handle_t *, struct inode *, unsigned long,
+			unsigned long *, int *);
 extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long,
 			      unsigned long);
 extern void ext3_free_blocks_sb (handle_t *, struct super_block *,
@@ -772,9 +775,12 @@
 
 
 /* inode.c */
-extern int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
-extern struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
-extern struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
+int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int);
+struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *);
+struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *);
+int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
+	sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
+	int create, int extend_disksize);
 
 extern void ext3_read_inode (struct inode *);
 extern int  ext3_write_inode (struct inode *, int);
diff -urN linux-2.6.15/include/linux/fs.h linux-2.6.15-no1/include/linux/fs.h
--- linux-2.6.15/include/linux/fs.h	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/include/linux/fs.h	2006-01-26 15:41:25.306040664 +0000
@@ -1268,6 +1268,9 @@
 
 extern int vfs_statfs(struct super_block *, struct kstatfs *);
 
+/* /sys/fs */
+extern struct subsystem fs_subsys;
+
 #define FLOCK_VERIFY_READ  1
 #define FLOCK_VERIFY_WRITE 2
 
diff -urN linux-2.6.15/include/linux/fs.h.orig linux-2.6.15-no1/include/linux/fs.h.orig
--- linux-2.6.15/include/linux/fs.h.orig	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.15-no1/include/linux/fs.h.orig	2006-01-03 03:21:10.000000000 +0000
@@ -0,0 +1,1800 @@
+#ifndef _LINUX_FS_H
+#define _LINUX_FS_H
+
+/*
+ * This file has definitions for some important file table
+ * structures etc.
+ */
+
+#include <linux/config.h>
+#include <linux/limits.h>
+#include <linux/ioctl.h>
+#include <linux/rcuref.h>
+
+/*
+ * It's silly to have NR_OPEN bigger than NR_FILE, but you can change
+ * the file limit at runtime and only root can increase the per-process
+ * nr_file rlimit, so it's safe to set up a ridiculously high absolute
+ * upper limit on files-per-process.
+ *
+ * Some programs (notably those using select()) may have to be 
+ * recompiled to take full advantage of the new limits..  
+ */
+
+/* Fixed constants first: */
+#undef NR_OPEN
+#define NR_OPEN (1024*1024)	/* Absolute upper limit on fd num */
+#define INR_OPEN 1024		/* Initial setting for nfile rlimits */
+
+#define BLOCK_SIZE_BITS 10
+#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS)
+
+/* And dynamically-tunable limits and defaults: */
+struct files_stat_struct {
+	int nr_files;		/* read only */
+	int nr_free_files;	/* read only */
+	int max_files;		/* tunable */
+};
+extern struct files_stat_struct files_stat;
+
+struct inodes_stat_t {
+	int nr_inodes;
+	int nr_unused;
+	int dummy[5];
+};
+extern struct inodes_stat_t inodes_stat;
+
+extern int leases_enable, lease_break_time;
+
+#ifdef CONFIG_DNOTIFY
+extern int dir_notify_enable;
+#endif
+
+#define NR_FILE  8192	/* this can well be larger on a larger system */
+
+#define MAY_EXEC 1
+#define MAY_WRITE 2
+#define MAY_READ 4
+#define MAY_APPEND 8
+
+#define FMODE_READ 1
+#define FMODE_WRITE 2
+
+/* Internal kernel extensions */
+#define FMODE_LSEEK	4
+#define FMODE_PREAD	8
+#define FMODE_PWRITE	FMODE_PREAD	/* These go hand in hand */
+
+#define RW_MASK		1
+#define RWA_MASK	2
+#define READ 0
+#define WRITE 1
+#define READA 2		/* read-ahead  - don't block if no resources */
+#define SWRITE 3	/* for ll_rw_block() - wait for buffer lock */
+#define SPECIAL 4	/* For non-blockdevice requests in request queue */
+#define READ_SYNC	(READ | (1 << BIO_RW_SYNC))
+#define WRITE_SYNC	(WRITE | (1 << BIO_RW_SYNC))
+#define WRITE_BARRIER	((1 << BIO_RW) | (1 << BIO_RW_BARRIER))
+
+#define SEL_IN		1
+#define SEL_OUT		2
+#define SEL_EX		4
+
+/* public flags for file_system_type */
+#define FS_REQUIRES_DEV 1 
+#define FS_BINARY_MOUNTDATA 2
+#define FS_REVAL_DOT	16384	/* Check the paths ".", ".." for staleness */
+#define FS_ODD_RENAME	32768	/* Temporary stuff; will go away as soon
+				  * as nfs_rename() will be cleaned up
+				  */
+/*
+ * These are the fs-independent mount-flags: up to 32 flags are supported
+ */
+#define MS_RDONLY	 1	/* Mount read-only */
+#define MS_NOSUID	 2	/* Ignore suid and sgid bits */
+#define MS_NODEV	 4	/* Disallow access to device special files */
+#define MS_NOEXEC	 8	/* Disallow program execution */
+#define MS_SYNCHRONOUS	16	/* Writes are synced at once */
+#define MS_REMOUNT	32	/* Alter flags of a mounted FS */
+#define MS_MANDLOCK	64	/* Allow mandatory locks on an FS */
+#define MS_DIRSYNC	128	/* Directory modifications are synchronous */
+#define MS_NOATIME	1024	/* Do not update access times. */
+#define MS_NODIRATIME	2048	/* Do not update directory access times */
+#define MS_BIND		4096
+#define MS_MOVE		8192
+#define MS_REC		16384
+#define MS_VERBOSE	32768
+#define MS_UNBINDABLE	(1<<17)	/* change to unbindable */
+#define MS_PRIVATE	(1<<18)	/* change to private */
+#define MS_SLAVE	(1<<19)	/* change to slave */
+#define MS_SHARED	(1<<20)	/* change to shared */
+#define MS_POSIXACL	(1<<16)	/* VFS does not apply the umask */
+#define MS_ACTIVE	(1<<30)
+#define MS_NOUSER	(1<<31)
+
+/*
+ * Superblock flags that can be altered by MS_REMOUNT
+ */
+#define MS_RMT_MASK	(MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_NOATIME|\
+			 MS_NODIRATIME)
+
+/*
+ * Old magic mount flag and mask
+ */
+#define MS_MGC_VAL 0xC0ED0000
+#define MS_MGC_MSK 0xffff0000
+
+/* Inode flags - they have nothing to superblock flags now */
+
+#define S_SYNC		1	/* Writes are synced at once */
+#define S_NOATIME	2	/* Do not update access times */
+#define S_APPEND	4	/* Append-only file */
+#define S_IMMUTABLE	8	/* Immutable file */
+#define S_DEAD		16	/* removed, but still open directory */
+#define S_NOQUOTA	32	/* Inode is not counted to quota */
+#define S_DIRSYNC	64	/* Directory modifications are synchronous */
+#define S_NOCMTIME	128	/* Do not update file c/mtime */
+#define S_SWAPFILE	256	/* Do not truncate: swapon got its bmaps */
+#define S_PRIVATE	512	/* Inode is fs-internal */
+
+/*
+ * Note that nosuid etc flags are inode-specific: setting some file-system
+ * flags just means all the inodes inherit those flags by default. It might be
+ * possible to override it selectively if you really wanted to with some
+ * ioctl() that is not currently implemented.
+ *
+ * Exception: MS_RDONLY is always applied to the entire file system.
+ *
+ * Unfortunately, it is possible to change a filesystems flags with it mounted
+ * with files in use.  This means that all of the inodes will not have their
+ * i_flags updated.  Hence, i_flags no longer inherit the superblock mount
+ * flags, so these have to be checked separately. -- rmk@arm.uk.linux.org
+ */
+#define __IS_FLG(inode,flg) ((inode)->i_sb->s_flags & (flg))
+
+#define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY)
+#define IS_SYNC(inode)		(__IS_FLG(inode, MS_SYNCHRONOUS) || \
+					((inode)->i_flags & S_SYNC))
+#define IS_DIRSYNC(inode)	(__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
+					((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
+#define IS_MANDLOCK(inode)	__IS_FLG(inode, MS_MANDLOCK)
+
+#define IS_NOQUOTA(inode)	((inode)->i_flags & S_NOQUOTA)
+#define IS_APPEND(inode)	((inode)->i_flags & S_APPEND)
+#define IS_IMMUTABLE(inode)	((inode)->i_flags & S_IMMUTABLE)
+#define IS_NOATIME(inode)	(__IS_FLG(inode, MS_NOATIME) || ((inode)->i_flags & S_NOATIME))
+#define IS_NODIRATIME(inode)	__IS_FLG(inode, MS_NODIRATIME)
+#define IS_POSIXACL(inode)	__IS_FLG(inode, MS_POSIXACL)
+
+#define IS_DEADDIR(inode)	((inode)->i_flags & S_DEAD)
+#define IS_NOCMTIME(inode)	((inode)->i_flags & S_NOCMTIME)
+#define IS_SWAPFILE(inode)	((inode)->i_flags & S_SWAPFILE)
+#define IS_PRIVATE(inode)	((inode)->i_flags & S_PRIVATE)
+
+/* the read-only stuff doesn't really belong here, but any other place is
+   probably as bad and I don't want to create yet another include file. */
+
+#define BLKROSET   _IO(0x12,93)	/* set device read-only (0 = read-write) */
+#define BLKROGET   _IO(0x12,94)	/* get read-only status (0 = read_write) */
+#define BLKRRPART  _IO(0x12,95)	/* re-read partition table */
+#define BLKGETSIZE _IO(0x12,96)	/* return device size /512 (long *arg) */
+#define BLKFLSBUF  _IO(0x12,97)	/* flush buffer cache */
+#define BLKRASET   _IO(0x12,98)	/* set read ahead for block device */
+#define BLKRAGET   _IO(0x12,99)	/* get current read ahead setting */
+#define BLKFRASET  _IO(0x12,100)/* set filesystem (mm/filemap.c) read-ahead */
+#define BLKFRAGET  _IO(0x12,101)/* get filesystem (mm/filemap.c) read-ahead */
+#define BLKSECTSET _IO(0x12,102)/* set max sectors per request (ll_rw_blk.c) */
+#define BLKSECTGET _IO(0x12,103)/* get max sectors per request (ll_rw_blk.c) */
+#define BLKSSZGET  _IO(0x12,104)/* get block device sector size */
+#if 0
+#define BLKPG      _IO(0x12,105)/* See blkpg.h */
+
+/* Some people are morons.  Do not use sizeof! */
+
+#define BLKELVGET  _IOR(0x12,106,size_t)/* elevator get */
+#define BLKELVSET  _IOW(0x12,107,size_t)/* elevator set */
+/* This was here just to show that the number is taken -
+   probably all these _IO(0x12,*) ioctls should be moved to blkpg.h. */
+#endif
+/* A jump here: 108-111 have been used for various private purposes. */
+#define BLKBSZGET  _IOR(0x12,112,size_t)
+#define BLKBSZSET  _IOW(0x12,113,size_t)
+#define BLKGETSIZE64 _IOR(0x12,114,size_t)	/* return device size in bytes (u64 *arg) */
+
+#define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
+#define FIBMAP	   _IO(0x00,1)	/* bmap access */
+#define FIGETBSZ   _IO(0x00,2)	/* get the block size used for bmap */
+
+#ifdef __KERNEL__
+
+#include <linux/linkage.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/dcache.h>
+#include <linux/stat.h>
+#include <linux/cache.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <linux/prio_tree.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+#include <asm/byteorder.h>
+
+struct iovec;
+struct nameidata;
+struct kiocb;
+struct pipe_inode_info;
+struct poll_table_struct;
+struct kstatfs;
+struct vm_area_struct;
+struct vfsmount;
+
+/* Used to be a macro which just called the function, now just a function */
+extern void update_atime (struct inode *);
+
+extern void __init inode_init(unsigned long);
+extern void __init inode_init_early(void);
+extern void __init mnt_init(unsigned long);
+extern void __init files_init(unsigned long);
+
+struct buffer_head;
+typedef int (get_block_t)(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh_result, int create);
+typedef int (get_blocks_t)(struct inode *inode, sector_t iblock,
+			unsigned long max_blocks,
+			struct buffer_head *bh_result, int create);
+typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
+			ssize_t bytes, void *private);
+
+/*
+ * Attribute flags.  These should be or-ed together to figure out what
+ * has been changed!
+ */
+#define ATTR_MODE	1
+#define ATTR_UID	2
+#define ATTR_GID	4
+#define ATTR_SIZE	8
+#define ATTR_ATIME	16
+#define ATTR_MTIME	32
+#define ATTR_CTIME	64
+#define ATTR_ATIME_SET	128
+#define ATTR_MTIME_SET	256
+#define ATTR_FORCE	512	/* Not a change, but a change it */
+#define ATTR_ATTR_FLAG	1024
+#define ATTR_KILL_SUID	2048
+#define ATTR_KILL_SGID	4096
+#define ATTR_FILE	8192
+
+/*
+ * This is the Inode Attributes structure, used for notify_change().  It
+ * uses the above definitions as flags, to know which values have changed.
+ * Also, in this manner, a Filesystem can look at only the values it cares
+ * about.  Basically, these are the attributes that the VFS layer can
+ * request to change from the FS layer.
+ *
+ * Derek Atkins <warlord@MIT.EDU> 94-10-20
+ */
+struct iattr {
+	unsigned int	ia_valid;
+	umode_t		ia_mode;
+	uid_t		ia_uid;
+	gid_t		ia_gid;
+	loff_t		ia_size;
+	struct timespec	ia_atime;
+	struct timespec	ia_mtime;
+	struct timespec	ia_ctime;
+
+	/*
+	 * Not an attribute, but an auxilary info for filesystems wanting to
+	 * implement an ftruncate() like method.  NOTE: filesystem should
+	 * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL).
+	 */
+	struct file	*ia_file;
+};
+
+/*
+ * Includes for diskquotas.
+ */
+#include <linux/quota.h>
+
+/*
+ * oh the beauties of C type declarations.
+ */
+struct page;
+struct address_space;
+struct writeback_control;
+
+struct address_space_operations {
+	int (*writepage)(struct page *page, struct writeback_control *wbc);
+	int (*readpage)(struct file *, struct page *);
+	int (*sync_page)(struct page *);
+
+	/* Write back some dirty pages from this mapping. */
+	int (*writepages)(struct address_space *, struct writeback_control *);
+
+	/* Set a page dirty */
+	int (*set_page_dirty)(struct page *page);
+
+	int (*readpages)(struct file *filp, struct address_space *mapping,
+			struct list_head *pages, unsigned nr_pages);
+
+	/*
+	 * ext3 requires that a successful prepare_write() call be followed
+	 * by a commit_write() call - they must be balanced
+	 */
+	int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
+	int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
+	/* Unfortunately this kludge is needed for FIBMAP. Don't use it */
+	sector_t (*bmap)(struct address_space *, sector_t);
+	int (*invalidatepage) (struct page *, unsigned long);
+	int (*releasepage) (struct page *, gfp_t);
+	ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
+			loff_t offset, unsigned long nr_segs);
+	struct page* (*get_xip_page)(struct address_space *, sector_t,
+			int);
+};
+
+struct backing_dev_info;
+struct address_space {
+	struct inode		*host;		/* owner: inode, block_device */
+	struct radix_tree_root	page_tree;	/* radix tree of all pages */
+	rwlock_t		tree_lock;	/* and rwlock protecting it */
+	unsigned int		i_mmap_writable;/* count VM_SHARED mappings */
+	struct prio_tree_root	i_mmap;		/* tree of private and shared mappings */
+	struct list_head	i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
+	spinlock_t		i_mmap_lock;	/* protect tree, count, list */
+	unsigned int		truncate_count;	/* Cover race condition with truncate */
+	unsigned long		nrpages;	/* number of total pages */
+	pgoff_t			writeback_index;/* writeback starts here */
+	struct address_space_operations *a_ops;	/* methods */
+	unsigned long		flags;		/* error bits/gfp mask */
+	struct backing_dev_info *backing_dev_info; /* device readahead, etc */
+	spinlock_t		private_lock;	/* for use by the address_space */
+	struct list_head	private_list;	/* ditto */
+	struct address_space	*assoc_mapping;	/* ditto */
+} __attribute__((aligned(sizeof(long))));
+	/*
+	 * On most architectures that alignment is already the case; but
+ * must be enforced here for CRIS, to let the least significant bit
+	 * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
+	 */
+
+struct block_device {
+	dev_t			bd_dev;  /* not a kdev_t - it's a search key */
+	struct inode *		bd_inode;	/* will die */
+	int			bd_openers;
+	struct semaphore	bd_sem;	/* open/close mutex */
+	struct semaphore	bd_mount_sem;	/* mount mutex */
+	struct list_head	bd_inodes;
+	void *			bd_holder;
+	int			bd_holders;
+	struct block_device *	bd_contains;
+	unsigned		bd_block_size;
+	struct hd_struct *	bd_part;
+	/* number of times partitions within this device have been opened. */
+	unsigned		bd_part_count;
+	int			bd_invalidated;
+	struct gendisk *	bd_disk;
+	struct list_head	bd_list;
+	struct backing_dev_info *bd_inode_backing_dev_info;
+	/*
+	 * Private data.  You must have bd_claim'ed the block_device
+	 * to use this.  NOTE:  bd_claim allows an owner to claim
+	 * the same device multiple times, the owner must take special
+	 * care to not mess up bd_private for that case.
+	 */
+	unsigned long		bd_private;
+};
+
+/*
+ * Radix-tree tags, for tagging dirty and writeback pages within the pagecache
+ * radix trees
+ */
+#define PAGECACHE_TAG_DIRTY	0
+#define PAGECACHE_TAG_WRITEBACK	1
+
+int mapping_tagged(struct address_space *mapping, int tag);
+
+/*
+ * Might pages of this file be mapped into userspace?
+ */
+static inline int mapping_mapped(struct address_space *mapping)
+{
+	return	!prio_tree_empty(&mapping->i_mmap) ||
+		!list_empty(&mapping->i_mmap_nonlinear);
+}
+
+/*
+ * Might pages of this file have been modified in userspace?
+ * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap_pgoff
+ * marks a vma as VM_SHARED if it is shared and the file was opened for
+ * writing, i.e. the vma may be mprotect()ed writable even if it is
+ * currently read-only.
+ */
+static inline int mapping_writably_mapped(struct address_space *mapping)
+{
+	return mapping->i_mmap_writable != 0;
+}
+
+/*
+ * Use sequence counter to get consistent i_size on 32-bit processors.
+ */
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#include <linux/seqlock.h>
+#define __NEED_I_SIZE_ORDERED
+#define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount)
+#else
+#define i_size_ordered_init(inode) do { } while (0)
+#endif
+
+struct inode {
+	struct hlist_node	i_hash;
+	struct list_head	i_list;
+	struct list_head	i_sb_list;
+	struct list_head	i_dentry;
+	unsigned long		i_ino;
+	atomic_t		i_count;
+	umode_t			i_mode;
+	unsigned int		i_nlink;
+	uid_t			i_uid;
+	gid_t			i_gid;
+	dev_t			i_rdev;
+	loff_t			i_size;
+	struct timespec		i_atime;
+	struct timespec		i_mtime;
+	struct timespec		i_ctime;
+	unsigned int		i_blkbits;
+	unsigned long		i_blksize;
+	unsigned long		i_version;
+	unsigned long		i_blocks;
+	unsigned short          i_bytes;
+	spinlock_t		i_lock;	/* i_blocks, i_bytes, maybe i_size */
+	struct semaphore	i_sem;
+	struct rw_semaphore	i_alloc_sem;
+	struct inode_operations	*i_op;
+	struct file_operations	*i_fop;	/* former ->i_op->default_file_ops */
+	struct super_block	*i_sb;
+	struct file_lock	*i_flock;
+	struct address_space	*i_mapping;
+	struct address_space	i_data;
+#ifdef CONFIG_QUOTA
+	struct dquot		*i_dquot[MAXQUOTAS];
+#endif
+	/* These three should probably be a union */
+	struct list_head	i_devices;
+	struct pipe_inode_info	*i_pipe;
+	struct block_device	*i_bdev;
+	struct cdev		*i_cdev;
+	int			i_cindex;
+
+	__u32			i_generation;
+
+#ifdef CONFIG_DNOTIFY
+	unsigned long		i_dnotify_mask; /* Directory notify events */
+	struct dnotify_struct	*i_dnotify; /* for directory notifications */
+#endif
+
+#ifdef CONFIG_INOTIFY
+	struct list_head	inotify_watches; /* watches on this inode */
+	struct semaphore	inotify_sem;	/* protects the watches list */
+#endif
+
+	unsigned long		i_state;
+	unsigned long		dirtied_when;	/* jiffies of first dirtying */
+
+	unsigned int		i_flags;
+
+	atomic_t		i_writecount;
+	void			*i_security;
+	union {
+		void		*generic_ip;
+	} u;
+#ifdef __NEED_I_SIZE_ORDERED
+	seqcount_t		i_size_seqcount;
+#endif
+};
+
+/*
+ * NOTE: in a 32bit arch with a preemptable kernel and
+ * a UP compile the i_size_read/write must be atomic
+ * with respect to the local cpu (unlike with preempt disabled),
+ * but they don't need to be atomic with respect to other cpus like in
+ * true SMP (so they either need to locally disable irqs around
+ * the read, or, for example on x86, they can still be implemented as a
+ * cmpxchg8b without the need of the lock prefix). For SMP compiles
+ * and 64bit archs it makes no difference if preempt is enabled or not.
+ */
+static inline loff_t i_size_read(struct inode *inode)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	loff_t i_size;
+	unsigned int seq;
+
+	do {
+		seq = read_seqcount_begin(&inode->i_size_seqcount);
+		i_size = inode->i_size;
+	} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
+	return i_size;
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+	loff_t i_size;
+
+	preempt_disable();
+	i_size = inode->i_size;
+	preempt_enable();
+	return i_size;
+#else
+	return inode->i_size;
+#endif
+}
+
+
+static inline void i_size_write(struct inode *inode, loff_t i_size)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+	write_seqcount_begin(&inode->i_size_seqcount);
+	inode->i_size = i_size;
+	write_seqcount_end(&inode->i_size_seqcount);
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+	preempt_disable();
+	inode->i_size = i_size;
+	preempt_enable();
+#else
+	inode->i_size = i_size;
+#endif
+}
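+
+/*
+ * Illustrative usage: generic code should go through these accessors
+ * rather than touching inode->i_size directly, since a raw 64-bit load
+ * can tear on 32-bit SMP.  Writers are expected to be serialized
+ * (e.g. by i_sem), since the seqcount alone does not serialize them:
+ *
+ *	loff_t size = i_size_read(inode);
+ *	...
+ *	i_size_write(inode, new_size);
+ */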
+
+static inline unsigned iminor(struct inode *inode)
+{
+	return MINOR(inode->i_rdev);
+}
+
+static inline unsigned imajor(struct inode *inode)
+{
+	return MAJOR(inode->i_rdev);
+}
+
+extern struct block_device *I_BDEV(struct inode *inode);
+
+struct fown_struct {
+	rwlock_t lock;          /* protects pid, uid, euid fields */
+	int pid;		/* pid or -pgrp where SIGIO should be sent */
+	uid_t uid, euid;	/* uid/euid of process setting the owner */
+	void *security;
+	int signum;		/* posix.1b rt signal to be delivered on IO */
+};
+
+/*
+ * Track a single file's readahead state
+ */
+struct file_ra_state {
+	unsigned long start;		/* Current window */
+	unsigned long size;
+	unsigned long flags;		/* ra flags RA_FLAG_xxx */
+	unsigned long cache_hit;	/* cache hit count */
+	unsigned long prev_page;	/* Cache last read() position */
+	unsigned long ahead_start;	/* Ahead window */
+	unsigned long ahead_size;
+	unsigned long ra_pages;		/* Maximum readahead window */
+	unsigned long mmap_hit;		/* Cache hit stat for mmap accesses */
+	unsigned long mmap_miss;	/* Cache miss stat for mmap accesses */
+};
+#define RA_FLAG_MISS 0x01	/* a cache miss occurred against this file */
+#define RA_FLAG_INCACHE 0x02	/* file is already in cache */
+
+struct file {
+	/*
+	 * fu_list becomes invalid after file_free is called and queued via
+	 * fu_rcuhead for RCU freeing
+	 */
+	union {
+		struct list_head	fu_list;
+		struct rcu_head 	fu_rcuhead;
+	} f_u;
+	struct dentry		*f_dentry;
+	struct vfsmount         *f_vfsmnt;
+	struct file_operations	*f_op;
+	atomic_t		f_count;
+	unsigned int 		f_flags;
+	mode_t			f_mode;
+	loff_t			f_pos;
+	struct fown_struct	f_owner;
+	unsigned int		f_uid, f_gid;
+	struct file_ra_state	f_ra;
+
+	unsigned long		f_version;
+	void			*f_security;
+
+	/* needed for tty driver, and maybe others */
+	void			*private_data;
+
+#ifdef CONFIG_EPOLL
+	/* Used by fs/eventpoll.c to link all the hooks to this file */
+	struct list_head	f_ep_links;
+	spinlock_t		f_ep_lock;
+#endif /* #ifdef CONFIG_EPOLL */
+	struct address_space	*f_mapping;
+};
+extern spinlock_t files_lock;
+#define file_list_lock() spin_lock(&files_lock);
+#define file_list_unlock() spin_unlock(&files_lock);
+
+#define get_file(x)	rcuref_inc(&(x)->f_count)
+#define file_count(x)	atomic_read(&(x)->f_count)
+
+#define	MAX_NON_LFS	((1UL<<31) - 1)
+
+/* Page cache limit. The filesystems should put that into their s_maxbytes
+   limits, otherwise bad things can happen in the VM. */
+#if BITS_PER_LONG==32
+#define MAX_LFS_FILESIZE	(((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) 
+#elif BITS_PER_LONG==64
+#define MAX_LFS_FILESIZE 	0x7fffffffffffffffUL
+#endif
+
+#define FL_POSIX	1
+#define FL_FLOCK	2
+#define FL_ACCESS	8	/* not trying to lock, just looking */
+#define FL_LOCKD	16	/* lock held by rpc.lockd */
+#define FL_LEASE	32	/* lease held on this file */
+#define FL_SLEEP	128	/* A blocking lock */
+
+/*
+ * The POSIX file lock owner is determined by
+ * the "struct files_struct" in the thread group
+ * (or NULL for no owner - BSD locks).
+ *
+ * Lockd stuffs a "host" pointer into this.
+ */
+typedef struct files_struct *fl_owner_t;
+
+struct file_lock_operations {
+	void (*fl_insert)(struct file_lock *);	/* lock insertion callback */
+	void (*fl_remove)(struct file_lock *);	/* lock removal callback */
+	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
+	void (*fl_release_private)(struct file_lock *);
+};
+
+struct lock_manager_operations {
+	int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
+	void (*fl_notify)(struct file_lock *);	/* unblock callback */
+	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
+	void (*fl_release_private)(struct file_lock *);
+	void (*fl_break)(struct file_lock *);
+	int (*fl_mylease)(struct file_lock *, struct file_lock *);
+	int (*fl_change)(struct file_lock **, int);
+};
+
+/* that will die - we need it for nfs_lock_info */
+#include <linux/nfs_fs_i.h>
+
+struct file_lock {
+	struct file_lock *fl_next;	/* singly linked list for this inode  */
+	struct list_head fl_link;	/* doubly linked list of all locks */
+	struct list_head fl_block;	/* circular list of blocked processes */
+	fl_owner_t fl_owner;
+	unsigned int fl_pid;
+	wait_queue_head_t fl_wait;
+	struct file *fl_file;
+	unsigned char fl_flags;
+	unsigned char fl_type;
+	loff_t fl_start;
+	loff_t fl_end;
+
+	struct fasync_struct *	fl_fasync; /* for lease break notifications */
+	unsigned long fl_break_time;	/* for nonblocking lease breaks */
+
+	struct file_lock_operations *fl_ops;	/* Callbacks for filesystems */
+	struct lock_manager_operations *fl_lmops;	/* Callbacks for lockmanagers */
+	union {
+		struct nfs_lock_info	nfs_fl;
+		struct nfs4_lock_info	nfs4_fl;
+	} fl_u;
+};
+
+/* The following constant reflects the upper bound of the file/locking space */
+#ifndef OFFSET_MAX
+#define INT_LIMIT(x)	(~((x)1 << (sizeof(x)*8 - 1)))
+#define OFFSET_MAX	INT_LIMIT(loff_t)
+#define OFFT_OFFSET_MAX	INT_LIMIT(off_t)
+#endif
+
+extern struct list_head file_lock_list;
+
+#include <linux/fcntl.h>
+
+extern int fcntl_getlk(struct file *, struct flock __user *);
+extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
+			struct flock __user *);
+
+#if BITS_PER_LONG == 32
+extern int fcntl_getlk64(struct file *, struct flock64 __user *);
+extern int fcntl_setlk64(unsigned int, struct file *, unsigned int,
+			struct flock64 __user *);
+#endif
+
+extern void send_sigio(struct fown_struct *fown, int fd, int band);
+extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg);
+extern int fcntl_getlease(struct file *filp);
+
+/* fs/locks.c */
+extern void locks_init_lock(struct file_lock *);
+extern void locks_copy_lock(struct file_lock *, struct file_lock *);
+extern void locks_remove_posix(struct file *, fl_owner_t);
+extern void locks_remove_flock(struct file *);
+extern struct file_lock *posix_test_lock(struct file *, struct file_lock *);
+extern int posix_lock_file(struct file *, struct file_lock *);
+extern int posix_lock_file_wait(struct file *, struct file_lock *);
+extern void posix_block_lock(struct file_lock *, struct file_lock *);
+extern void posix_unblock_lock(struct file *, struct file_lock *);
+extern int posix_locks_deadlock(struct file_lock *, struct file_lock *);
+extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
+extern int __break_lease(struct inode *inode, unsigned int flags);
+extern void lease_get_mtime(struct inode *, struct timespec *time);
+extern int setlease(struct file *, long, struct file_lock **);
+extern int lease_modify(struct file_lock **, int);
+extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
+extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
+extern void steal_locks(fl_owner_t from);
+
+struct fasync_struct {
+	int	magic;
+	int	fa_fd;
+	struct	fasync_struct	*fa_next; /* singly linked list */
+	struct	file 		*fa_file;
+};
+
+#define FASYNC_MAGIC 0x4601
+
+/* SMP safe fasync helpers: */
+extern int fasync_helper(int, struct file *, int, struct fasync_struct **);
+/* can be called from interrupts */
+extern void kill_fasync(struct fasync_struct **, int, int);
+/* only for net: no internal synchronization */
+extern void __kill_fasync(struct fasync_struct *, int, int);
+
+extern int f_setown(struct file *filp, unsigned long arg, int force);
+extern void f_delown(struct file *filp);
+extern int send_sigurg(struct fown_struct *fown);
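+
+/*
+ * A minimal sketch of how a character driver typically uses the fasync
+ * helpers (mydev_queue and mydev_fasync are assumed names, not kernel
+ * API):
+ *
+ *	static struct fasync_struct *mydev_queue;
+ *
+ *	static int mydev_fasync(int fd, struct file *filp, int on)
+ *	{
+ *		return fasync_helper(fd, filp, on, &mydev_queue);
+ *	}
+ *
+ * and, when new data arrives:
+ *
+ *	kill_fasync(&mydev_queue, SIGIO, POLL_IN);
+ */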
+
+/*
+ *	Umount options
+ */
+
+#define MNT_FORCE	0x00000001	/* Attempt to forcibly umount */
+#define MNT_DETACH	0x00000002	/* Just detach from the tree */
+#define MNT_EXPIRE	0x00000004	/* Mark for expiry */
+
+extern struct list_head super_blocks;
+extern spinlock_t sb_lock;
+
+#define sb_entry(list)	list_entry((list), struct super_block, s_list)
+#define S_BIAS (1<<30)
+struct super_block {
+	struct list_head	s_list;		/* Keep this first */
+	dev_t			s_dev;		/* search index; _not_ kdev_t */
+	unsigned long		s_blocksize;
+	unsigned long		s_old_blocksize;
+	unsigned char		s_blocksize_bits;
+	unsigned char		s_dirt;
+	unsigned long long	s_maxbytes;	/* Max file size */
+	struct file_system_type	*s_type;
+	struct super_operations	*s_op;
+	struct dquot_operations	*dq_op;
+ 	struct quotactl_ops	*s_qcop;
+	struct export_operations *s_export_op;
+	unsigned long		s_flags;
+	unsigned long		s_magic;
+	struct dentry		*s_root;
+	struct rw_semaphore	s_umount;
+	struct semaphore	s_lock;
+	int			s_count;
+	int			s_syncing;
+	int			s_need_sync_fs;
+	atomic_t		s_active;
+	void                    *s_security;
+	struct xattr_handler	**s_xattr;
+
+	struct list_head	s_inodes;	/* all inodes */
+	struct list_head	s_dirty;	/* dirty inodes */
+	struct list_head	s_io;		/* parked for writeback */
+	struct hlist_head	s_anon;		/* anonymous dentries for (nfs) exporting */
+	struct list_head	s_files;
+
+	struct block_device	*s_bdev;
+	struct list_head	s_instances;
+	struct quota_info	s_dquot;	/* Diskquota specific options */
+
+	int			s_frozen;
+	wait_queue_head_t	s_wait_unfrozen;
+
+	char s_id[32];				/* Informational name */
+
+	void 			*s_fs_info;	/* Filesystem private info */
+
+	/*
+	 * The next field is for VFS *only*. No filesystems have any business
+	 * even looking at it. You have been warned.
+	 */
+	struct semaphore s_vfs_rename_sem;	/* Kludge */
+
+	/* Granularity of c/m/atime in ns.
+	   Cannot be worse than a second */
+	u32		   s_time_gran;
+};
+
+extern struct timespec current_fs_time(struct super_block *sb);
+
+/*
+ * Snapshotting support.
+ */
+enum {
+	SB_UNFROZEN = 0,
+	SB_FREEZE_WRITE	= 1,
+	SB_FREEZE_TRANS = 2,
+};
+
+#define vfs_check_frozen(sb, level) \
+	wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level)))
+
+static inline void get_fs_excl(void)
+{
+	atomic_inc(&current->fs_excl);
+}
+
+static inline void put_fs_excl(void)
+{
+	atomic_dec(&current->fs_excl);
+}
+
+static inline int has_fs_excl(void)
+{
+	return atomic_read(&current->fs_excl);
+}
+
+
+/*
+ * Superblock locking.
+ */
+static inline void lock_super(struct super_block * sb)
+{
+	get_fs_excl();
+	down(&sb->s_lock);
+}
+
+static inline void unlock_super(struct super_block * sb)
+{
+	put_fs_excl();
+	up(&sb->s_lock);
+}
+
+/*
+ * VFS helper functions.
+ */
+extern int vfs_permission(struct nameidata *, int);
+extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
+extern int vfs_mkdir(struct inode *, struct dentry *, int);
+extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t);
+extern int vfs_symlink(struct inode *, struct dentry *, const char *, int);
+extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
+extern int vfs_rmdir(struct inode *, struct dentry *);
+extern int vfs_unlink(struct inode *, struct dentry *);
+extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+
+/*
+ * VFS dentry helper functions.
+ */
+extern void dentry_unhash(struct dentry *dentry);
+
+/*
+ * VFS file helper functions.
+ */
+extern int file_permission(struct file *, int);
+
+/*
+ * File types
+ *
+ * NOTE! These match bits 12..15 of stat.st_mode
+ * (ie "(i_mode >> 12) & 15").
+ */
+#define DT_UNKNOWN	0
+#define DT_FIFO		1
+#define DT_CHR		2
+#define DT_DIR		4
+#define DT_BLK		6
+#define DT_REG		8
+#define DT_LNK		10
+#define DT_SOCK		12
+#define DT_WHT		14
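+
+/*
+ * For example, (S_IFREG >> 12) == DT_REG and (S_IFDIR >> 12) == DT_DIR,
+ * so a filesystem can derive d_type as (inode->i_mode >> 12) & 15.
+ */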
+
+#define OSYNC_METADATA	(1<<0)
+#define OSYNC_DATA	(1<<1)
+#define OSYNC_INODE	(1<<2)
+int generic_osync_inode(struct inode *, struct address_space *, int);
+
+/*
+ * This is the "filldir" function type, used by readdir() to let
+ * the kernel specify what kind of dirent layout it wants to have.
+ * This allows the kernel to read directories into kernel space or
+ * to have different dirent layouts depending on the binary type.
+ */
+typedef int (*filldir_t)(void *, const char *, int, loff_t, ino_t, unsigned);
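+
+/*
+ * A minimal sketch of a filldir callback (count_actor is an assumed
+ * name): ->readdir() invokes it once per directory entry and stops
+ * the walk when it returns non-zero.
+ *
+ *	static int count_actor(void *buf, const char *name, int namelen,
+ *			loff_t offset, ino_t ino, unsigned int d_type)
+ *	{
+ *		(*(int *)buf)++;
+ *		return 0;
+ *	}
+ *
+ * which could then be driven via vfs_readdir(file, count_actor, &count).
+ */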
+
+struct block_device_operations {
+	int (*open) (struct inode *, struct file *);
+	int (*release) (struct inode *, struct file *);
+	int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
+	long (*unlocked_ioctl) (struct file *, unsigned, unsigned long);
+	long (*compat_ioctl) (struct file *, unsigned, unsigned long);
+	int (*direct_access) (struct block_device *, sector_t, unsigned long *);
+	int (*media_changed) (struct gendisk *);
+	int (*revalidate_disk) (struct gendisk *);
+	struct module *owner;
+};
+
+/*
+ * "descriptor" for what we're up to with a read for sendfile().
+ * This allows us to use the same read code yet
+ * have multiple different users of the data that
+ * we read from a file.
+ *
+ * The simplest case just copies the data to user
+ * mode.
+ */
+typedef struct {
+	size_t written;
+	size_t count;
+	union {
+		char __user * buf;
+		void *data;
+	} arg;
+	int error;
+} read_descriptor_t;
+
+typedef int (*read_actor_t)(read_descriptor_t *, struct page *, unsigned long, unsigned long);
+
+/* These macros are for out-of-kernel modules to test that
+ * the kernel supports the unlocked_ioctl and compat_ioctl
+ * fields in struct file_operations. */
+#define HAVE_COMPAT_IOCTL 1
+#define HAVE_UNLOCKED_IOCTL 1
+
+/*
+ * NOTE:
+ * read, write, poll, fsync, readv, writev, unlocked_ioctl and compat_ioctl
+ * can be called without the big kernel lock held in all filesystems.
+ */
+struct file_operations {
+	struct module *owner;
+	loff_t (*llseek) (struct file *, loff_t, int);
+	ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
+	ssize_t (*aio_read) (struct kiocb *, char __user *, size_t, loff_t);
+	ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
+	ssize_t (*aio_write) (struct kiocb *, const char __user *, size_t, loff_t);
+	int (*readdir) (struct file *, void *, filldir_t);
+	unsigned int (*poll) (struct file *, struct poll_table_struct *);
+	int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long);
+	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
+	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
+	int (*mmap) (struct file *, struct vm_area_struct *);
+	int (*open) (struct inode *, struct file *);
+	int (*flush) (struct file *);
+	int (*release) (struct inode *, struct file *);
+	int (*fsync) (struct file *, struct dentry *, int datasync);
+	int (*aio_fsync) (struct kiocb *, int datasync);
+	int (*fasync) (int, struct file *, int);
+	int (*lock) (struct file *, int, struct file_lock *);
+	ssize_t (*readv) (struct file *, const struct iovec *, unsigned long, loff_t *);
+	ssize_t (*writev) (struct file *, const struct iovec *, unsigned long, loff_t *);
+	ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t, void *);
+	ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int);
+	unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+	int (*check_flags)(int);
+	int (*dir_notify)(struct file *filp, unsigned long arg);
+	int (*flock) (struct file *, int, struct file_lock *);
+};
+
+struct inode_operations {
+	int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
+	struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
+	int (*link) (struct dentry *,struct inode *,struct dentry *);
+	int (*unlink) (struct inode *,struct dentry *);
+	int (*symlink) (struct inode *,struct dentry *,const char *);
+	int (*mkdir) (struct inode *,struct dentry *,int);
+	int (*rmdir) (struct inode *,struct dentry *);
+	int (*mknod) (struct inode *,struct dentry *,int,dev_t);
+	int (*rename) (struct inode *, struct dentry *,
+			struct inode *, struct dentry *);
+	int (*readlink) (struct dentry *, char __user *,int);
+	void * (*follow_link) (struct dentry *, struct nameidata *);
+	void (*put_link) (struct dentry *, struct nameidata *, void *);
+	void (*truncate) (struct inode *);
+	int (*permission) (struct inode *, int, struct nameidata *);
+	int (*setattr) (struct dentry *, struct iattr *);
+	int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
+	int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
+	ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
+	ssize_t (*listxattr) (struct dentry *, char *, size_t);
+	int (*removexattr) (struct dentry *, const char *);
+};
+
+struct seq_file;
+
+extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
+extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
+extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
+		unsigned long, loff_t *);
+extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
+		unsigned long, loff_t *);
+
+/*
+ * NOTE: write_inode, delete_inode, clear_inode, put_inode can be called
+ * without the big kernel lock held in all filesystems.
+ */
+struct super_operations {
+   	struct inode *(*alloc_inode)(struct super_block *sb);
+	void (*destroy_inode)(struct inode *);
+
+	void (*read_inode) (struct inode *);
+  
+   	void (*dirty_inode) (struct inode *);
+	int (*write_inode) (struct inode *, int);
+	void (*put_inode) (struct inode *);
+	void (*drop_inode) (struct inode *);
+	void (*delete_inode) (struct inode *);
+	void (*put_super) (struct super_block *);
+	void (*write_super) (struct super_block *);
+	int (*sync_fs)(struct super_block *sb, int wait);
+	void (*write_super_lockfs) (struct super_block *);
+	void (*unlockfs) (struct super_block *);
+	int (*statfs) (struct super_block *, struct kstatfs *);
+	int (*remount_fs) (struct super_block *, int *, char *);
+	void (*clear_inode) (struct inode *);
+	void (*umount_begin) (struct super_block *);
+
+	int (*show_options)(struct seq_file *, struct vfsmount *);
+
+	ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
+	ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
+};
+
+/* Inode state bits.  Protected by inode_lock. */
+#define I_DIRTY_SYNC		1 /* Not dirty enough for O_DATASYNC */
+#define I_DIRTY_DATASYNC	2 /* Data-related inode changes pending */
+#define I_DIRTY_PAGES		4 /* The inode's pagecache pages are dirty */
+#define __I_LOCK		3
+#define I_LOCK			(1 << __I_LOCK)
+#define I_FREEING		16
+#define I_CLEAR			32
+#define I_NEW			64
+#define I_WILL_FREE		128
+
+#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
+
+extern void __mark_inode_dirty(struct inode *, int);
+static inline void mark_inode_dirty(struct inode *inode)
+{
+	__mark_inode_dirty(inode, I_DIRTY);
+}
+
+static inline void mark_inode_dirty_sync(struct inode *inode)
+{
+	__mark_inode_dirty(inode, I_DIRTY_SYNC);
+}
+
+static inline void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
+{
+	/* per-mountpoint checks will go here */
+	update_atime(dentry->d_inode);
+}
+
+static inline void file_accessed(struct file *file)
+{
+	if (!(file->f_flags & O_NOATIME))
+		touch_atime(file->f_vfsmnt, file->f_dentry);
+}
+
+int sync_inode(struct inode *inode, struct writeback_control *wbc);
+
+/**
+ * struct export_operations - for nfsd to communicate with file systems
+ * @decode_fh:      decode a file handle fragment and return a &struct dentry
+ * @encode_fh:      encode a file handle fragment from a dentry
+ * @get_name:       find the name for a given inode in a given directory
+ * @get_parent:     find the parent of a given directory
+ * @get_dentry:     find a dentry for the inode given a file handle sub-fragment
+ * @find_exported_dentry:
+ *	set by the exporting module to a standard helper function.
+ *
+ * Description:
+ *    The export_operations structure provides a means for nfsd to communicate
+ *    with a particular exported filesystem - in particular, enabling nfsd and
+ *    the filesystem to co-operate when dealing with file handles.
+ *
+ *    export_operations contains two basic operations for dealing with file
+ *    handles, decode_fh() and encode_fh(), and allows for some other
+ *    operations to be defined which standard helper routines use to get
+ *    specific information from the filesystem.
+ *
+ *    nfsd encodes information used to determine which filesystem a filehandle
+ *    applies to in the initial part of the file handle.  The remainder, termed
+ *    a file handle fragment, is controlled completely by the filesystem.  The
+ *    standard helper routines assume that this fragment will contain one or
+ *    two sub-fragments, one which identifies the file, and one which may be
+ *    used to identify the (a) directory containing the file.
+ *
+ *    In some situations, nfsd needs to get a dentry which is connected into a
+ *    specific part of the file tree.  To allow for this, it passes the
+ *    function acceptable() together with a @context which can be used to see
+ *    if the dentry is acceptable.  As there can be multiple dentries for a
+ *    given file, the filesystem should check each one for acceptability before
+ *    looking for the next.  As soon as an acceptable one is found, it should
+ *    be returned.
+ *
+ * decode_fh:
+ *    @decode_fh is given a &struct super_block (@sb), a file handle fragment
+ *    (@fh, @fh_len) and an acceptability testing function (@acceptable,
+ *    @context).  It should return a &struct dentry which refers to the same
+ *    file that the file handle fragment refers to,  and which passes the
+ *    acceptability test.  If it cannot, it should return a %NULL pointer if
+ *    the file was found but no acceptable &dentries were available, or a
+ *    %ERR_PTR error code indicating why it couldn't be found (e.g. %ENOENT or
+ *    %ENOMEM).
+ *
+ * encode_fh:
+ *    @encode_fh should store in the file handle fragment @fh (using at most
+ *    @max_len bytes) information that can be used by @decode_fh to recover the
+ *    file referred to by the &struct dentry @de.  If the @connectable flag is
+ *    set, encode_fh() should store sufficient information so that a good
+ *    attempt can be made to find not only the file but also its place in the
+ *    filesystem.  This typically means storing a reference to de->d_parent in
+ *    the filehandle fragment.  encode_fh() should return the number of bytes
+ *    stored, or a negative error code such as %-ENOSPC.
+ *
+ * get_name:
+ *    @get_name should find a name for the given @child in the given @parent
+ *    directory.  The name should be stored in @name (with the
+ *    understanding that it already points to a buffer of %NAME_MAX+1
+ *    bytes).  get_name() should return %0 on success, or a negative
+ *    error code on failure.  @get_name will be called without
+ *    @parent->i_sem held.
+ *
+ * get_parent:
+ *    @get_parent should find the parent directory for the given @child which
+ *    is also a directory.  In the event that it cannot be found, or storage
+ *    space cannot be allocated, an %ERR_PTR should be returned.
+ *
+ * get_dentry:
+ *    Given a &super_block (@sb) and a pointer to a file-system specific inode
+ *    identifier, possibly an inode number, (@inump) get_dentry() should find
+ *    the identified inode and return a dentry for that inode.  Any suitable
+ *    dentry can be returned including, if necessary, a new dentry created with
+ *    d_alloc_root.  The caller can then find any other extant dentries by
+ *    following the d_alias links.  If a new dentry was created using
+ *    d_alloc_root, DCACHE_NFSD_DISCONNECTED should be set, and the dentry
+ *    should be d_rehash()ed.
+ *
+ *    If the inode cannot be found, either a %NULL pointer or an %ERR_PTR code
+ *    can be returned.  The @inump will be whatever was passed to
+ *    nfsd_find_fh_dentry() in either the @obj or @parent parameters.
+ *
+ * Locking rules:
+ *    get_parent is called with child->d_inode->i_sem down
+ *    get_name is not (which is possibly inconsistent)
+ */
+
+struct export_operations {
+	struct dentry *(*decode_fh)(struct super_block *sb, __u32 *fh, int fh_len, int fh_type,
+			 int (*acceptable)(void *context, struct dentry *de),
+			 void *context);
+	int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len,
+			 int connectable);
+
+	/* the following are only called from the filesystem itself */
+	int (*get_name)(struct dentry *parent, char *name,
+			struct dentry *child);
+	struct dentry * (*get_parent)(struct dentry *child);
+	struct dentry * (*get_dentry)(struct super_block *sb, void *inump);
+
+	/* This is set by the exporting module to a standard helper */
+	struct dentry * (*find_exported_dentry)(
+		struct super_block *sb, void *obj, void *parent,
+		int (*acceptable)(void *context, struct dentry *de),
+		void *context);
+
+};
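+
+/*
+ * In practice most filesystems rely on the standard helpers and only
+ * provide what those helpers cannot derive.  A sketch of a minimal
+ * setup (myfs_get_parent is an assumed filesystem function):
+ *
+ *	static struct export_operations myfs_export_ops = {
+ *		.get_parent = myfs_get_parent,
+ *	};
+ */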
+
+extern struct dentry *
+find_exported_dentry(struct super_block *sb, void *obj, void *parent,
+		     int (*acceptable)(void *context, struct dentry *de),
+		     void *context);
+
+struct file_system_type {
+	const char *name;
+	int fs_flags;
+	struct super_block *(*get_sb) (struct file_system_type *, int,
+				       const char *, void *);
+	void (*kill_sb) (struct super_block *);
+	struct module *owner;
+	struct file_system_type * next;
+	struct list_head fs_supers;
+};
+
+struct super_block *get_sb_bdev(struct file_system_type *fs_type,
+	int flags, const char *dev_name, void *data,
+	int (*fill_super)(struct super_block *, void *, int));
+struct super_block *get_sb_single(struct file_system_type *fs_type,
+	int flags, void *data,
+	int (*fill_super)(struct super_block *, void *, int));
+struct super_block *get_sb_nodev(struct file_system_type *fs_type,
+	int flags, void *data,
+	int (*fill_super)(struct super_block *, void *, int));
+void generic_shutdown_super(struct super_block *sb);
+void kill_block_super(struct super_block *sb);
+void kill_anon_super(struct super_block *sb);
+void kill_litter_super(struct super_block *sb);
+void deactivate_super(struct super_block *sb);
+int set_anon_super(struct super_block *s, void *data);
+struct super_block *sget(struct file_system_type *type,
+			int (*test)(struct super_block *,void *),
+			int (*set)(struct super_block *,void *),
+			void *data);
+struct super_block *get_sb_pseudo(struct file_system_type *, char *,
+			struct super_operations *ops, unsigned long);
+int __put_super(struct super_block *sb);
+int __put_super_and_need_restart(struct super_block *sb);
+void unnamed_dev_init(void);
+
+/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
+#define fops_get(fops) \
+	(((fops) && try_module_get((fops)->owner) ? (fops) : NULL))
+#define fops_put(fops) \
+	do { if (fops) module_put((fops)->owner); } while(0)
+
+extern int register_filesystem(struct file_system_type *);
+extern int unregister_filesystem(struct file_system_type *);
+extern struct vfsmount *kern_mount(struct file_system_type *);
+extern int may_umount_tree(struct vfsmount *);
+extern int may_umount(struct vfsmount *);
+extern void umount_tree(struct vfsmount *, int, struct list_head *);
+extern void release_mounts(struct list_head *);
+extern long do_mount(char *, char *, char *, unsigned long, void *);
+extern struct vfsmount *copy_tree(struct vfsmount *, struct dentry *, int);
+extern void mnt_set_mountpoint(struct vfsmount *, struct dentry *,
+				  struct vfsmount *);
+
+extern int vfs_statfs(struct super_block *, struct kstatfs *);
+
+#define FLOCK_VERIFY_READ  1
+#define FLOCK_VERIFY_WRITE 2
+
+extern int locks_mandatory_locked(struct inode *);
+extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t);
+
+/*
+ * Candidates for mandatory locking have the setgid bit set
+ * but no group execute bit -  an otherwise meaningless combination.
+ */
+#define MANDATORY_LOCK(inode) \
+	(IS_MANDLOCK(inode) && ((inode)->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+
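+/*
+ * For example, "chmod 2644 file" (setgid set, group-execute clear)
+ * makes a file a mandatory-locking candidate, provided the filesystem
+ * is mounted with the "mand" option so that IS_MANDLOCK() is true.
+ */
+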
+static inline int locks_verify_locked(struct inode *inode)
+{
+	if (MANDATORY_LOCK(inode))
+		return locks_mandatory_locked(inode);
+	return 0;
+}
+
+extern int rw_verify_area(int, struct file *, loff_t *, size_t);
+
+static inline int locks_verify_truncate(struct inode *inode,
+				    struct file *filp,
+				    loff_t size)
+{
+	if (inode->i_flock && MANDATORY_LOCK(inode))
+		return locks_mandatory_area(
+			FLOCK_VERIFY_WRITE, inode, filp,
+			size < inode->i_size ? size : inode->i_size,
+			(size < inode->i_size ? inode->i_size - size
+			 : size - inode->i_size)
+		);
+	return 0;
+}
+
+static inline int break_lease(struct inode *inode, unsigned int mode)
+{
+	if (inode->i_flock)
+		return __break_lease(inode, mode);
+	return 0;
+}
+
+/* fs/open.c */
+
+extern int do_truncate(struct dentry *, loff_t start, struct file *filp);
+extern long do_sys_open(const char __user *filename, int flags, int mode);
+extern struct file *filp_open(const char *, int, int);
+extern struct file * dentry_open(struct dentry *, struct vfsmount *, int);
+extern int filp_close(struct file *, fl_owner_t id);
+extern char * getname(const char __user *);
+
+/* fs/dcache.c */
+extern void __init vfs_caches_init_early(void);
+extern void __init vfs_caches_init(unsigned long);
+
+#define __getname()	kmem_cache_alloc(names_cachep, SLAB_KERNEL)
+#define __putname(name) kmem_cache_free(names_cachep, (void *)(name))
+#ifndef CONFIG_AUDITSYSCALL
+#define putname(name)   __putname(name)
+#else
+extern void putname(const char *name);
+#endif
+
+extern int register_blkdev(unsigned int, const char *);
+extern int unregister_blkdev(unsigned int, const char *);
+extern struct block_device *bdget(dev_t);
+extern void bd_set_size(struct block_device *, loff_t size);
+extern void bd_forget(struct inode *inode);
+extern void bdput(struct block_device *);
+extern struct block_device *open_by_devnum(dev_t, unsigned);
+extern struct file_operations def_blk_fops;
+extern struct address_space_operations def_blk_aops;
+extern struct file_operations def_chr_fops;
+extern struct file_operations bad_sock_fops;
+extern struct file_operations def_fifo_fops;
+extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
+extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long);
+extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
+extern int blkdev_get(struct block_device *, mode_t, unsigned);
+extern int blkdev_put(struct block_device *);
+extern int bd_claim(struct block_device *, void *);
+extern void bd_release(struct block_device *);
+
+/* fs/char_dev.c */
+extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *);
+extern int register_chrdev_region(dev_t, unsigned, const char *);
+extern int register_chrdev(unsigned int, const char *,
+			   struct file_operations *);
+extern int unregister_chrdev(unsigned int, const char *);
+extern void unregister_chrdev_region(dev_t, unsigned);
+extern int chrdev_open(struct inode *, struct file *);
+
+/* fs/block_dev.c */
+#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
+extern const char *__bdevname(dev_t, char *buffer);
+extern const char *bdevname(struct block_device *bdev, char *buffer);
+extern struct block_device *lookup_bdev(const char *);
+extern struct block_device *open_bdev_excl(const char *, int, void *);
+extern void close_bdev_excl(struct block_device *);
+
+extern void init_special_inode(struct inode *, umode_t, dev_t);
+
+/* Invalid inode operations -- fs/bad_inode.c */
+extern void make_bad_inode(struct inode *);
+extern int is_bad_inode(struct inode *);
+
+extern struct file_operations read_fifo_fops;
+extern struct file_operations write_fifo_fops;
+extern struct file_operations rdwr_fifo_fops;
+extern struct file_operations read_pipe_fops;
+extern struct file_operations write_pipe_fops;
+extern struct file_operations rdwr_pipe_fops;
+
+extern int fs_may_remount_ro(struct super_block *);
+
+/*
+ * return READ, READA, or WRITE
+ */
+#define bio_rw(bio)		((bio)->bi_rw & (RW_MASK | RWA_MASK))
+
+/*
+ * return data direction, READ or WRITE
+ */
+#define bio_data_dir(bio)	((bio)->bi_rw & 1)
+
+extern int check_disk_change(struct block_device *);
+extern int invalidate_inodes(struct super_block *);
+extern int __invalidate_device(struct block_device *);
+extern int invalidate_partition(struct gendisk *, int);
+unsigned long invalidate_mapping_pages(struct address_space *mapping,
+					pgoff_t start, pgoff_t end);
+unsigned long invalidate_inode_pages(struct address_space *mapping);
+static inline void invalidate_remote_inode(struct inode *inode)
+{
+	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+	    S_ISLNK(inode->i_mode))
+		invalidate_inode_pages(inode->i_mapping);
+}
+extern int invalidate_inode_pages2(struct address_space *mapping);
+extern int invalidate_inode_pages2_range(struct address_space *mapping,
+					 pgoff_t start, pgoff_t end);
+extern int write_inode_now(struct inode *, int);
+extern int filemap_fdatawrite(struct address_space *);
+extern int filemap_flush(struct address_space *);
+extern int filemap_fdatawait(struct address_space *);
+extern int filemap_write_and_wait(struct address_space *mapping);
+extern int filemap_write_and_wait_range(struct address_space *mapping,
+				        loff_t lstart, loff_t lend);
+extern void sync_supers(void);
+extern void sync_filesystems(int wait);
+extern void emergency_sync(void);
+extern void emergency_remount(void);
+extern int do_remount_sb(struct super_block *sb, int flags,
+			 void *data, int force);
+extern sector_t bmap(struct inode *, sector_t);
+extern int notify_change(struct dentry *, struct iattr *);
+extern int permission(struct inode *, int, struct nameidata *);
+extern int generic_permission(struct inode *, int,
+		int (*check_acl)(struct inode *, int));
+
+extern int get_write_access(struct inode *);
+extern int deny_write_access(struct file *);
+static inline void put_write_access(struct inode * inode)
+{
+	atomic_dec(&inode->i_writecount);
+}
+static inline void allow_write_access(struct file *file)
+{
+	if (file)
+		atomic_inc(&file->f_dentry->d_inode->i_writecount);
+}
+extern int do_pipe(int *);
+
+extern int open_namei(const char *, int, int, struct nameidata *);
+extern int may_open(struct nameidata *, int, int);
+
+extern int kernel_read(struct file *, unsigned long, char *, unsigned long);
+extern struct file * open_exec(const char *);
+ 
+/* fs/dcache.c -- generic fs support functions */
+extern int is_subdir(struct dentry *, struct dentry *);
+extern ino_t find_inode_number(struct dentry *, struct qstr *);
+
+#include <linux/err.h>
+
+/* needed for stackable file system support */
+extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
+
+extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
+
+extern void inode_init_once(struct inode *);
+extern void iput(struct inode *);
+extern struct inode * igrab(struct inode *);
+extern ino_t iunique(struct super_block *, ino_t);
+extern int inode_needs_sync(struct inode *inode);
+extern void generic_delete_inode(struct inode *inode);
+extern void generic_drop_inode(struct inode *inode);
+
+extern struct inode *ilookup5_nowait(struct super_block *sb,
+		unsigned long hashval, int (*test)(struct inode *, void *),
+		void *data);
+extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
+		int (*test)(struct inode *, void *), void *data);
+extern struct inode *ilookup(struct super_block *sb, unsigned long ino);
+
+extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *);
+extern struct inode * iget_locked(struct super_block *, unsigned long);
+extern void unlock_new_inode(struct inode *);
+
+static inline struct inode *iget(struct super_block *sb, unsigned long ino)
+{
+	struct inode *inode = iget_locked(sb, ino);
+	
+	if (inode && (inode->i_state & I_NEW)) {
+		sb->s_op->read_inode(inode);
+		unlock_new_inode(inode);
+	}
+
+	return inode;
+}
+
+extern void __iget(struct inode * inode);
+extern void clear_inode(struct inode *);
+extern void destroy_inode(struct inode *);
+extern struct inode *new_inode(struct super_block *);
+extern int remove_suid(struct dentry *);
+extern void remove_dquot_ref(struct super_block *, int, struct list_head *);
+extern struct semaphore iprune_sem;
+
+extern void __insert_inode_hash(struct inode *, unsigned long hashval);
+extern void remove_inode_hash(struct inode *);
+static inline void insert_inode_hash(struct inode *inode) {
+	__insert_inode_hash(inode, inode->i_ino);
+}
+
+extern struct file * get_empty_filp(void);
+extern void file_move(struct file *f, struct list_head *list);
+extern void file_kill(struct file *f);
+struct bio;
+extern void submit_bio(int, struct bio *);
+extern int bdev_read_only(struct block_device *);
+extern int set_blocksize(struct block_device *, int);
+extern int sb_set_blocksize(struct super_block *, int);
+extern int sb_min_blocksize(struct super_block *, int);
+
+extern int generic_file_mmap(struct file *, struct vm_area_struct *);
+extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
+extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
+extern int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
+extern ssize_t generic_file_read(struct file *, char __user *, size_t, loff_t *);
+int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
+extern ssize_t generic_file_write(struct file *, const char __user *, size_t, loff_t *);
+extern ssize_t generic_file_aio_read(struct kiocb *, char __user *, size_t, loff_t);
+extern ssize_t __generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t *);
+extern ssize_t generic_file_aio_write(struct kiocb *, const char __user *, size_t, loff_t);
+extern ssize_t generic_file_aio_write_nolock(struct kiocb *, const struct iovec *,
+		unsigned long, loff_t *);
+extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
+		unsigned long *, loff_t, loff_t *, size_t, size_t);
+extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
+		unsigned long, loff_t, loff_t *, size_t, ssize_t);
+extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
+extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
+ssize_t generic_file_write_nolock(struct file *file, const struct iovec *iov,
+				unsigned long nr_segs, loff_t *ppos);
+extern ssize_t generic_file_sendfile(struct file *, loff_t *, size_t, read_actor_t, void *);
+extern void do_generic_mapping_read(struct address_space *mapping,
+				    struct file_ra_state *, struct file *,
+				    loff_t *, read_descriptor_t *, read_actor_t);
+extern void
+file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
+extern ssize_t generic_file_readv(struct file *filp, const struct iovec *iov, 
+	unsigned long nr_segs, loff_t *ppos);
+ssize_t generic_file_writev(struct file *filp, const struct iovec *iov, 
+			unsigned long nr_segs, loff_t *ppos);
+extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
+extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
+extern loff_t remote_llseek(struct file *file, loff_t offset, int origin);
+extern int generic_file_open(struct inode * inode, struct file * filp);
+extern int nonseekable_open(struct inode * inode, struct file * filp);
+
+#ifdef CONFIG_FS_XIP
+extern ssize_t xip_file_read(struct file *filp, char __user *buf, size_t len,
+			     loff_t *ppos);
+extern ssize_t xip_file_sendfile(struct file *in_file, loff_t *ppos,
+				 size_t count, read_actor_t actor,
+				 void *target);
+extern int xip_file_mmap(struct file * file, struct vm_area_struct * vma);
+extern ssize_t xip_file_write(struct file *filp, const char __user *buf,
+			      size_t len, loff_t *ppos);
+extern int xip_truncate_page(struct address_space *mapping, loff_t from);
+#else
+static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
+{
+	return 0;
+}
+#endif
+
+static inline void do_generic_file_read(struct file * filp, loff_t *ppos,
+					read_descriptor_t * desc,
+					read_actor_t actor)
+{
+	do_generic_mapping_read(filp->f_mapping,
+				&filp->f_ra,
+				filp,
+				ppos,
+				desc,
+				actor);
+}
+
+ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
+	struct block_device *bdev, const struct iovec *iov, loff_t offset,
+	unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io,
+	int lock_type);
+
+enum {
+	DIO_LOCKING = 1, /* need locking between buffered and direct access */
+	DIO_NO_LOCKING,  /* bdev; no locking at all between buffered/direct */
+	DIO_OWN_LOCKING, /* filesystem locks buffered and direct internally */
+};
+
+static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
+	struct inode *inode, struct block_device *bdev, const struct iovec *iov,
+	loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks,
+	dio_iodone_t end_io)
+{
+	return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
+				nr_segs, get_blocks, end_io, DIO_LOCKING);
+}
+
+static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
+	struct inode *inode, struct block_device *bdev, const struct iovec *iov,
+	loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks,
+	dio_iodone_t end_io)
+{
+	return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
+				nr_segs, get_blocks, end_io, DIO_NO_LOCKING);
+}
+
+static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb,
+	struct inode *inode, struct block_device *bdev, const struct iovec *iov,
+	loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks,
+	dio_iodone_t end_io)
+{
+	return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
+				nr_segs, get_blocks, end_io, DIO_OWN_LOCKING);
+}
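+
+/*
+ * A minimal sketch of an aops ->direct_IO method built on the helper
+ * above (myfs_get_blocks is an assumed get_blocks_t; this mirrors the
+ * usual blockdev-backed pattern):
+ *
+ *	static ssize_t myfs_direct_IO(int rw, struct kiocb *iocb,
+ *			const struct iovec *iov, loff_t offset,
+ *			unsigned long nr_segs)
+ *	{
+ *		struct inode *inode = iocb->ki_filp->f_mapping->host;
+ *
+ *		return blockdev_direct_IO(rw, iocb, inode,
+ *				inode->i_sb->s_bdev, iov, offset,
+ *				nr_segs, myfs_get_blocks, NULL);
+ *	}
+ */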
+
+extern struct file_operations generic_ro_fops;
+
+#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
+
+extern int vfs_readlink(struct dentry *, char __user *, int, const char *);
+extern int vfs_follow_link(struct nameidata *, const char *);
+extern int page_readlink(struct dentry *, char __user *, int);
+extern void *page_follow_link_light(struct dentry *, struct nameidata *);
+extern void page_put_link(struct dentry *, struct nameidata *, void *);
+extern int page_symlink(struct inode *inode, const char *symname, int len);
+extern struct inode_operations page_symlink_inode_operations;
+extern int generic_readlink(struct dentry *, char __user *, int);
+extern void generic_fillattr(struct inode *, struct kstat *);
+extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+void inode_add_bytes(struct inode *inode, loff_t bytes);
+void inode_sub_bytes(struct inode *inode, loff_t bytes);
+loff_t inode_get_bytes(struct inode *inode);
+void inode_set_bytes(struct inode *inode, loff_t bytes);
+
+extern int vfs_readdir(struct file *, filldir_t, void *);
+
+extern int vfs_stat(char __user *, struct kstat *);
+extern int vfs_lstat(char __user *, struct kstat *);
+extern int vfs_fstat(unsigned int, struct kstat *);
+
+extern int vfs_ioctl(struct file *, unsigned int, unsigned int, unsigned long);
+
+extern struct file_system_type *get_fs_type(const char *name);
+extern struct super_block *get_super(struct block_device *);
+extern struct super_block *user_get_super(dev_t);
+extern void drop_super(struct super_block *sb);
+
+extern int dcache_dir_open(struct inode *, struct file *);
+extern int dcache_dir_close(struct inode *, struct file *);
+extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
+extern int dcache_readdir(struct file *, void *, filldir_t);
+extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+extern int simple_statfs(struct super_block *, struct kstatfs *);
+extern int simple_link(struct dentry *, struct inode *, struct dentry *);
+extern int simple_unlink(struct inode *, struct dentry *);
+extern int simple_rmdir(struct inode *, struct dentry *);
+extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+extern int simple_sync_file(struct file *, struct dentry *, int);
+extern int simple_empty(struct dentry *);
+extern int simple_readpage(struct file *file, struct page *page);
+extern int simple_prepare_write(struct file *file, struct page *page,
+			unsigned offset, unsigned to);
+extern int simple_commit_write(struct file *file, struct page *page,
+				unsigned offset, unsigned to);
+
+extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *);
+extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
+extern struct file_operations simple_dir_operations;
+extern struct inode_operations simple_dir_inode_operations;
+struct tree_descr { char *name; struct file_operations *ops; int mode; };
+struct dentry *d_alloc_name(struct dentry *, const char *);
+extern int simple_fill_super(struct super_block *, int, struct tree_descr *);
+extern int simple_pin_fs(char *name, struct vfsmount **mount, int *count);
+extern void simple_release_fs(struct vfsmount **mount, int *count);
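+
+/*
+ * Illustrative simple_fill_super() usage (the myfs_* names and magic
+ * number are assumptions): slot 0 of the table is unused, entries are
+ * indexed from 1, and an entry with an empty name terminates the table.
+ *
+ *	static struct tree_descr myfs_files[] = {
+ *		[1] = { "status", &myfs_status_fops, S_IRUGO },
+ *		{ "" }
+ *	};
+ *
+ *	return simple_fill_super(sb, MYFS_MAGIC, myfs_files);
+ */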
+
+extern ssize_t simple_read_from_buffer(void __user *, size_t, loff_t *, const void *, size_t);
+
+extern int inode_change_ok(struct inode *, struct iattr *);
+extern int __must_check inode_setattr(struct inode *, struct iattr *);
+
+extern void inode_update_time(struct inode *inode, int ctime_too);
+
+static inline ino_t parent_ino(struct dentry *dentry)
+{
+	ino_t res;
+
+	spin_lock(&dentry->d_lock);
+	res = dentry->d_parent->d_inode->i_ino;
+	spin_unlock(&dentry->d_lock);
+	return res;
+}
+
+/* kernel/fork.c */
+extern int unshare_files(void);
+
+/* Transaction based IO helpers */
+
+/*
+ * An argresp is stored in an allocated page and holds the
+ * size of the argument or response, along with its content
+ */
+struct simple_transaction_argresp {
+	ssize_t size;
+	char data[0];
+};
+
+#define SIMPLE_TRANSACTION_LIMIT (PAGE_SIZE - sizeof(struct simple_transaction_argresp))
+
+char *simple_transaction_get(struct file *file, const char __user *buf,
+				size_t size);
+ssize_t simple_transaction_read(struct file *file, char __user *buf,
+				size_t size, loff_t *pos);
+int simple_transaction_release(struct inode *inode, struct file *file);
+
+static inline void simple_transaction_set(struct file *file, size_t n)
+{
+	struct simple_transaction_argresp *ar = file->private_data;
+
+	BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);
+
+	/*
+	 * The barrier ensures that ar->size will really remain zero until
+	 * ar->data is ready for reading.
+	 */
+	smp_mb();
+	ar->size = n;
+}
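+
+/*
+ * Sketch of the transaction pattern these helpers implement (the
+ * mydev_* names are assumptions): the write() method parses the
+ * request and leaves the reply in the same page for a later read().
+ *
+ *	static ssize_t mydev_write(struct file *file,
+ *			const char __user *buf, size_t size, loff_t *pos)
+ *	{
+ *		char *data = simple_transaction_get(file, buf, size);
+ *		ssize_t reply_len;
+ *
+ *		if (IS_ERR(data))
+ *			return PTR_ERR(data);
+ *		reply_len = mydev_handle_request(data, size);
+ *		if (reply_len < 0)
+ *			return reply_len;
+ *		simple_transaction_set(file, reply_len);
+ *		return size;
+ *	}
+ *
+ * with simple_transaction_read() and simple_transaction_release() used
+ * directly as the ->read and ->release file operations.
+ */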
+
+/*
+ * simple attribute files
+ *
+ * These attributes behave similarly to those in sysfs:
+ *
+ * Writing to an attribute immediately sets a value; an open file can be
+ * written to multiple times.
+ *
+ * Reading from an attribute creates a buffer from the value that might get
+ * read with multiple read calls. When the attribute has been read
+ * completely, no further read calls are possible until the file is opened
+ * again.
+ *
+ * All attributes contain a text representation of a numeric value
+ * that is accessed with the get() and set() functions.
+ */
+#define DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)		\
+static int __fops ## _open(struct inode *inode, struct file *file)	\
+{									\
+	__simple_attr_check_format(__fmt, 0ull);			\
+	return simple_attr_open(inode, file, __get, __set, __fmt);	\
+}									\
+static struct file_operations __fops = {				\
+	.owner	 = THIS_MODULE,						\
+	.open	 = __fops ## _open,					\
+	.release = simple_attr_close,					\
+	.read	 = simple_attr_read,					\
+	.write	 = simple_attr_write,					\
+};
+
+static inline void __attribute__((format(printf, 1, 2)))
+__simple_attr_check_format(const char *fmt, ...)
+{
+	/* don't do anything, just let the compiler check the arguments; */
+}
+
+int simple_attr_open(struct inode *inode, struct file *file,
+		     u64 (*get)(void *), void (*set)(void *, u64),
+		     const char *fmt);
+int simple_attr_close(struct inode *inode, struct file *file);
+ssize_t simple_attr_read(struct file *file, char __user *buf,
+			 size_t len, loff_t *ppos);
+ssize_t simple_attr_write(struct file *file, const char __user *buf,
+			  size_t len, loff_t *ppos);
+
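+/*
+ * A hypothetical attribute exporting a counter (all names here are
+ * assumptions for the example):
+ *
+ *	static u64 mycounter;
+ *
+ *	static u64 mycounter_get(void *data)
+ *	{
+ *		return mycounter;
+ *	}
+ *
+ *	static void mycounter_set(void *data, u64 val)
+ *	{
+ *		mycounter = val;
+ *	}
+ *
+ *	DEFINE_SIMPLE_ATTRIBUTE(mycounter_fops, mycounter_get,
+ *				mycounter_set, "%llu\n");
+ */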
+
+#ifdef CONFIG_SECURITY
+static inline char *alloc_secdata(void)
+{
+	return (char *)get_zeroed_page(GFP_KERNEL);
+}
+
+static inline void free_secdata(void *secdata)
+{
+	free_page((unsigned long)secdata);
+}
+#else
+static inline char *alloc_secdata(void)
+{
+	return (char *)1;
+}
+
+static inline void free_secdata(void *secdata)
+{ }
+#endif	/* CONFIG_SECURITY */
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_FS_H */
diff -urN linux-2.6.15/include/linux/pci_ids.h linux-2.6.15-no1/include/linux/pci_ids.h
--- linux-2.6.15/include/linux/pci_ids.h	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/include/linux/pci_ids.h	2006-01-26 15:41:14.006758416 +0000
@@ -623,6 +623,7 @@
 #define PCI_DEVICE_ID_SI_965		0x0965
 #define PCI_DEVICE_ID_SI_5511		0x5511
 #define PCI_DEVICE_ID_SI_5513		0x5513
+#define PCI_DEVICE_ID_SI_5517		0x5517
 #define PCI_DEVICE_ID_SI_5518		0x5518
 #define PCI_DEVICE_ID_SI_5571		0x5571
 #define PCI_DEVICE_ID_SI_5581		0x5581
diff -urN linux-2.6.15/include/linux/pci_ids.h.orig linux-2.6.15-no1/include/linux/pci_ids.h.orig
--- linux-2.6.15/include/linux/pci_ids.h.orig	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.15-no1/include/linux/pci_ids.h.orig	2006-01-03 03:21:10.000000000 +0000
@@ -0,0 +1,2194 @@
+/*
+ *	PCI Class, Vendor and Device IDs
+ *
+ *	Please keep sorted.
+ */
+
+/* Device classes and subclasses */
+
+#define PCI_CLASS_NOT_DEFINED		0x0000
+#define PCI_CLASS_NOT_DEFINED_VGA	0x0001
+
+#define PCI_BASE_CLASS_STORAGE		0x01
+#define PCI_CLASS_STORAGE_SCSI		0x0100
+#define PCI_CLASS_STORAGE_IDE		0x0101
+#define PCI_CLASS_STORAGE_FLOPPY	0x0102
+#define PCI_CLASS_STORAGE_IPI		0x0103
+#define PCI_CLASS_STORAGE_RAID		0x0104
+#define PCI_CLASS_STORAGE_OTHER		0x0180
+
+#define PCI_BASE_CLASS_NETWORK		0x02
+#define PCI_CLASS_NETWORK_ETHERNET	0x0200
+#define PCI_CLASS_NETWORK_TOKEN_RING	0x0201
+#define PCI_CLASS_NETWORK_FDDI		0x0202
+#define PCI_CLASS_NETWORK_ATM		0x0203
+#define PCI_CLASS_NETWORK_OTHER		0x0280
+
+#define PCI_BASE_CLASS_DISPLAY		0x03
+#define PCI_CLASS_DISPLAY_VGA		0x0300
+#define PCI_CLASS_DISPLAY_XGA		0x0301
+#define PCI_CLASS_DISPLAY_3D		0x0302
+#define PCI_CLASS_DISPLAY_OTHER		0x0380
+
+#define PCI_BASE_CLASS_MULTIMEDIA	0x04
+#define PCI_CLASS_MULTIMEDIA_VIDEO	0x0400
+#define PCI_CLASS_MULTIMEDIA_AUDIO	0x0401
+#define PCI_CLASS_MULTIMEDIA_PHONE	0x0402
+#define PCI_CLASS_MULTIMEDIA_OTHER	0x0480
+
+#define PCI_BASE_CLASS_MEMORY		0x05
+#define PCI_CLASS_MEMORY_RAM		0x0500
+#define PCI_CLASS_MEMORY_FLASH		0x0501
+#define PCI_CLASS_MEMORY_OTHER		0x0580
+
+#define PCI_BASE_CLASS_BRIDGE		0x06
+#define PCI_CLASS_BRIDGE_HOST		0x0600
+#define PCI_CLASS_BRIDGE_ISA		0x0601
+#define PCI_CLASS_BRIDGE_EISA		0x0602
+#define PCI_CLASS_BRIDGE_MC		0x0603
+#define PCI_CLASS_BRIDGE_PCI		0x0604
+#define PCI_CLASS_BRIDGE_PCMCIA		0x0605
+#define PCI_CLASS_BRIDGE_NUBUS		0x0606
+#define PCI_CLASS_BRIDGE_CARDBUS	0x0607
+#define PCI_CLASS_BRIDGE_RACEWAY	0x0608
+#define PCI_CLASS_BRIDGE_OTHER		0x0680
+
+#define PCI_BASE_CLASS_COMMUNICATION	0x07
+#define PCI_CLASS_COMMUNICATION_SERIAL	0x0700
+#define PCI_CLASS_COMMUNICATION_PARALLEL 0x0701
+#define PCI_CLASS_COMMUNICATION_MULTISERIAL 0x0702
+#define PCI_CLASS_COMMUNICATION_MODEM	0x0703
+#define PCI_CLASS_COMMUNICATION_OTHER	0x0780
+
+#define PCI_BASE_CLASS_SYSTEM		0x08
+#define PCI_CLASS_SYSTEM_PIC		0x0800
+#define PCI_CLASS_SYSTEM_PIC_IOAPIC	0x080010
+#define PCI_CLASS_SYSTEM_PIC_IOXAPIC	0x080020
+#define PCI_CLASS_SYSTEM_DMA		0x0801
+#define PCI_CLASS_SYSTEM_TIMER		0x0802
+#define PCI_CLASS_SYSTEM_RTC		0x0803
+#define PCI_CLASS_SYSTEM_PCI_HOTPLUG	0x0804
+#define PCI_CLASS_SYSTEM_OTHER		0x0880
+
+#define PCI_BASE_CLASS_INPUT		0x09
+#define PCI_CLASS_INPUT_KEYBOARD	0x0900
+#define PCI_CLASS_INPUT_PEN		0x0901
+#define PCI_CLASS_INPUT_MOUSE		0x0902
+#define PCI_CLASS_INPUT_SCANNER		0x0903
+#define PCI_CLASS_INPUT_GAMEPORT	0x0904
+#define PCI_CLASS_INPUT_OTHER		0x0980
+
+#define PCI_BASE_CLASS_DOCKING		0x0a
+#define PCI_CLASS_DOCKING_GENERIC	0x0a00
+#define PCI_CLASS_DOCKING_OTHER		0x0a80
+
+#define PCI_BASE_CLASS_PROCESSOR	0x0b
+#define PCI_CLASS_PROCESSOR_386		0x0b00
+#define PCI_CLASS_PROCESSOR_486		0x0b01
+#define PCI_CLASS_PROCESSOR_PENTIUM	0x0b02
+#define PCI_CLASS_PROCESSOR_ALPHA	0x0b10
+#define PCI_CLASS_PROCESSOR_POWERPC	0x0b20
+#define PCI_CLASS_PROCESSOR_MIPS	0x0b30
+#define PCI_CLASS_PROCESSOR_CO		0x0b40
+
+#define PCI_BASE_CLASS_SERIAL		0x0c
+#define PCI_CLASS_SERIAL_FIREWIRE	0x0c00
+#define PCI_CLASS_SERIAL_ACCESS		0x0c01
+#define PCI_CLASS_SERIAL_SSA		0x0c02
+#define PCI_CLASS_SERIAL_USB		0x0c03
+#define PCI_CLASS_SERIAL_USB_UHCI	0x0c0300
+#define PCI_CLASS_SERIAL_USB_OHCI	0x0c0310
+#define PCI_CLASS_SERIAL_USB_EHCI	0x0c0320
+#define PCI_CLASS_SERIAL_FIBER		0x0c04
+#define PCI_CLASS_SERIAL_SMBUS		0x0c05
+
+#define PCI_BASE_CLASS_INTELLIGENT	0x0e
+#define PCI_CLASS_INTELLIGENT_I2O	0x0e00
+
+#define PCI_BASE_CLASS_SATELLITE	0x0f
+#define PCI_CLASS_SATELLITE_TV		0x0f00
+#define PCI_CLASS_SATELLITE_AUDIO	0x0f01
+#define PCI_CLASS_SATELLITE_VOICE	0x0f03
+#define PCI_CLASS_SATELLITE_DATA	0x0f04
+
+#define PCI_BASE_CLASS_CRYPT		0x10
+#define PCI_CLASS_CRYPT_NETWORK		0x1000
+#define PCI_CLASS_CRYPT_ENTERTAINMENT	0x1001
+#define PCI_CLASS_CRYPT_OTHER		0x1080
+
+#define PCI_BASE_CLASS_SIGNAL_PROCESSING 0x11
+#define PCI_CLASS_SP_DPIO		0x1100
+#define PCI_CLASS_SP_OTHER		0x1180
+
+#define PCI_CLASS_OTHERS		0xff
+
+/* Vendors and devices.  Sort key: vendor first, device next. */
+
+#define PCI_VENDOR_ID_DYNALINK		0x0675
+#define PCI_DEVICE_ID_DYNALINK_IS64PH	0x1702
+
+#define PCI_VENDOR_ID_BERKOM			0x0871
+#define PCI_DEVICE_ID_BERKOM_A1T		0xffa1
+#define PCI_DEVICE_ID_BERKOM_T_CONCEPT		0xffa2
+#define PCI_DEVICE_ID_BERKOM_A4T		0xffa4
+#define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO	0xffa8
+
+#define PCI_VENDOR_ID_COMPAQ		0x0e11
+#define PCI_DEVICE_ID_COMPAQ_TOKENRING	0x0508
+#define PCI_DEVICE_ID_COMPAQ_TACHYON	0xa0fc
+#define PCI_DEVICE_ID_COMPAQ_SMART2P	0xae10
+#define PCI_DEVICE_ID_COMPAQ_NETEL100	0xae32
+#define PCI_DEVICE_ID_COMPAQ_NETEL10	0xae34
+#define PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE 0xae33
+#define PCI_DEVICE_ID_COMPAQ_NETFLEX3I	0xae35
+#define PCI_DEVICE_ID_COMPAQ_NETEL100D	0xae40
+#define PCI_DEVICE_ID_COMPAQ_NETEL100PI	0xae43
+#define PCI_DEVICE_ID_COMPAQ_NETEL100I	0xb011
+#define PCI_DEVICE_ID_COMPAQ_CISS	0xb060
+#define PCI_DEVICE_ID_COMPAQ_CISSB	0xb178
+#define PCI_DEVICE_ID_COMPAQ_CISSC	0x46
+#define PCI_DEVICE_ID_COMPAQ_THUNDER	0xf130
+#define PCI_DEVICE_ID_COMPAQ_NETFLEX3B	0xf150
+
+#define PCI_VENDOR_ID_NCR		0x1000
+#define PCI_VENDOR_ID_LSI_LOGIC		0x1000
+#define PCI_DEVICE_ID_NCR_53C810	0x0001
+#define PCI_DEVICE_ID_NCR_53C820	0x0002
+#define PCI_DEVICE_ID_NCR_53C825	0x0003
+#define PCI_DEVICE_ID_NCR_53C815	0x0004
+#define PCI_DEVICE_ID_LSI_53C810AP	0x0005
+#define PCI_DEVICE_ID_NCR_53C860	0x0006
+#define PCI_DEVICE_ID_LSI_53C1510	0x000a
+#define PCI_DEVICE_ID_NCR_53C896	0x000b
+#define PCI_DEVICE_ID_NCR_53C895	0x000c
+#define PCI_DEVICE_ID_NCR_53C885	0x000d
+#define PCI_DEVICE_ID_NCR_53C875	0x000f
+#define PCI_DEVICE_ID_NCR_53C1510	0x0010
+#define PCI_DEVICE_ID_LSI_53C895A	0x0012
+#define PCI_DEVICE_ID_LSI_53C875A	0x0013
+#define PCI_DEVICE_ID_LSI_53C1010_33	0x0020
+#define PCI_DEVICE_ID_LSI_53C1010_66	0x0021
+#define PCI_DEVICE_ID_LSI_53C1030	0x0030
+#define PCI_DEVICE_ID_LSI_1030_53C1035	0x0032
+#define PCI_DEVICE_ID_LSI_53C1035	0x0040
+#define PCI_DEVICE_ID_NCR_53C875J	0x008f
+#define PCI_DEVICE_ID_LSI_FC909		0x0621
+#define PCI_DEVICE_ID_LSI_FC929		0x0622
+#define PCI_DEVICE_ID_LSI_FC929_LAN	0x0623
+#define PCI_DEVICE_ID_LSI_FC919		0x0624
+#define PCI_DEVICE_ID_LSI_FC919_LAN	0x0625
+#define PCI_DEVICE_ID_LSI_FC929X	0x0626
+#define PCI_DEVICE_ID_LSI_FC939X	0x0642
+#define PCI_DEVICE_ID_LSI_FC949X	0x0640
+#define PCI_DEVICE_ID_LSI_FC919X	0x0628
+#define PCI_DEVICE_ID_NCR_YELLOWFIN	0x0701
+#define PCI_DEVICE_ID_LSI_61C102	0x0901
+#define PCI_DEVICE_ID_LSI_63C815	0x1000
+#define PCI_DEVICE_ID_LSI_SAS1064	0x0050
+#define PCI_DEVICE_ID_LSI_SAS1064R	0x0411
+#define PCI_DEVICE_ID_LSI_SAS1066	0x005E
+#define PCI_DEVICE_ID_LSI_SAS1068	0x0054
+#define PCI_DEVICE_ID_LSI_SAS1064A	0x005C
+#define PCI_DEVICE_ID_LSI_SAS1064E	0x0056
+#define PCI_DEVICE_ID_LSI_SAS1066E	0x005A
+#define PCI_DEVICE_ID_LSI_SAS1068E	0x0058
+#define PCI_DEVICE_ID_LSI_SAS1078	0x0060
+
+#define PCI_VENDOR_ID_ATI		0x1002
+/* Mach64 */
+#define PCI_DEVICE_ID_ATI_68800		0x4158
+#define PCI_DEVICE_ID_ATI_215CT222	0x4354
+#define PCI_DEVICE_ID_ATI_210888CX	0x4358
+#define PCI_DEVICE_ID_ATI_215ET222	0x4554
+/* Mach64 / Rage */
+#define PCI_DEVICE_ID_ATI_215GB		0x4742
+#define PCI_DEVICE_ID_ATI_215GD		0x4744
+#define PCI_DEVICE_ID_ATI_215GI		0x4749
+#define PCI_DEVICE_ID_ATI_215GP		0x4750
+#define PCI_DEVICE_ID_ATI_215GQ		0x4751
+#define PCI_DEVICE_ID_ATI_215XL		0x4752
+#define PCI_DEVICE_ID_ATI_215GT		0x4754
+#define PCI_DEVICE_ID_ATI_215GTB	0x4755
+#define PCI_DEVICE_ID_ATI_215_IV	0x4756
+#define PCI_DEVICE_ID_ATI_215_IW	0x4757
+#define PCI_DEVICE_ID_ATI_215_IZ	0x475A
+#define PCI_DEVICE_ID_ATI_210888GX	0x4758
+#define PCI_DEVICE_ID_ATI_215_LB	0x4c42
+#define PCI_DEVICE_ID_ATI_215_LD	0x4c44
+#define PCI_DEVICE_ID_ATI_215_LG	0x4c47
+#define PCI_DEVICE_ID_ATI_215_LI	0x4c49
+#define PCI_DEVICE_ID_ATI_215_LM	0x4c4D
+#define PCI_DEVICE_ID_ATI_215_LN	0x4c4E
+#define PCI_DEVICE_ID_ATI_215_LR	0x4c52
+#define PCI_DEVICE_ID_ATI_215_LS	0x4c53
+#define PCI_DEVICE_ID_ATI_264_LT	0x4c54
+/* Mach64 VT */
+#define PCI_DEVICE_ID_ATI_264VT		0x5654
+#define PCI_DEVICE_ID_ATI_264VU		0x5655
+#define PCI_DEVICE_ID_ATI_264VV		0x5656
+/* Rage128 GL */
+#define PCI_DEVICE_ID_ATI_RAGE128_RE	0x5245
+#define PCI_DEVICE_ID_ATI_RAGE128_RF	0x5246
+#define PCI_DEVICE_ID_ATI_RAGE128_RG	0x5247
+/* Rage128 VR */
+#define PCI_DEVICE_ID_ATI_RAGE128_RK	0x524b
+#define PCI_DEVICE_ID_ATI_RAGE128_RL	0x524c
+#define PCI_DEVICE_ID_ATI_RAGE128_SE	0x5345
+#define PCI_DEVICE_ID_ATI_RAGE128_SF	0x5346
+#define PCI_DEVICE_ID_ATI_RAGE128_SG	0x5347
+#define PCI_DEVICE_ID_ATI_RAGE128_SH	0x5348
+#define PCI_DEVICE_ID_ATI_RAGE128_SK	0x534b
+#define PCI_DEVICE_ID_ATI_RAGE128_SL	0x534c
+#define PCI_DEVICE_ID_ATI_RAGE128_SM	0x534d
+#define PCI_DEVICE_ID_ATI_RAGE128_SN	0x534e
+/* Rage128 Ultra */
+#define PCI_DEVICE_ID_ATI_RAGE128_TF	0x5446
+#define PCI_DEVICE_ID_ATI_RAGE128_TL	0x544c
+#define PCI_DEVICE_ID_ATI_RAGE128_TR	0x5452
+#define PCI_DEVICE_ID_ATI_RAGE128_TS	0x5453
+#define PCI_DEVICE_ID_ATI_RAGE128_TT	0x5454
+#define PCI_DEVICE_ID_ATI_RAGE128_TU	0x5455
+/* Rage128 M3 */
+#define PCI_DEVICE_ID_ATI_RAGE128_LE	0x4c45
+#define PCI_DEVICE_ID_ATI_RAGE128_LF	0x4c46
+/* Rage128 M4 */
+#define PCI_DEVICE_ID_ATI_RAGE128_MF    0x4d46
+#define PCI_DEVICE_ID_ATI_RAGE128_ML    0x4d4c
+/* Rage128 Pro GL */
+#define PCI_DEVICE_ID_ATI_RAGE128_PA	0x5041
+#define PCI_DEVICE_ID_ATI_RAGE128_PB	0x5042
+#define PCI_DEVICE_ID_ATI_RAGE128_PC	0x5043
+#define PCI_DEVICE_ID_ATI_RAGE128_PD	0x5044
+#define PCI_DEVICE_ID_ATI_RAGE128_PE	0x5045
+#define PCI_DEVICE_ID_ATI_RAGE128_PF	0x5046
+/* Rage128 Pro VR */
+#define PCI_DEVICE_ID_ATI_RAGE128_PG	0x5047
+#define PCI_DEVICE_ID_ATI_RAGE128_PH	0x5048
+#define PCI_DEVICE_ID_ATI_RAGE128_PI	0x5049
+#define PCI_DEVICE_ID_ATI_RAGE128_PJ	0x504A
+#define PCI_DEVICE_ID_ATI_RAGE128_PK	0x504B
+#define PCI_DEVICE_ID_ATI_RAGE128_PL	0x504C
+#define PCI_DEVICE_ID_ATI_RAGE128_PM	0x504D
+#define PCI_DEVICE_ID_ATI_RAGE128_PN	0x504E
+#define PCI_DEVICE_ID_ATI_RAGE128_PO	0x504F
+#define PCI_DEVICE_ID_ATI_RAGE128_PP	0x5050
+#define PCI_DEVICE_ID_ATI_RAGE128_PQ	0x5051
+#define PCI_DEVICE_ID_ATI_RAGE128_PR	0x5052
+#define PCI_DEVICE_ID_ATI_RAGE128_PS	0x5053
+#define PCI_DEVICE_ID_ATI_RAGE128_PT	0x5054
+#define PCI_DEVICE_ID_ATI_RAGE128_PU	0x5055
+#define PCI_DEVICE_ID_ATI_RAGE128_PV	0x5056
+#define PCI_DEVICE_ID_ATI_RAGE128_PW	0x5057
+#define PCI_DEVICE_ID_ATI_RAGE128_PX	0x5058
+/* Radeon R100 */
+#define PCI_DEVICE_ID_ATI_RADEON_QD	0x5144
+#define PCI_DEVICE_ID_ATI_RADEON_QE	0x5145
+#define PCI_DEVICE_ID_ATI_RADEON_QF	0x5146
+#define PCI_DEVICE_ID_ATI_RADEON_QG	0x5147
+/* Radeon RV100 (VE) */
+#define PCI_DEVICE_ID_ATI_RADEON_QY	0x5159
+#define PCI_DEVICE_ID_ATI_RADEON_QZ	0x515a
+/* Radeon R200 (8500) */
+#define PCI_DEVICE_ID_ATI_RADEON_QL	0x514c
+#define PCI_DEVICE_ID_ATI_RADEON_QN	0x514e
+#define PCI_DEVICE_ID_ATI_RADEON_QO	0x514f
+#define PCI_DEVICE_ID_ATI_RADEON_Ql	0x516c
+#define PCI_DEVICE_ID_ATI_RADEON_BB	0x4242
+/* Radeon R200 (9100) */
+#define PCI_DEVICE_ID_ATI_RADEON_QM	0x514d
+/* Radeon RV200 (7500) */
+#define PCI_DEVICE_ID_ATI_RADEON_QW	0x5157
+#define PCI_DEVICE_ID_ATI_RADEON_QX	0x5158
+/* Radeon NV-100 */
+/* Radeon RV250 (9000) */
+#define PCI_DEVICE_ID_ATI_RADEON_Id	0x4964
+#define PCI_DEVICE_ID_ATI_RADEON_Ie	0x4965
+#define PCI_DEVICE_ID_ATI_RADEON_If	0x4966
+#define PCI_DEVICE_ID_ATI_RADEON_Ig	0x4967
+/* Radeon RV280 (9200) */
+#define PCI_DEVICE_ID_ATI_RADEON_Ya	0x5961
+#define PCI_DEVICE_ID_ATI_RADEON_Yd	0x5964
+/* Radeon R300 (9500) */
+/* Radeon R300 (9700) */
+#define PCI_DEVICE_ID_ATI_RADEON_ND	0x4e44
+#define PCI_DEVICE_ID_ATI_RADEON_NE	0x4e45
+#define PCI_DEVICE_ID_ATI_RADEON_NF	0x4e46
+#define PCI_DEVICE_ID_ATI_RADEON_NG	0x4e47
+/* Radeon R350 (9800) */
+/* Radeon RV350 (9600) */
+/* Radeon M6 */
+#define PCI_DEVICE_ID_ATI_RADEON_LY	0x4c59
+#define PCI_DEVICE_ID_ATI_RADEON_LZ	0x4c5a
+/* Radeon M7 */
+#define PCI_DEVICE_ID_ATI_RADEON_LW	0x4c57
+#define PCI_DEVICE_ID_ATI_RADEON_LX	0x4c58
+/* Radeon M9 */
+#define PCI_DEVICE_ID_ATI_RADEON_Ld	0x4c64
+#define PCI_DEVICE_ID_ATI_RADEON_Le	0x4c65
+#define PCI_DEVICE_ID_ATI_RADEON_Lf	0x4c66
+#define PCI_DEVICE_ID_ATI_RADEON_Lg	0x4c67
+/* Radeon */
+/* RadeonIGP */
+#define PCI_DEVICE_ID_ATI_RS100		0xcab0
+#define PCI_DEVICE_ID_ATI_RS200		0xcab2
+#define PCI_DEVICE_ID_ATI_RS200_B	0xcbb2
+#define PCI_DEVICE_ID_ATI_RS250		0xcab3
+#define PCI_DEVICE_ID_ATI_RS300_100	0x5830
+#define PCI_DEVICE_ID_ATI_RS300_133	0x5831
+#define PCI_DEVICE_ID_ATI_RS300_166	0x5832
+#define PCI_DEVICE_ID_ATI_RS300_200	0x5833
+#define PCI_DEVICE_ID_ATI_RS350_100     0x7830
+#define PCI_DEVICE_ID_ATI_RS350_133     0x7831
+#define PCI_DEVICE_ID_ATI_RS350_166     0x7832
+#define PCI_DEVICE_ID_ATI_RS350_200     0x7833
+#define PCI_DEVICE_ID_ATI_RS400_100     0x5a30
+#define PCI_DEVICE_ID_ATI_RS400_133     0x5a31
+#define PCI_DEVICE_ID_ATI_RS400_166     0x5a32
+#define PCI_DEVICE_ID_ATI_RS400_200     0x5a33
+#define PCI_DEVICE_ID_ATI_RS480         0x5950
+/* ATI IXP Chipset */
+#define PCI_DEVICE_ID_ATI_IXP200_IDE	0x4349
+#define PCI_DEVICE_ID_ATI_IXP300_IDE	0x4369
+#define PCI_DEVICE_ID_ATI_IXP300_SATA   0x436e
+#define PCI_DEVICE_ID_ATI_IXP400_IDE	0x4376
+#define PCI_DEVICE_ID_ATI_IXP400_SATA   0x4379
+
+#define PCI_VENDOR_ID_VLSI		0x1004
+#define PCI_DEVICE_ID_VLSI_82C592	0x0005
+#define PCI_DEVICE_ID_VLSI_82C593	0x0006
+#define PCI_DEVICE_ID_VLSI_82C594	0x0007
+#define PCI_DEVICE_ID_VLSI_82C597	0x0009
+#define PCI_DEVICE_ID_VLSI_82C541	0x000c
+#define PCI_DEVICE_ID_VLSI_82C543	0x000d
+#define PCI_DEVICE_ID_VLSI_82C532	0x0101
+#define PCI_DEVICE_ID_VLSI_82C534	0x0102
+#define PCI_DEVICE_ID_VLSI_82C535	0x0104
+#define PCI_DEVICE_ID_VLSI_82C147	0x0105
+#define PCI_DEVICE_ID_VLSI_VAS96011	0x0702
+
+#define PCI_VENDOR_ID_ADL		0x1005
+#define PCI_DEVICE_ID_ADL_2301		0x2301
+
+#define PCI_VENDOR_ID_NS		0x100b
+#define PCI_DEVICE_ID_NS_87415		0x0002
+#define PCI_DEVICE_ID_NS_87560_LIO	0x000e
+#define PCI_DEVICE_ID_NS_87560_USB	0x0012
+#define PCI_DEVICE_ID_NS_83815		0x0020
+#define PCI_DEVICE_ID_NS_83820		0x0022
+#define PCI_DEVICE_ID_NS_SATURN		0x0035
+#define PCI_DEVICE_ID_NS_SCx200_BRIDGE	0x0500
+#define PCI_DEVICE_ID_NS_SCx200_SMI	0x0501
+#define PCI_DEVICE_ID_NS_SCx200_IDE	0x0502
+#define PCI_DEVICE_ID_NS_SCx200_AUDIO	0x0503
+#define PCI_DEVICE_ID_NS_SCx200_VIDEO	0x0504
+#define PCI_DEVICE_ID_NS_SCx200_XBUS	0x0505
+#define PCI_DEVICE_ID_NS_SC1100_BRIDGE	0x0510
+#define PCI_DEVICE_ID_NS_SC1100_SMI	0x0511
+#define PCI_DEVICE_ID_NS_SC1100_XBUS	0x0515
+#define PCI_DEVICE_ID_NS_87410		0xd001
+#define PCI_DEVICE_ID_NS_CS5535_IDE	0x002d
+
+#define PCI_VENDOR_ID_TSENG		0x100c
+#define PCI_DEVICE_ID_TSENG_W32P_2	0x3202
+#define PCI_DEVICE_ID_TSENG_W32P_b	0x3205
+#define PCI_DEVICE_ID_TSENG_W32P_c	0x3206
+#define PCI_DEVICE_ID_TSENG_W32P_d	0x3207
+#define PCI_DEVICE_ID_TSENG_ET6000	0x3208
+
+#define PCI_VENDOR_ID_WEITEK		0x100e
+#define PCI_DEVICE_ID_WEITEK_P9000	0x9001
+#define PCI_DEVICE_ID_WEITEK_P9100	0x9100
+
+#define PCI_VENDOR_ID_DEC		0x1011
+#define PCI_DEVICE_ID_DEC_BRD		0x0001
+#define PCI_DEVICE_ID_DEC_TULIP		0x0002
+#define PCI_DEVICE_ID_DEC_TGA		0x0004
+#define PCI_DEVICE_ID_DEC_TULIP_FAST	0x0009
+#define PCI_DEVICE_ID_DEC_TGA2		0x000D
+#define PCI_DEVICE_ID_DEC_FDDI		0x000F
+#define PCI_DEVICE_ID_DEC_TULIP_PLUS	0x0014
+#define PCI_DEVICE_ID_DEC_21142		0x0019
+#define PCI_DEVICE_ID_DEC_21052		0x0021
+#define PCI_DEVICE_ID_DEC_21150		0x0022
+#define PCI_DEVICE_ID_DEC_21152		0x0024
+#define PCI_DEVICE_ID_DEC_21153		0x0025
+#define PCI_DEVICE_ID_DEC_21154		0x0026
+#define PCI_DEVICE_ID_DEC_21285		0x1065
+#define PCI_DEVICE_ID_COMPAQ_42XX	0x0046
+
+#define PCI_VENDOR_ID_CIRRUS		0x1013
+#define PCI_DEVICE_ID_CIRRUS_7548	0x0038
+#define PCI_DEVICE_ID_CIRRUS_5430	0x00a0
+#define PCI_DEVICE_ID_CIRRUS_5434_4	0x00a4
+#define PCI_DEVICE_ID_CIRRUS_5434_8	0x00a8
+#define PCI_DEVICE_ID_CIRRUS_5436	0x00ac
+#define PCI_DEVICE_ID_CIRRUS_5446	0x00b8
+#define PCI_DEVICE_ID_CIRRUS_5480	0x00bc
+#define PCI_DEVICE_ID_CIRRUS_5462	0x00d0
+#define PCI_DEVICE_ID_CIRRUS_5464	0x00d4
+#define PCI_DEVICE_ID_CIRRUS_5465	0x00d6
+#define PCI_DEVICE_ID_CIRRUS_6729	0x1100
+#define PCI_DEVICE_ID_CIRRUS_6832	0x1110
+#define PCI_DEVICE_ID_CIRRUS_7543	0x1202
+#define PCI_DEVICE_ID_CIRRUS_4610	0x6001
+#define PCI_DEVICE_ID_CIRRUS_4612	0x6003
+#define PCI_DEVICE_ID_CIRRUS_4615	0x6004
+
+#define PCI_VENDOR_ID_IBM		0x1014
+#define PCI_DEVICE_ID_IBM_TR		0x0018
+#define PCI_DEVICE_ID_IBM_TR_WAKE	0x003e
+#define PCI_DEVICE_ID_IBM_CPC710_PCI64	0x00fc
+#define PCI_DEVICE_ID_IBM_SNIPE		0x0180
+#define PCI_DEVICE_ID_IBM_CITRINE		0x028C
+#define PCI_DEVICE_ID_IBM_GEMSTONE		0xB166
+#define PCI_DEVICE_ID_IBM_OBSIDIAN		0x02BD
+#define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_1	0x0031
+#define PCI_DEVICE_ID_IBM_ICOM_DEV_ID_2	0x0219
+#define PCI_DEVICE_ID_IBM_ICOM_V2_TWO_PORTS_RVX		0x021A
+#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM	0x0251
+#define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL	0x252
+
+#define PCI_VENDOR_ID_COMPEX2		0x101a /* pci.ids says "AT&T GIS (NCR)" */
+#define PCI_DEVICE_ID_COMPEX2_100VG	0x0005
+
+#define PCI_VENDOR_ID_WD		0x101c
+#define PCI_DEVICE_ID_WD_90C		0xc24a
+
+#define PCI_VENDOR_ID_AMI		0x101e
+#define PCI_DEVICE_ID_AMI_MEGARAID3	0x1960
+#define PCI_DEVICE_ID_AMI_MEGARAID	0x9010
+#define PCI_DEVICE_ID_AMI_MEGARAID2	0x9060
+
+#define PCI_VENDOR_ID_AMD		0x1022
+#define PCI_DEVICE_ID_AMD_K8_NB		0x1100
+#define PCI_DEVICE_ID_AMD_LANCE		0x2000
+#define PCI_DEVICE_ID_AMD_LANCE_HOME	0x2001
+#define PCI_DEVICE_ID_AMD_SCSI		0x2020
+#define PCI_DEVICE_ID_AMD_SERENADE	0x36c0
+#define PCI_DEVICE_ID_AMD_FE_GATE_7006	0x7006
+#define PCI_DEVICE_ID_AMD_FE_GATE_7007	0x7007
+#define PCI_DEVICE_ID_AMD_FE_GATE_700C	0x700C
+#define PCI_DEVICE_ID_AMD_FE_GATE_700E	0x700E
+#define PCI_DEVICE_ID_AMD_COBRA_7401	0x7401
+#define PCI_DEVICE_ID_AMD_VIPER_7409	0x7409
+#define PCI_DEVICE_ID_AMD_VIPER_740B	0x740B
+#define PCI_DEVICE_ID_AMD_VIPER_7410	0x7410
+#define PCI_DEVICE_ID_AMD_VIPER_7411	0x7411
+#define PCI_DEVICE_ID_AMD_VIPER_7413	0x7413
+#define PCI_DEVICE_ID_AMD_VIPER_7440	0x7440
+#define PCI_DEVICE_ID_AMD_OPUS_7441	0x7441
+#define PCI_DEVICE_ID_AMD_OPUS_7443	0x7443
+#define PCI_DEVICE_ID_AMD_VIPER_7443	0x7443
+#define PCI_DEVICE_ID_AMD_OPUS_7445	0x7445
+#define PCI_DEVICE_ID_AMD_8111_LPC	0x7468
+#define PCI_DEVICE_ID_AMD_8111_IDE	0x7469
+#define PCI_DEVICE_ID_AMD_8111_SMBUS2	0x746a
+#define PCI_DEVICE_ID_AMD_8111_SMBUS	0x746b
+#define PCI_DEVICE_ID_AMD_8111_AUDIO	0x746d
+#define PCI_DEVICE_ID_AMD_8151_0	0x7454
+#define PCI_DEVICE_ID_AMD_8131_APIC     0x7450
+
+#define PCI_DEVICE_ID_AMD_CS5536_IDE	0x209A
+
+#define PCI_VENDOR_ID_TRIDENT		0x1023
+#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX	0x2000
+#define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX	0x2001
+#define PCI_DEVICE_ID_TRIDENT_9320	0x9320
+#define PCI_DEVICE_ID_TRIDENT_9388	0x9388
+#define PCI_DEVICE_ID_TRIDENT_9397	0x9397
+#define PCI_DEVICE_ID_TRIDENT_939A	0x939A
+#define PCI_DEVICE_ID_TRIDENT_9520	0x9520
+#define PCI_DEVICE_ID_TRIDENT_9525	0x9525
+#define PCI_DEVICE_ID_TRIDENT_9420	0x9420
+#define PCI_DEVICE_ID_TRIDENT_9440	0x9440
+#define PCI_DEVICE_ID_TRIDENT_9660	0x9660
+#define PCI_DEVICE_ID_TRIDENT_9750	0x9750
+#define PCI_DEVICE_ID_TRIDENT_9850	0x9850
+#define PCI_DEVICE_ID_TRIDENT_9880	0x9880
+#define PCI_DEVICE_ID_TRIDENT_8400	0x8400
+#define PCI_DEVICE_ID_TRIDENT_8420	0x8420
+#define PCI_DEVICE_ID_TRIDENT_8500	0x8500
+
+#define PCI_VENDOR_ID_AI		0x1025
+#define PCI_DEVICE_ID_AI_M1435		0x1435
+
+#define PCI_VENDOR_ID_DELL		0x1028
+#define PCI_DEVICE_ID_DELL_RACIII	0x0008
+#define PCI_DEVICE_ID_DELL_RAC4		0x0012
+#define PCI_DEVICE_ID_DELL_PERC5	0x0015
+
+#define PCI_VENDOR_ID_MATROX		0x102B
+#define PCI_DEVICE_ID_MATROX_MGA_2	0x0518
+#define PCI_DEVICE_ID_MATROX_MIL	0x0519
+#define PCI_DEVICE_ID_MATROX_MYS	0x051A
+#define PCI_DEVICE_ID_MATROX_MIL_2	0x051b
+#define PCI_DEVICE_ID_MATROX_MYS_AGP	0x051e
+#define PCI_DEVICE_ID_MATROX_MIL_2_AGP	0x051f
+#define PCI_DEVICE_ID_MATROX_MGA_IMP	0x0d10
+#define PCI_DEVICE_ID_MATROX_G100_MM	0x1000
+#define PCI_DEVICE_ID_MATROX_G100_AGP	0x1001
+#define PCI_DEVICE_ID_MATROX_G200_PCI	0x0520
+#define PCI_DEVICE_ID_MATROX_G200_AGP	0x0521
+#define	PCI_DEVICE_ID_MATROX_G400	0x0525
+#define PCI_DEVICE_ID_MATROX_G550	0x2527
+#define PCI_DEVICE_ID_MATROX_VIA	0x4536
+
+#define PCI_VENDOR_ID_CT		0x102c
+#define PCI_DEVICE_ID_CT_69000		0x00c0
+#define PCI_DEVICE_ID_CT_65545		0x00d8
+#define PCI_DEVICE_ID_CT_65548		0x00dc
+#define PCI_DEVICE_ID_CT_65550		0x00e0
+#define PCI_DEVICE_ID_CT_65554		0x00e4
+#define PCI_DEVICE_ID_CT_65555		0x00e5
+
+#define PCI_VENDOR_ID_MIRO		0x1031
+#define PCI_DEVICE_ID_MIRO_36050	0x5601
+#define PCI_DEVICE_ID_MIRO_DC10PLUS	0x7efe
+#define PCI_DEVICE_ID_MIRO_DC30PLUS	0xd801
+
+#define PCI_VENDOR_ID_NEC		0x1033
+#define PCI_DEVICE_ID_NEC_CBUS_1	0x0001 /* PCI-Cbus Bridge */
+#define PCI_DEVICE_ID_NEC_LOCAL		0x0002 /* Local Bridge */
+#define PCI_DEVICE_ID_NEC_ATM		0x0003 /* ATM LAN Controller */
+#define PCI_DEVICE_ID_NEC_R4000		0x0004 /* R4000 Bridge */
+#define PCI_DEVICE_ID_NEC_486		0x0005 /* 486 Like Peripheral Bus Bridge */
+#define PCI_DEVICE_ID_NEC_ACCEL_1	0x0006 /* Graphic Accelerator */
+#define PCI_DEVICE_ID_NEC_UXBUS		0x0007 /* UX-Bus Bridge */
+#define PCI_DEVICE_ID_NEC_ACCEL_2	0x0008 /* Graphic Accelerator */
+#define PCI_DEVICE_ID_NEC_GRAPH		0x0009 /* PCI-CoreGraph Bridge */
+#define PCI_DEVICE_ID_NEC_VL		0x0016 /* PCI-VL Bridge */
+#define PCI_DEVICE_ID_NEC_STARALPHA2	0x002c /* STAR ALPHA2 */
+#define PCI_DEVICE_ID_NEC_CBUS_2	0x002d /* PCI-Cbus Bridge */
+#define PCI_DEVICE_ID_NEC_USB		0x0035 /* PCI-USB Host */
+#define PCI_DEVICE_ID_NEC_CBUS_3	0x003b
+#define PCI_DEVICE_ID_NEC_NAPCCARD	0x003e
+#define PCI_DEVICE_ID_NEC_PCX2		0x0046 /* PowerVR */
+#define PCI_DEVICE_ID_NEC_NILE4		0x005a
+#define PCI_DEVICE_ID_NEC_VRC5476       0x009b
+#define PCI_DEVICE_ID_NEC_VRC4173	0x00a5
+#define PCI_DEVICE_ID_NEC_VRC5477_AC97  0x00a6
+#define PCI_DEVICE_ID_NEC_PC9821CS01    0x800c /* PC-9821-CS01 */
+#define PCI_DEVICE_ID_NEC_PC9821NRB06   0x800d /* PC-9821NR-B06 */
+
+#define PCI_VENDOR_ID_FD		0x1036
+#define PCI_DEVICE_ID_FD_36C70		0x0000
+
+#define PCI_VENDOR_ID_SI		0x1039
+#define PCI_DEVICE_ID_SI_5591_AGP	0x0001
+#define PCI_DEVICE_ID_SI_6202		0x0002
+#define PCI_DEVICE_ID_SI_503		0x0008
+#define PCI_DEVICE_ID_SI_ACPI		0x0009
+#define PCI_DEVICE_ID_SI_SMBUS		0x0016
+#define PCI_DEVICE_ID_SI_LPC		0x0018
+#define PCI_DEVICE_ID_SI_5597_VGA	0x0200
+#define PCI_DEVICE_ID_SI_6205		0x0205
+#define PCI_DEVICE_ID_SI_501		0x0406
+#define PCI_DEVICE_ID_SI_496		0x0496
+#define PCI_DEVICE_ID_SI_300		0x0300
+#define PCI_DEVICE_ID_SI_315H		0x0310
+#define PCI_DEVICE_ID_SI_315		0x0315
+#define PCI_DEVICE_ID_SI_315PRO		0x0325
+#define PCI_DEVICE_ID_SI_530		0x0530
+#define PCI_DEVICE_ID_SI_540		0x0540
+#define PCI_DEVICE_ID_SI_550		0x0550
+#define PCI_DEVICE_ID_SI_540_VGA	0x5300
+#define PCI_DEVICE_ID_SI_550_VGA	0x5315
+#define PCI_DEVICE_ID_SI_620		0x0620
+#define PCI_DEVICE_ID_SI_630		0x0630
+#define PCI_DEVICE_ID_SI_633		0x0633
+#define PCI_DEVICE_ID_SI_635		0x0635
+#define PCI_DEVICE_ID_SI_640		0x0640
+#define PCI_DEVICE_ID_SI_645		0x0645
+#define PCI_DEVICE_ID_SI_646		0x0646
+#define PCI_DEVICE_ID_SI_648		0x0648
+#define PCI_DEVICE_ID_SI_650		0x0650
+#define PCI_DEVICE_ID_SI_651		0x0651
+#define PCI_DEVICE_ID_SI_655		0x0655
+#define PCI_DEVICE_ID_SI_661		0x0661
+#define PCI_DEVICE_ID_SI_730		0x0730
+#define PCI_DEVICE_ID_SI_733		0x0733
+#define PCI_DEVICE_ID_SI_630_VGA	0x6300
+#define PCI_DEVICE_ID_SI_735		0x0735
+#define PCI_DEVICE_ID_SI_740		0x0740
+#define PCI_DEVICE_ID_SI_741		0x0741
+#define PCI_DEVICE_ID_SI_745		0x0745
+#define PCI_DEVICE_ID_SI_746		0x0746
+#define PCI_DEVICE_ID_SI_755		0x0755
+#define PCI_DEVICE_ID_SI_760		0x0760
+#define PCI_DEVICE_ID_SI_900		0x0900
+#define PCI_DEVICE_ID_SI_961		0x0961
+#define PCI_DEVICE_ID_SI_962		0x0962
+#define PCI_DEVICE_ID_SI_963		0x0963
+#define PCI_DEVICE_ID_SI_965		0x0965
+#define PCI_DEVICE_ID_SI_5511		0x5511
+#define PCI_DEVICE_ID_SI_5513		0x5513
+#define PCI_DEVICE_ID_SI_5518		0x5518
+#define PCI_DEVICE_ID_SI_5571		0x5571
+#define PCI_DEVICE_ID_SI_5581		0x5581
+#define PCI_DEVICE_ID_SI_5582		0x5582
+#define PCI_DEVICE_ID_SI_5591		0x5591
+#define PCI_DEVICE_ID_SI_5596		0x5596
+#define PCI_DEVICE_ID_SI_5597		0x5597
+#define PCI_DEVICE_ID_SI_5598		0x5598
+#define PCI_DEVICE_ID_SI_5600		0x5600
+#define PCI_DEVICE_ID_SI_7012		0x7012
+#define PCI_DEVICE_ID_SI_7013		0x7013
+#define PCI_DEVICE_ID_SI_7016		0x7016
+#define PCI_DEVICE_ID_SI_7018		0x7018
+
+#define PCI_VENDOR_ID_HP		0x103c
+#define PCI_DEVICE_ID_HP_VISUALIZE_EG	0x1005
+#define PCI_DEVICE_ID_HP_VISUALIZE_FX6	0x1006
+#define PCI_DEVICE_ID_HP_VISUALIZE_FX4	0x1008
+#define PCI_DEVICE_ID_HP_VISUALIZE_FX2	0x100a
+#define PCI_DEVICE_ID_HP_TACHYON	0x1028
+#define PCI_DEVICE_ID_HP_TACHLITE	0x1029
+#define PCI_DEVICE_ID_HP_J2585A		0x1030
+#define PCI_DEVICE_ID_HP_J2585B		0x1031
+#define PCI_DEVICE_ID_HP_J2973A		0x1040
+#define PCI_DEVICE_ID_HP_J2970A		0x1042
+#define PCI_DEVICE_ID_HP_DIVA		0x1048
+#define PCI_DEVICE_ID_HP_DIVA_TOSCA1	0x1049
+#define PCI_DEVICE_ID_HP_DIVA_TOSCA2	0x104A
+#define PCI_DEVICE_ID_HP_DIVA_MAESTRO	0x104B
+#define PCI_DEVICE_ID_HP_REO_IOC	0x10f1
+#define PCI_DEVICE_ID_HP_VISUALIZE_FXE	0x108b
+#define PCI_DEVICE_ID_HP_DIVA_HALFDOME	0x1223
+#define PCI_DEVICE_ID_HP_DIVA_KEYSTONE	0x1226
+#define PCI_DEVICE_ID_HP_DIVA_POWERBAR	0x1227
+#define PCI_DEVICE_ID_HP_ZX1_IOC	0x122a
+#define PCI_DEVICE_ID_HP_PCIX_LBA	0x122e
+#define PCI_DEVICE_ID_HP_SX1000_IOC	0x127c
+#define PCI_DEVICE_ID_HP_DIVA_EVEREST	0x1282
+#define PCI_DEVICE_ID_HP_DIVA_AUX	0x1290
+#define PCI_DEVICE_ID_HP_DIVA_RMP3	0x1301
+#define PCI_DEVICE_ID_HP_DIVA_HURRICANE	0x132a
+#define PCI_DEVICE_ID_HP_CISSA		0x3220
+#define PCI_DEVICE_ID_HP_CISSC		0x3230
+#define PCI_DEVICE_ID_HP_CISSD		0x3238
+#define PCI_DEVICE_ID_HP_ZX2_IOC	0x4031
+
+#define PCI_VENDOR_ID_PCTECH		0x1042
+#define PCI_DEVICE_ID_PCTECH_RZ1000	0x1000
+#define PCI_DEVICE_ID_PCTECH_RZ1001	0x1001
+#define PCI_DEVICE_ID_PCTECH_SAMURAI_IDE 0x3020
+
+#define PCI_VENDOR_ID_ASUSTEK		0x1043
+#define PCI_DEVICE_ID_ASUSTEK_0675	0x0675
+
+#define PCI_VENDOR_ID_DPT		0x1044
+#define PCI_DEVICE_ID_DPT		0xa400
+
+#define PCI_VENDOR_ID_OPTI		0x1045
+#define PCI_DEVICE_ID_OPTI_82C558	0xc558
+#define PCI_DEVICE_ID_OPTI_82C621	0xc621
+#define PCI_DEVICE_ID_OPTI_82C700	0xc700
+#define PCI_DEVICE_ID_OPTI_82C825	0xd568
+
+#define PCI_VENDOR_ID_ELSA		0x1048
+#define PCI_DEVICE_ID_ELSA_MICROLINK	0x1000
+#define PCI_DEVICE_ID_ELSA_QS3000	0x3000
+
+
+#define PCI_VENDOR_ID_BUSLOGIC		      0x104B
+#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140
+#define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER    0x1040
+#define PCI_DEVICE_ID_BUSLOGIC_FLASHPOINT     0x8130
+
+#define PCI_VENDOR_ID_TI		0x104c
+#define PCI_DEVICE_ID_TI_TVP4020	0x3d07
+#define PCI_DEVICE_ID_TI_4450		0x8011
+#define PCI_DEVICE_ID_TI_XX21_XX11	0x8031
+#define PCI_DEVICE_ID_TI_X515		0x8036
+#define PCI_DEVICE_ID_TI_1130		0xac12
+#define PCI_DEVICE_ID_TI_1031		0xac13
+#define PCI_DEVICE_ID_TI_1131		0xac15
+#define PCI_DEVICE_ID_TI_1250		0xac16
+#define PCI_DEVICE_ID_TI_1220		0xac17
+#define PCI_DEVICE_ID_TI_1221		0xac19
+#define PCI_DEVICE_ID_TI_1210		0xac1a
+#define PCI_DEVICE_ID_TI_1450		0xac1b
+#define PCI_DEVICE_ID_TI_1225		0xac1c
+#define PCI_DEVICE_ID_TI_1251A		0xac1d
+#define PCI_DEVICE_ID_TI_1211		0xac1e
+#define PCI_DEVICE_ID_TI_1251B		0xac1f
+#define PCI_DEVICE_ID_TI_4410		0xac41
+#define PCI_DEVICE_ID_TI_4451		0xac42
+#define PCI_DEVICE_ID_TI_4510		0xac44
+#define PCI_DEVICE_ID_TI_4520		0xac46
+#define PCI_DEVICE_ID_TI_7510		0xac47
+#define PCI_DEVICE_ID_TI_7610		0xac48
+#define PCI_DEVICE_ID_TI_7410		0xac49
+#define PCI_DEVICE_ID_TI_1410		0xac50
+#define PCI_DEVICE_ID_TI_1420		0xac51
+#define PCI_DEVICE_ID_TI_1451A		0xac52
+#define PCI_DEVICE_ID_TI_1620		0xac54
+#define PCI_DEVICE_ID_TI_1520		0xac55
+#define PCI_DEVICE_ID_TI_1510		0xac56
+#define PCI_DEVICE_ID_TI_X620		0xac8d
+#define PCI_DEVICE_ID_TI_X420		0xac8e
+
+#define PCI_VENDOR_ID_SONY		0x104d
+
+
+/* Winbond have two vendor IDs! See 0x10ad as well */
+#define PCI_VENDOR_ID_WINBOND2		0x1050
+#define PCI_DEVICE_ID_WINBOND2_89C940F	0x5a5a
+#define PCI_DEVICE_ID_WINBOND2_6692	0x6692
+
+#define PCI_VENDOR_ID_ANIGMA		0x1051
+#define PCI_DEVICE_ID_ANIGMA_MC145575	0x0100
+
+#define PCI_VENDOR_ID_EFAR		0x1055
+#define PCI_DEVICE_ID_EFAR_SLC90E66_1	0x9130
+#define PCI_DEVICE_ID_EFAR_SLC90E66_3	0x9463
+
+#define PCI_VENDOR_ID_MOTOROLA		0x1057
+#define PCI_DEVICE_ID_MOTOROLA_MPC105	0x0001
+#define PCI_DEVICE_ID_MOTOROLA_MPC106	0x0002
+#define PCI_DEVICE_ID_MOTOROLA_MPC107	0x0004
+#define PCI_DEVICE_ID_MOTOROLA_RAVEN	0x4801
+#define PCI_DEVICE_ID_MOTOROLA_FALCON	0x4802
+#define PCI_DEVICE_ID_MOTOROLA_HAWK	0x4803
+#define PCI_DEVICE_ID_MOTOROLA_HARRIER	0x480b
+#define PCI_DEVICE_ID_MOTOROLA_MPC5200	0x5803
+
+#define PCI_VENDOR_ID_PROMISE		0x105a
+#define PCI_DEVICE_ID_PROMISE_20265	0x0d30
+#define PCI_DEVICE_ID_PROMISE_20267	0x4d30
+#define PCI_DEVICE_ID_PROMISE_20246	0x4d33
+#define PCI_DEVICE_ID_PROMISE_20262	0x4d38
+#define PCI_DEVICE_ID_PROMISE_20263	0x0D38
+#define PCI_DEVICE_ID_PROMISE_20268	0x4d68
+#define PCI_DEVICE_ID_PROMISE_20269	0x4d69
+#define PCI_DEVICE_ID_PROMISE_20270	0x6268
+#define PCI_DEVICE_ID_PROMISE_20271	0x6269
+#define PCI_DEVICE_ID_PROMISE_20275	0x1275
+#define PCI_DEVICE_ID_PROMISE_20276	0x5275
+#define PCI_DEVICE_ID_PROMISE_20277	0x7275
+
+
+#define PCI_VENDOR_ID_UMC		0x1060
+#define PCI_DEVICE_ID_UMC_UM8673F	0x0101
+#define PCI_DEVICE_ID_UMC_UM8886BF	0x673a
+#define PCI_DEVICE_ID_UMC_UM8886A	0x886a
+
+
+#define PCI_VENDOR_ID_MYLEX		0x1069
+#define PCI_DEVICE_ID_MYLEX_DAC960_P	0x0001
+#define PCI_DEVICE_ID_MYLEX_DAC960_PD	0x0002
+#define PCI_DEVICE_ID_MYLEX_DAC960_PG	0x0010
+#define PCI_DEVICE_ID_MYLEX_DAC960_LA	0x0020
+#define PCI_DEVICE_ID_MYLEX_DAC960_LP	0x0050
+#define PCI_DEVICE_ID_MYLEX_DAC960_BA	0xBA56
+#define PCI_DEVICE_ID_MYLEX_DAC960_GEM	0xB166
+
+
+#define PCI_VENDOR_ID_APPLE		0x106b
+#define PCI_DEVICE_ID_APPLE_BANDIT	0x0001
+#define PCI_DEVICE_ID_APPLE_HYDRA	0x000e
+#define PCI_DEVICE_ID_APPLE_UNI_N_FW	0x0018
+#define PCI_DEVICE_ID_APPLE_UNI_N_AGP	0x0020
+#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC	0x0021
+#define PCI_DEVICE_ID_APPLE_UNI_N_GMACP	0x0024
+#define PCI_DEVICE_ID_APPLE_UNI_N_AGP_P	0x0027
+#define PCI_DEVICE_ID_APPLE_UNI_N_AGP15	0x002d
+#define PCI_DEVICE_ID_APPLE_UNI_N_PCI15	0x002e
+#define PCI_DEVICE_ID_APPLE_UNI_N_GMAC2	0x0032
+#define PCI_DEVICE_ID_APPLE_UNI_N_ATA	0x0033
+#define PCI_DEVICE_ID_APPLE_UNI_N_AGP2	0x0034
+#define PCI_DEVICE_ID_APPLE_IPID_ATA100	0x003b
+#define PCI_DEVICE_ID_APPLE_K2_ATA100	0x0043
+#define PCI_DEVICE_ID_APPLE_U3_AGP	0x004b
+#define PCI_DEVICE_ID_APPLE_K2_GMAC	0x004c
+#define PCI_DEVICE_ID_APPLE_SH_ATA      0x0050
+#define PCI_DEVICE_ID_APPLE_SH_SUNGEM   0x0051
+#define PCI_DEVICE_ID_APPLE_U3L_AGP	0x0058
+#define PCI_DEVICE_ID_APPLE_U3H_AGP	0x0059
+#define PCI_DEVICE_ID_APPLE_IPID2_AGP	0x0066
+#define PCI_DEVICE_ID_APPLE_IPID2_ATA	0x0069
+#define PCI_DEVICE_ID_APPLE_IPID2_FW	0x006a
+#define PCI_DEVICE_ID_APPLE_IPID2_GMAC	0x006b
+#define PCI_DEVICE_ID_APPLE_TIGON3	0x1645
+
+#define PCI_VENDOR_ID_YAMAHA		0x1073
+#define PCI_DEVICE_ID_YAMAHA_724	0x0004
+#define PCI_DEVICE_ID_YAMAHA_724F	0x000d
+#define PCI_DEVICE_ID_YAMAHA_740	0x000a
+#define PCI_DEVICE_ID_YAMAHA_740C	0x000c
+#define PCI_DEVICE_ID_YAMAHA_744	0x0010
+#define PCI_DEVICE_ID_YAMAHA_754	0x0012
+
+
+#define PCI_VENDOR_ID_QLOGIC		0x1077
+#define PCI_DEVICE_ID_QLOGIC_ISP1020	0x1020
+#define PCI_DEVICE_ID_QLOGIC_ISP2100	0x2100
+#define PCI_DEVICE_ID_QLOGIC_ISP2200	0x2200
+#define PCI_DEVICE_ID_QLOGIC_ISP2300	0x2300
+#define PCI_DEVICE_ID_QLOGIC_ISP2312	0x2312
+#define PCI_DEVICE_ID_QLOGIC_ISP2322	0x2322
+#define PCI_DEVICE_ID_QLOGIC_ISP6312	0x6312
+#define PCI_DEVICE_ID_QLOGIC_ISP6322	0x6322
+#define PCI_DEVICE_ID_QLOGIC_ISP2422	0x2422
+#define PCI_DEVICE_ID_QLOGIC_ISP2432	0x2432
+#define PCI_DEVICE_ID_QLOGIC_ISP2512	0x2512
+#define PCI_DEVICE_ID_QLOGIC_ISP2522	0x2522
+
+#define PCI_VENDOR_ID_CYRIX		0x1078
+#define PCI_DEVICE_ID_CYRIX_5510	0x0000
+#define PCI_DEVICE_ID_CYRIX_PCI_MASTER	0x0001
+#define PCI_DEVICE_ID_CYRIX_5520	0x0002
+#define PCI_DEVICE_ID_CYRIX_5530_LEGACY	0x0100
+#define PCI_DEVICE_ID_CYRIX_5530_IDE	0x0102
+#define PCI_DEVICE_ID_CYRIX_5530_AUDIO	0x0103
+#define PCI_DEVICE_ID_CYRIX_5530_VIDEO	0x0104
+
+
+
+#define PCI_VENDOR_ID_CONTAQ		0x1080
+#define PCI_DEVICE_ID_CONTAQ_82C693	0xc693
+
+
+#define PCI_VENDOR_ID_OLICOM		0x108d
+#define PCI_DEVICE_ID_OLICOM_OC2325	0x0012
+#define PCI_DEVICE_ID_OLICOM_OC2183	0x0013
+#define PCI_DEVICE_ID_OLICOM_OC2326	0x0014
+
+#define PCI_VENDOR_ID_SUN		0x108e
+#define PCI_DEVICE_ID_SUN_EBUS		0x1000
+#define PCI_DEVICE_ID_SUN_HAPPYMEAL	0x1001
+#define PCI_DEVICE_ID_SUN_RIO_EBUS	0x1100
+#define PCI_DEVICE_ID_SUN_RIO_GEM	0x1101
+#define PCI_DEVICE_ID_SUN_RIO_1394	0x1102
+#define PCI_DEVICE_ID_SUN_RIO_USB	0x1103
+#define PCI_DEVICE_ID_SUN_GEM		0x2bad
+#define PCI_DEVICE_ID_SUN_SIMBA		0x5000
+#define PCI_DEVICE_ID_SUN_PBM		0x8000
+#define PCI_DEVICE_ID_SUN_SCHIZO	0x8001
+#define PCI_DEVICE_ID_SUN_SABRE		0xa000
+#define PCI_DEVICE_ID_SUN_HUMMINGBIRD	0xa001
+#define PCI_DEVICE_ID_SUN_TOMATILLO	0xa801
+#define PCI_DEVICE_ID_SUN_CASSINI	0xabba
+
+#define PCI_VENDOR_ID_CMD		0x1095
+#define PCI_DEVICE_ID_CMD_643		0x0643
+#define PCI_DEVICE_ID_CMD_646		0x0646
+#define PCI_DEVICE_ID_CMD_648		0x0648
+#define PCI_DEVICE_ID_CMD_649		0x0649
+
+#define PCI_DEVICE_ID_SII_680		0x0680
+#define PCI_DEVICE_ID_SII_3112		0x3112
+#define PCI_DEVICE_ID_SII_1210SA	0x0240
+
+
+#define PCI_VENDOR_ID_BROOKTREE		0x109e
+#define PCI_DEVICE_ID_BROOKTREE_878	0x0878
+#define PCI_DEVICE_ID_BROOKTREE_879	0x0879
+
+
+#define PCI_VENDOR_ID_SGI		0x10a9
+#define PCI_DEVICE_ID_SGI_IOC3		0x0003
+#define PCI_DEVICE_ID_SGI_IOC4		0x100a
+#define PCI_VENDOR_ID_SGI_LITHIUM	0x1002
+
+
+#define PCI_VENDOR_ID_WINBOND		0x10ad
+#define PCI_DEVICE_ID_WINBOND_82C105	0x0105
+#define PCI_DEVICE_ID_WINBOND_83C553	0x0565
+
+
+#define PCI_VENDOR_ID_PLX		0x10b5
+#define PCI_DEVICE_ID_PLX_R685		0x1030
+#define PCI_DEVICE_ID_PLX_ROMULUS	0x106a
+#define PCI_DEVICE_ID_PLX_SPCOM800	0x1076
+#define PCI_DEVICE_ID_PLX_1077		0x1077
+#define PCI_DEVICE_ID_PLX_SPCOM200	0x1103
+#define PCI_DEVICE_ID_PLX_DJINN_ITOO	0x1151
+#define PCI_DEVICE_ID_PLX_R753		0x1152
+#define PCI_DEVICE_ID_PLX_OLITEC	0x1187
+#define PCI_DEVICE_ID_PLX_9050		0x9050
+#define PCI_DEVICE_ID_PLX_9080		0x9080
+#define PCI_DEVICE_ID_PLX_GTEK_SERIAL2	0xa001
+
+#define PCI_VENDOR_ID_MADGE		0x10b6
+#define PCI_DEVICE_ID_MADGE_MK2		0x0002
+
+#define PCI_VENDOR_ID_3COM		0x10b7
+#define PCI_DEVICE_ID_3COM_3C985	0x0001
+#define PCI_DEVICE_ID_3COM_3C940	0x1700
+#define PCI_DEVICE_ID_3COM_3C339	0x3390
+#define PCI_DEVICE_ID_3COM_3C359	0x3590
+#define PCI_DEVICE_ID_3COM_3C940B	0x80eb
+#define PCI_DEVICE_ID_3COM_3CR990	0x9900
+#define PCI_DEVICE_ID_3COM_3CR990_TX_95	0x9902
+#define PCI_DEVICE_ID_3COM_3CR990_TX_97	0x9903
+#define PCI_DEVICE_ID_3COM_3CR990B	0x9904
+#define PCI_DEVICE_ID_3COM_3CR990_FX	0x9905
+#define PCI_DEVICE_ID_3COM_3CR990SVR95	0x9908
+#define PCI_DEVICE_ID_3COM_3CR990SVR97	0x9909
+#define PCI_DEVICE_ID_3COM_3CR990SVR	0x990a
+
+
+#define PCI_VENDOR_ID_AL		0x10b9
+#define PCI_DEVICE_ID_AL_M1533		0x1533
+#define PCI_DEVICE_ID_AL_M1535 		0x1535
+#define PCI_DEVICE_ID_AL_M1541		0x1541
+#define PCI_DEVICE_ID_AL_M1563		0x1563
+#define PCI_DEVICE_ID_AL_M1621		0x1621
+#define PCI_DEVICE_ID_AL_M1631		0x1631
+#define PCI_DEVICE_ID_AL_M1632		0x1632
+#define PCI_DEVICE_ID_AL_M1641		0x1641
+#define PCI_DEVICE_ID_AL_M1644		0x1644
+#define PCI_DEVICE_ID_AL_M1647		0x1647
+#define PCI_DEVICE_ID_AL_M1651		0x1651
+#define PCI_DEVICE_ID_AL_M1671		0x1671
+#define PCI_DEVICE_ID_AL_M1681		0x1681
+#define PCI_DEVICE_ID_AL_M1683		0x1683
+#define PCI_DEVICE_ID_AL_M1689		0x1689
+#define PCI_DEVICE_ID_AL_M5219		0x5219
+#define PCI_DEVICE_ID_AL_M5228		0x5228
+#define PCI_DEVICE_ID_AL_M5229		0x5229
+#define PCI_DEVICE_ID_AL_M5451		0x5451
+#define PCI_DEVICE_ID_AL_M7101		0x7101
+
+
+
+#define PCI_VENDOR_ID_NEOMAGIC		0x10c8
+#define PCI_DEVICE_ID_NEOMAGIC_NM256AV_AUDIO 0x8005
+#define PCI_DEVICE_ID_NEOMAGIC_NM256ZX_AUDIO 0x8006
+#define PCI_DEVICE_ID_NEOMAGIC_NM256XL_PLUS_AUDIO 0x8016
+
+
+#define PCI_VENDOR_ID_TCONRAD		0x10da
+#define PCI_DEVICE_ID_TCONRAD_TOKENRING	0x0508
+
+
+#define PCI_VENDOR_ID_NVIDIA			0x10de
+#define PCI_DEVICE_ID_NVIDIA_TNT		0x0020
+#define PCI_DEVICE_ID_NVIDIA_TNT2		0x0028
+#define PCI_DEVICE_ID_NVIDIA_UTNT2		0x0029
+#define PCI_DEVICE_ID_NVIDIA_TNT_UNKNOWN        0x002a
+#define PCI_DEVICE_ID_NVIDIA_VTNT2		0x002C
+#define PCI_DEVICE_ID_NVIDIA_UVTNT2		0x002D
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE	0x0035
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA	0x0036
+#define PCI_DEVICE_ID_NVIDIA_NVENET_10		0x0037
+#define PCI_DEVICE_ID_NVIDIA_NVENET_11		0x0038
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2	0x003e
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_ULTRA 0x0040
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800       0x0041
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_LE    0x0042
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_GT    0x0045
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_4000     0x004E
+#define PCI_DEVICE_ID_NVIDIA_NFORCE4_SMBUS	0x0052
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE	0x0053
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA	0x0054
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2	0x0055
+#define PCI_DEVICE_ID_NVIDIA_NVENET_8		0x0056
+#define PCI_DEVICE_ID_NVIDIA_NVENET_9		0x0057
+#define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO	0x0059
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS	0x0064
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE	0x0065
+#define PCI_DEVICE_ID_NVIDIA_NVENET_2		0x0066
+#define PCI_DEVICE_ID_NVIDIA_MCP2_MODEM		0x0069
+#define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO		0x006a
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS	0x0084
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE	0x0085
+#define PCI_DEVICE_ID_NVIDIA_NVENET_4		0x0086
+#define PCI_DEVICE_ID_NVIDIA_MCP2S_MODEM	0x0089
+#define PCI_DEVICE_ID_NVIDIA_CK8_AUDIO		0x008a
+#define PCI_DEVICE_ID_NVIDIA_NVENET_5		0x008c
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA	0x008e
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GT   0x0090
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GTX	0x0091
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800   0x0098
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_7800_GTX 0x0099
+#define PCI_DEVICE_ID_NVIDIA_ITNT2		0x00A0
+#define PCI_DEVICE_ID_GEFORCE_6800A             0x00c1
+#define PCI_DEVICE_ID_GEFORCE_6800A_LE          0x00c2
+#define PCI_DEVICE_ID_GEFORCE_GO_6800           0x00c8
+#define PCI_DEVICE_ID_GEFORCE_GO_6800_ULTRA     0x00c9
+#define PCI_DEVICE_ID_QUADRO_FX_GO1400          0x00cc
+#define PCI_DEVICE_ID_QUADRO_FX_1400            0x00ce
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3		0x00d1
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS	0x00d4
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE	0x00d5
+#define PCI_DEVICE_ID_NVIDIA_NVENET_3		0x00d6
+#define PCI_DEVICE_ID_NVIDIA_MCP3_MODEM		0x00d9
+#define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO		0x00da
+#define PCI_DEVICE_ID_NVIDIA_NVENET_7		0x00df
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3S		0x00e1
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA	0x00e3
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS	0x00e4
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE	0x00e5
+#define PCI_DEVICE_ID_NVIDIA_NVENET_6		0x00e6
+#define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO		0x00ea
+#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2	0x00ee
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR	0x0100
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR	0x0101
+#define PCI_DEVICE_ID_NVIDIA_QUADRO		0x0103
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX	0x0110
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_MX2	0x0111
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GO	0x0112
+#define PCI_DEVICE_ID_NVIDIA_QUADRO2_MXR	0x0113
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600_GT	0x0140
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6600	0x0141
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6610_XL	0x0145
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_540	0x014E
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200	0x014F
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS	0x0150
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_GTS2	0x0151
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE2_ULTRA	0x0152
+#define PCI_DEVICE_ID_NVIDIA_QUADRO2_PRO	0x0153
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6200_TURBOCACHE 0x0161
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200    0x0164
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250    0x0166
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6200_1  0x0167
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_GO_6250_1  0x0168
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_460	0x0170
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440	0x0171
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420	0x0172
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_SE	0x0173
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO	0x0174
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO	0x0175
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_420_GO_M32 0x0176
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_460_GO    0x0177
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_500XGL	0x0178
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_440_GO_M64 0x0179
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_200	0x017A
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_550XGL	0x017B
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_500_GOGL	0x017C
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_410_GO_M16 0x017D
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440_8X 0x0181
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_440SE_8X 0x0182
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_420_8X 0x0183
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_448_GO    0x0186
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_488_GO    0x0187
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_580_XGL    0x0188
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_MX_MAC    0x0189
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_280_NVS    0x018A
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_380_XGL    0x018B
+#define PCI_DEVICE_ID_NVIDIA_IGEFORCE2		0x01a0
+#define PCI_DEVICE_ID_NVIDIA_NFORCE		0x01a4
+#define PCI_DEVICE_ID_NVIDIA_MCP1_AUDIO		0x01b1
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS	0x01b4
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE		0x01bc
+#define PCI_DEVICE_ID_NVIDIA_MCP1_MODEM		0x01c1
+#define PCI_DEVICE_ID_NVIDIA_NVENET_1		0x01c3
+#define PCI_DEVICE_ID_NVIDIA_NFORCE2		0x01e0
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE3		0x0200
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1		0x0201
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_2		0x0202
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_DDC		0x0203
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B      0x0211
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_LE   0x0212
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800B_GT   0x0215
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4600	0x0250
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4400	0x0251
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4200	0x0253
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_900XGL	0x0258
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_750XGL	0x0259
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_700XGL	0x025B
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE	0x0265
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA	0x0266
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2	0x0267
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE	0x036E
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA	0x037E
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2	0x037F
+#define PCI_DEVICE_ID_NVIDIA_NVENET_12		0x0268
+#define PCI_DEVICE_ID_NVIDIA_NVENET_13		0x0269
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800	0x0280
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X    0x0281
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE     0x0282
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_4200_GO       0x0286
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_980_XGL        0x0288
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_780_XGL        0x0289
+#define PCI_DEVICE_ID_NVIDIA_QUADRO4_700_GOGL       0x028C
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800_ULTRA  0x0301
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5800        0x0302
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_2000         0x0308
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1000         0x0309
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600_ULTRA  0x0311
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600        0x0312
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5600SE      0x0314
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5600      0x031A
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5650      0x031B
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO700        0x031C
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200        0x0320
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_ULTRA  0x0321
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200_1      0x0322
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5200SE      0x0323
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5200      0x0324
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250      0x0325
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5500        0x0326
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5100        0x0327
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5250_32   0x0328
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO_5200	    0x0329
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_NVS_280_PCI     0x032A
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_500          0x032B
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5300      0x032C
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5100      0x032D
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900_ULTRA  0x0330
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900        0x0331
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900XT      0x0332
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5950_ULTRA  0x0333
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5900ZT      0x0334
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_3000         0x0338
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_700          0x033F
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700_ULTRA  0x0341
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700        0x0342
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700LE      0x0343
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_5700VE      0x0344
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_1    0x0347
+#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2    0x0348
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000       0x034C
+#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100         0x034E
+#define PCI_DEVICE_ID_NVIDIA_NVENET_14              0x0372
+#define PCI_DEVICE_ID_NVIDIA_NVENET_15              0x0373
+
+#define PCI_VENDOR_ID_IMS		0x10e0
+#define PCI_DEVICE_ID_IMS_TT128		0x9128
+#define PCI_DEVICE_ID_IMS_TT3D		0x9135
+
+
+
+
+#define PCI_VENDOR_ID_INTERG		0x10ea
+#define PCI_DEVICE_ID_INTERG_1682	0x1682
+#define PCI_DEVICE_ID_INTERG_2000	0x2000
+#define PCI_DEVICE_ID_INTERG_2010	0x2010
+#define PCI_DEVICE_ID_INTERG_5000	0x5000
+#define PCI_DEVICE_ID_INTERG_5050	0x5050
+
+#define PCI_VENDOR_ID_REALTEK		0x10ec
+#define PCI_DEVICE_ID_REALTEK_8139	0x8139
+
+#define PCI_VENDOR_ID_XILINX		0x10ee
+#define PCI_DEVICE_ID_RME_DIGI96	0x3fc0
+#define PCI_DEVICE_ID_RME_DIGI96_8	0x3fc1
+#define PCI_DEVICE_ID_RME_DIGI96_8_PRO	0x3fc2
+#define PCI_DEVICE_ID_RME_DIGI96_8_PAD_OR_PST 0x3fc3
+#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP 0x3fc5
+#define PCI_DEVICE_ID_XILINX_HAMMERFALL_DSP_MADI 0x3fc6
+
+
+#define PCI_VENDOR_ID_INIT		0x1101
+
+#define PCI_VENDOR_ID_CREATIVE		0x1102 /* duplicate: ECTIVA */
+#define PCI_DEVICE_ID_CREATIVE_EMU10K1	0x0002
+
+#define PCI_VENDOR_ID_ECTIVA		0x1102 /* duplicate: CREATIVE */
+#define PCI_DEVICE_ID_ECTIVA_EV1938	0x8938
+
+#define PCI_VENDOR_ID_TTI		0x1103
+#define PCI_DEVICE_ID_TTI_HPT343	0x0003
+#define PCI_DEVICE_ID_TTI_HPT366	0x0004
+#define PCI_DEVICE_ID_TTI_HPT372	0x0005
+#define PCI_DEVICE_ID_TTI_HPT302	0x0006
+#define PCI_DEVICE_ID_TTI_HPT371	0x0007
+#define PCI_DEVICE_ID_TTI_HPT374	0x0008
+#define PCI_DEVICE_ID_TTI_HPT372N	0x0009	/* apparently a 372N variant? */
+
+#define PCI_VENDOR_ID_VIA		0x1106
+#define PCI_DEVICE_ID_VIA_8763_0	0x0198
+#define PCI_DEVICE_ID_VIA_8380_0	0x0204
+#define PCI_DEVICE_ID_VIA_3238_0	0x0238
+#define PCI_DEVICE_ID_VIA_PT880		0x0258
+#define PCI_DEVICE_ID_VIA_PX8X0_0	0x0259
+#define PCI_DEVICE_ID_VIA_3269_0	0x0269
+#define PCI_DEVICE_ID_VIA_K8T800PRO_0	0x0282
+#define PCI_DEVICE_ID_VIA_8363_0	0x0305
+#define PCI_DEVICE_ID_VIA_P4M800CE	0x0314
+#define PCI_DEVICE_ID_VIA_8371_0	0x0391
+#define PCI_DEVICE_ID_VIA_8501_0	0x0501
+#define PCI_DEVICE_ID_VIA_82C561	0x0561
+#define PCI_DEVICE_ID_VIA_82C586_1	0x0571
+#define PCI_DEVICE_ID_VIA_82C576	0x0576
+#define PCI_DEVICE_ID_VIA_82C586_0	0x0586
+#define PCI_DEVICE_ID_VIA_82C596	0x0596
+#define PCI_DEVICE_ID_VIA_82C597_0	0x0597
+#define PCI_DEVICE_ID_VIA_82C598_0	0x0598
+#define PCI_DEVICE_ID_VIA_8601_0	0x0601
+#define PCI_DEVICE_ID_VIA_8605_0	0x0605
+#define PCI_DEVICE_ID_VIA_82C686	0x0686
+#define PCI_DEVICE_ID_VIA_82C691_0	0x0691
+#define PCI_DEVICE_ID_VIA_82C576_1	0x1571
+#define PCI_DEVICE_ID_VIA_82C586_2	0x3038
+#define PCI_DEVICE_ID_VIA_82C586_3	0x3040
+#define PCI_DEVICE_ID_VIA_82C596_3	0x3050
+#define PCI_DEVICE_ID_VIA_82C596B_3	0x3051
+#define PCI_DEVICE_ID_VIA_82C686_4	0x3057
+#define PCI_DEVICE_ID_VIA_82C686_5	0x3058
+#define PCI_DEVICE_ID_VIA_8233_5	0x3059
+#define PCI_DEVICE_ID_VIA_8233_0	0x3074
+#define PCI_DEVICE_ID_VIA_8633_0	0x3091
+#define PCI_DEVICE_ID_VIA_8367_0	0x3099
+#define PCI_DEVICE_ID_VIA_8653_0	0x3101
+#define PCI_DEVICE_ID_VIA_8622		0x3102
+#define PCI_DEVICE_ID_VIA_8233C_0	0x3109
+#define PCI_DEVICE_ID_VIA_8361		0x3112
+#define PCI_DEVICE_ID_VIA_XM266		0x3116
+#define PCI_DEVICE_ID_VIA_612X		0x3119
+#define PCI_DEVICE_ID_VIA_862X_0	0x3123
+#define PCI_DEVICE_ID_VIA_8753_0	0x3128
+#define PCI_DEVICE_ID_VIA_8233A		0x3147
+#define PCI_DEVICE_ID_VIA_8703_51_0	0x3148
+#define PCI_DEVICE_ID_VIA_8237_SATA	0x3149
+#define PCI_DEVICE_ID_VIA_XN266		0x3156
+#define PCI_DEVICE_ID_VIA_6410		0x3164
+#define PCI_DEVICE_ID_VIA_8754C_0	0x3168
+#define PCI_DEVICE_ID_VIA_8235		0x3177
+#define PCI_DEVICE_ID_VIA_8385_0	0x3188
+#define PCI_DEVICE_ID_VIA_8377_0	0x3189
+#define PCI_DEVICE_ID_VIA_8378_0	0x3205
+#define PCI_DEVICE_ID_VIA_8783_0	0x3208
+#define PCI_DEVICE_ID_VIA_8237		0x3227
+#define PCI_DEVICE_ID_VIA_8251		0x3287
+#define PCI_DEVICE_ID_VIA_3296_0	0x0296
+#define PCI_DEVICE_ID_VIA_8231		0x8231
+#define PCI_DEVICE_ID_VIA_8231_4	0x8235
+#define PCI_DEVICE_ID_VIA_8365_1	0x8305
+#define PCI_DEVICE_ID_VIA_8371_1	0x8391
+#define PCI_DEVICE_ID_VIA_82C598_1	0x8598
+#define PCI_DEVICE_ID_VIA_838X_1	0xB188
+#define PCI_DEVICE_ID_VIA_83_87XX_1	0xB198
+
+#define PCI_VENDOR_ID_SIEMENS           0x110A
+#define PCI_DEVICE_ID_SIEMENS_DSCC4     0x2102
+
+
+#define PCI_VENDOR_ID_VORTEX		0x1119
+#define PCI_DEVICE_ID_VORTEX_GDT60x0	0x0000
+#define PCI_DEVICE_ID_VORTEX_GDT6000B	0x0001
+#define PCI_DEVICE_ID_VORTEX_GDT6x10	0x0002
+#define PCI_DEVICE_ID_VORTEX_GDT6x20	0x0003
+#define PCI_DEVICE_ID_VORTEX_GDT6530	0x0004
+#define PCI_DEVICE_ID_VORTEX_GDT6550	0x0005
+#define PCI_DEVICE_ID_VORTEX_GDT6x17	0x0006
+#define PCI_DEVICE_ID_VORTEX_GDT6x27	0x0007
+#define PCI_DEVICE_ID_VORTEX_GDT6537	0x0008
+#define PCI_DEVICE_ID_VORTEX_GDT6557	0x0009
+#define PCI_DEVICE_ID_VORTEX_GDT6x15	0x000a
+#define PCI_DEVICE_ID_VORTEX_GDT6x25	0x000b
+#define PCI_DEVICE_ID_VORTEX_GDT6535	0x000c
+#define PCI_DEVICE_ID_VORTEX_GDT6555	0x000d
+#define PCI_DEVICE_ID_VORTEX_GDT6x17RP	0x0100
+#define PCI_DEVICE_ID_VORTEX_GDT6x27RP	0x0101
+#define PCI_DEVICE_ID_VORTEX_GDT6537RP	0x0102
+#define PCI_DEVICE_ID_VORTEX_GDT6557RP	0x0103
+#define PCI_DEVICE_ID_VORTEX_GDT6x11RP	0x0104
+#define PCI_DEVICE_ID_VORTEX_GDT6x21RP	0x0105
+
+#define PCI_VENDOR_ID_EF		0x111a
+#define PCI_DEVICE_ID_EF_ATM_FPGA	0x0000
+#define PCI_DEVICE_ID_EF_ATM_ASIC	0x0002
+#define PCI_VENDOR_ID_EF_ATM_LANAI2	0x0003
+#define PCI_VENDOR_ID_EF_ATM_LANAIHB	0x0005
+
+#define PCI_VENDOR_ID_IDT		0x111d
+#define PCI_DEVICE_ID_IDT_IDT77201	0x0001
+
+#define PCI_VENDOR_ID_FORE		0x1127
+#define PCI_DEVICE_ID_FORE_PCA200E	0x0300
+
+
+#define PCI_VENDOR_ID_PHILIPS		0x1131
+#define PCI_DEVICE_ID_PHILIPS_SAA7146	0x7146
+#define PCI_DEVICE_ID_PHILIPS_SAA9730	0x9730
+
+#define PCI_VENDOR_ID_EICON		0x1133
+#define PCI_DEVICE_ID_EICON_DIVA20	0xe002
+#define PCI_DEVICE_ID_EICON_DIVA20_U	0xe004
+#define PCI_DEVICE_ID_EICON_DIVA201	0xe005
+#define PCI_DEVICE_ID_EICON_DIVA202	0xe00b
+#define PCI_DEVICE_ID_EICON_MAESTRA	0xe010
+#define PCI_DEVICE_ID_EICON_MAESTRAQ	0xe012
+#define PCI_DEVICE_ID_EICON_MAESTRAQ_U	0xe013
+#define PCI_DEVICE_ID_EICON_MAESTRAP	0xe014
+
+#define PCI_VENDOR_ID_ZIATECH		0x1138
+#define PCI_DEVICE_ID_ZIATECH_5550_HC	0x5550
+
+
+
+#define PCI_VENDOR_ID_SYSKONNECT	0x1148
+#define PCI_DEVICE_ID_SYSKONNECT_TR	0x4200
+#define PCI_DEVICE_ID_SYSKONNECT_GE	0x4300
+#define PCI_DEVICE_ID_SYSKONNECT_YU	0x4320
+#define PCI_DEVICE_ID_SYSKONNECT_9DXX	0x4400
+#define PCI_DEVICE_ID_SYSKONNECT_9MXX	0x4500
+
+
+#define PCI_VENDOR_ID_DIGI		0x114f
+#define PCI_DEVICE_ID_DIGI_DF_M_IOM2_E	0x0070
+#define PCI_DEVICE_ID_DIGI_DF_M_E	0x0071
+#define PCI_DEVICE_ID_DIGI_DF_M_IOM2_A	0x0072
+#define PCI_DEVICE_ID_DIGI_DF_M_A	0x0073
+#define PCI_DEVICE_ID_NEO_2DB9          0x00C8
+#define PCI_DEVICE_ID_NEO_2DB9PRI       0x00C9
+#define PCI_DEVICE_ID_NEO_2RJ45         0x00CA
+#define PCI_DEVICE_ID_NEO_2RJ45PRI      0x00CB
+
+
+#define PCI_VENDOR_ID_XIRCOM		0x115d
+#define PCI_DEVICE_ID_XIRCOM_RBM56G	0x0101
+#define PCI_DEVICE_ID_XIRCOM_X3201_MDM	0x0103
+
+
+#define PCI_VENDOR_ID_SERVERWORKS	  0x1166
+#define PCI_DEVICE_ID_SERVERWORKS_HE	  0x0008
+#define PCI_DEVICE_ID_SERVERWORKS_LE	  0x0009
+#define PCI_DEVICE_ID_SERVERWORKS_GCNB_LE 0x0017
+#define PCI_DEVICE_ID_SERVERWORKS_OSB4	  0x0200
+#define PCI_DEVICE_ID_SERVERWORKS_CSB5	  0x0201
+#define PCI_DEVICE_ID_SERVERWORKS_CSB6    0x0203
+#define PCI_DEVICE_ID_SERVERWORKS_OSB4IDE 0x0211
+#define PCI_DEVICE_ID_SERVERWORKS_CSB5IDE 0x0212
+#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE 0x0213
+#define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214
+#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217
+#define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227
+
+#define PCI_VENDOR_ID_SBE		0x1176
+#define PCI_DEVICE_ID_SBE_WANXL100	0x0301
+#define PCI_DEVICE_ID_SBE_WANXL200	0x0302
+#define PCI_DEVICE_ID_SBE_WANXL400	0x0104
+
+#define PCI_VENDOR_ID_TOSHIBA		0x1179
+#define PCI_DEVICE_ID_TOSHIBA_PICCOLO	0x0102
+#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_1	0x0103
+#define PCI_DEVICE_ID_TOSHIBA_PICCOLO_2	0x0105
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC95	0x060a
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC97	0x060f
+#define PCI_DEVICE_ID_TOSHIBA_TOPIC100	0x0617
+
+#define PCI_VENDOR_ID_TOSHIBA_2		0x102f
+#define PCI_DEVICE_ID_TOSHIBA_TC35815CF	0x0030
+#define PCI_DEVICE_ID_TOSHIBA_TC86C001_MISC	0x0108
+#define PCI_DEVICE_ID_TOSHIBA_SPIDER_NET 0x01b3
+
+#define PCI_VENDOR_ID_RICOH		0x1180
+#define PCI_DEVICE_ID_RICOH_RL5C465	0x0465
+#define PCI_DEVICE_ID_RICOH_RL5C466	0x0466
+#define PCI_DEVICE_ID_RICOH_RL5C475	0x0475
+#define PCI_DEVICE_ID_RICOH_RL5C476	0x0476
+#define PCI_DEVICE_ID_RICOH_RL5C478	0x0478
+
+#define PCI_VENDOR_ID_DLINK		0x1186
+#define PCI_DEVICE_ID_DLINK_DGE510T	0x4c00
+
+#define PCI_VENDOR_ID_ARTOP		0x1191
+#define PCI_DEVICE_ID_ARTOP_ATP850UF	0x0005
+#define PCI_DEVICE_ID_ARTOP_ATP860	0x0006
+#define PCI_DEVICE_ID_ARTOP_ATP860R	0x0007
+#define PCI_DEVICE_ID_ARTOP_ATP865	0x0008
+#define PCI_DEVICE_ID_ARTOP_ATP865R	0x0009
+#define PCI_DEVICE_ID_ARTOP_AEC7610	0x8002
+#define PCI_DEVICE_ID_ARTOP_AEC7612UW	0x8010
+#define PCI_DEVICE_ID_ARTOP_AEC7612U	0x8020
+#define PCI_DEVICE_ID_ARTOP_AEC7612S	0x8030
+#define PCI_DEVICE_ID_ARTOP_AEC7612D	0x8040
+#define PCI_DEVICE_ID_ARTOP_AEC7612SUW	0x8050
+#define PCI_DEVICE_ID_ARTOP_8060	0x8060
+
+#define PCI_VENDOR_ID_ZEITNET		0x1193
+#define PCI_DEVICE_ID_ZEITNET_1221	0x0001
+#define PCI_DEVICE_ID_ZEITNET_1225	0x0002
+
+
+#define PCI_VENDOR_ID_FUJITSU_ME	0x119e
+#define PCI_DEVICE_ID_FUJITSU_FS155	0x0001
+#define PCI_DEVICE_ID_FUJITSU_FS50	0x0003
+
+#define PCI_SUBVENDOR_ID_KEYSPAN	0x11a9
+#define PCI_SUBDEVICE_ID_KEYSPAN_SX2	0x5334
+
+#define PCI_VENDOR_ID_MARVELL		0x11ab
+#define PCI_DEVICE_ID_MARVELL_GT64111	0x4146
+#define PCI_DEVICE_ID_MARVELL_GT64260	0x6430
+#define PCI_DEVICE_ID_MARVELL_MV64360	0x6460
+#define PCI_DEVICE_ID_MARVELL_MV64460	0x6480
+#define PCI_DEVICE_ID_MARVELL_GT96100	0x9652
+#define PCI_DEVICE_ID_MARVELL_GT96100A	0x9653
+
+
+#define PCI_VENDOR_ID_V3		0x11b0
+#define PCI_DEVICE_ID_V3_V960		0x0001
+#define PCI_DEVICE_ID_V3_V351		0x0002
+
+
+#define PCI_VENDOR_ID_ATT		0x11c1
+#define PCI_DEVICE_ID_ATT_VENUS_MODEM	0x480
+
+
+#define PCI_VENDOR_ID_SPECIALIX		0x11cb
+#define PCI_DEVICE_ID_SPECIALIX_IO8	0x2000
+#define PCI_DEVICE_ID_SPECIALIX_RIO	0x8000
+#define PCI_SUBDEVICE_ID_SPECIALIX_SPEED4 0xa004
+
+
+#define PCI_VENDOR_ID_ANALOG_DEVICES	0x11d4
+#define PCI_DEVICE_ID_AD1889JS		0x1889
+
+
+#define PCI_DEVICE_ID_SEGA_BBA		0x1234
+
+#define PCI_VENDOR_ID_ZORAN		0x11de
+#define PCI_DEVICE_ID_ZORAN_36057	0x6057
+#define PCI_DEVICE_ID_ZORAN_36120	0x6120
+
+
+#define PCI_VENDOR_ID_COMPEX		0x11f6
+#define PCI_DEVICE_ID_COMPEX_ENET100VG4	0x0112
+
+#define PCI_VENDOR_ID_RP		0x11fe
+#define PCI_DEVICE_ID_RP32INTF		0x0001
+#define PCI_DEVICE_ID_RP8INTF		0x0002
+#define PCI_DEVICE_ID_RP16INTF		0x0003
+#define PCI_DEVICE_ID_RP4QUAD		0x0004
+#define PCI_DEVICE_ID_RP8OCTA		0x0005
+#define PCI_DEVICE_ID_RP8J		0x0006
+#define PCI_DEVICE_ID_RP4J		0x0007
+#define PCI_DEVICE_ID_RP8SNI		0x0008
+#define PCI_DEVICE_ID_RP16SNI		0x0009
+#define PCI_DEVICE_ID_RPP4		0x000A
+#define PCI_DEVICE_ID_RPP8		0x000B
+#define PCI_DEVICE_ID_RP4M		0x000D
+#define PCI_DEVICE_ID_RP2_232		0x000E
+#define PCI_DEVICE_ID_RP2_422		0x000F
+#define PCI_DEVICE_ID_URP32INTF		0x0801
+#define PCI_DEVICE_ID_URP8INTF		0x0802
+#define PCI_DEVICE_ID_URP16INTF		0x0803
+#define PCI_DEVICE_ID_URP8OCTA		0x0805
+#define PCI_DEVICE_ID_UPCI_RM3_8PORT	0x080C
+#define PCI_DEVICE_ID_UPCI_RM3_4PORT	0x080D
+#define PCI_DEVICE_ID_CRP16INTF		0x0903
+
+#define PCI_VENDOR_ID_CYCLADES		0x120e
+#define PCI_DEVICE_ID_CYCLOM_Y_Lo	0x0100
+#define PCI_DEVICE_ID_CYCLOM_Y_Hi	0x0101
+#define PCI_DEVICE_ID_CYCLOM_4Y_Lo	0x0102
+#define PCI_DEVICE_ID_CYCLOM_4Y_Hi	0x0103
+#define PCI_DEVICE_ID_CYCLOM_8Y_Lo	0x0104
+#define PCI_DEVICE_ID_CYCLOM_8Y_Hi	0x0105
+#define PCI_DEVICE_ID_CYCLOM_Z_Lo	0x0200
+#define PCI_DEVICE_ID_CYCLOM_Z_Hi	0x0201
+#define PCI_DEVICE_ID_PC300_RX_2	0x0300
+#define PCI_DEVICE_ID_PC300_RX_1	0x0301
+#define PCI_DEVICE_ID_PC300_TE_2	0x0310
+#define PCI_DEVICE_ID_PC300_TE_1	0x0311
+#define PCI_DEVICE_ID_PC300_TE_M_2	0x0320
+#define PCI_DEVICE_ID_PC300_TE_M_1	0x0321
+
+#define PCI_VENDOR_ID_ESSENTIAL		0x120f
+#define PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER	0x0001
+
+#define PCI_VENDOR_ID_O2		0x1217
+#define PCI_DEVICE_ID_O2_6729		0x6729
+#define PCI_DEVICE_ID_O2_6730		0x673a
+#define PCI_DEVICE_ID_O2_6832		0x6832
+#define PCI_DEVICE_ID_O2_6836		0x6836
+
+#define PCI_VENDOR_ID_3DFX		0x121a
+#define PCI_DEVICE_ID_3DFX_VOODOO	0x0001
+#define PCI_DEVICE_ID_3DFX_VOODOO2	0x0002
+#define PCI_DEVICE_ID_3DFX_BANSHEE	0x0003
+#define PCI_DEVICE_ID_3DFX_VOODOO3	0x0005
+#define PCI_DEVICE_ID_3DFX_VOODOO5	0x0009
+
+#define PCI_VENDOR_ID_AVM		0x1244
+#define PCI_DEVICE_ID_AVM_B1		0x0700
+#define PCI_DEVICE_ID_AVM_C4		0x0800
+#define PCI_DEVICE_ID_AVM_A1		0x0a00
+#define PCI_DEVICE_ID_AVM_A1_V2		0x0e00
+#define PCI_DEVICE_ID_AVM_C2		0x1100
+#define PCI_DEVICE_ID_AVM_T1		0x1200
+
+
+#define PCI_VENDOR_ID_STALLION		0x124d
+
+/* Allied Telesyn */
+#define PCI_VENDOR_ID_AT		0x1259
+#define PCI_SUBDEVICE_ID_AT_2700FX	0x2701
+#define PCI_SUBDEVICE_ID_AT_2701FX	0x2703
+
+#define PCI_VENDOR_ID_ESS		0x125d
+#define PCI_DEVICE_ID_ESS_ESS1968	0x1968
+#define PCI_DEVICE_ID_ESS_ESS1978	0x1978
+#define PCI_DEVICE_ID_ESS_ALLEGRO_1	0x1988
+#define PCI_DEVICE_ID_ESS_ALLEGRO	0x1989
+#define PCI_DEVICE_ID_ESS_CANYON3D_2LE	0x1990
+#define PCI_DEVICE_ID_ESS_CANYON3D_2	0x1992
+#define PCI_DEVICE_ID_ESS_MAESTRO3	0x1998
+#define PCI_DEVICE_ID_ESS_MAESTRO3_1	0x1999
+#define PCI_DEVICE_ID_ESS_MAESTRO3_HW	0x199a
+#define PCI_DEVICE_ID_ESS_MAESTRO3_2	0x199b
+
+#define PCI_VENDOR_ID_SATSAGEM		0x1267
+#define PCI_DEVICE_ID_SATSAGEM_NICCY	0x1016
+
+
+#define PCI_VENDOR_ID_ENSONIQ		0x1274
+#define PCI_DEVICE_ID_ENSONIQ_CT5880	0x5880
+#define PCI_DEVICE_ID_ENSONIQ_ES1370	0x5000
+#define PCI_DEVICE_ID_ENSONIQ_ES1371	0x1371
+
+#define PCI_VENDOR_ID_TRANSMETA		0x1279
+#define PCI_DEVICE_ID_EFFICEON		0x0060
+
+#define PCI_VENDOR_ID_ROCKWELL		0x127A
+
+#define PCI_VENDOR_ID_ITE		0x1283
+#define PCI_DEVICE_ID_ITE_IT8172G	0x8172
+#define PCI_DEVICE_ID_ITE_IT8172G_AUDIO 0x0801
+#define PCI_DEVICE_ID_ITE_8211		0x8211
+#define PCI_DEVICE_ID_ITE_8212		0x8212
+#define PCI_DEVICE_ID_ITE_8872		0x8872
+#define PCI_DEVICE_ID_ITE_IT8330G_0	0xe886
+
+/* formerly Platform Tech */
+#define PCI_DEVICE_ID_ESS_ESS0100	0x0100
+
+#define PCI_VENDOR_ID_ALTEON		0x12ae
+
+
+#define PCI_SUBVENDOR_ID_CONNECT_TECH			0x12c4
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_232		0x0001
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_232		0x0002
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_232		0x0003
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485		0x0004
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_4_4	0x0005
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485		0x0006
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH4_485_2_2	0x0007
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH2_485		0x0008
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH8_485_2_6	0x0009
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH081101V1	0x000A
+#define PCI_SUBDEVICE_ID_CONNECT_TECH_BH041101V1	0x000B
+
+
+#define PCI_VENDOR_ID_NVIDIA_SGS	0x12d2
+#define PCI_DEVICE_ID_NVIDIA_SGS_RIVA128 0x0018
+
+#define PCI_SUBVENDOR_ID_CHASE_PCIFAST		0x12E0
+#define PCI_SUBDEVICE_ID_CHASE_PCIFAST4		0x0031
+#define PCI_SUBDEVICE_ID_CHASE_PCIFAST8		0x0021
+#define PCI_SUBDEVICE_ID_CHASE_PCIFAST16	0x0011
+#define PCI_SUBDEVICE_ID_CHASE_PCIFAST16FMC	0x0041
+#define PCI_SUBVENDOR_ID_CHASE_PCIRAS		0x124D
+#define PCI_SUBDEVICE_ID_CHASE_PCIRAS4		0xF001
+#define PCI_SUBDEVICE_ID_CHASE_PCIRAS8		0xF010
+
+#define PCI_VENDOR_ID_AUREAL		0x12eb
+#define PCI_DEVICE_ID_AUREAL_VORTEX_1	0x0001
+#define PCI_DEVICE_ID_AUREAL_VORTEX_2	0x0002
+#define PCI_DEVICE_ID_AUREAL_ADVANTAGE	0x0003
+
+#define PCI_VENDOR_ID_ELECTRONICDESIGNGMBH 0x12f8
+#define PCI_DEVICE_ID_LML_33R10		0x8a02
+
+
+#define PCI_VENDOR_ID_SIIG		0x131f
+#define PCI_SUBVENDOR_ID_SIIG		0x131f
+#define PCI_DEVICE_ID_SIIG_1S_10x_550	0x1000
+#define PCI_DEVICE_ID_SIIG_1S_10x_650	0x1001
+#define PCI_DEVICE_ID_SIIG_1S_10x_850	0x1002
+#define PCI_DEVICE_ID_SIIG_1S1P_10x_550	0x1010
+#define PCI_DEVICE_ID_SIIG_1S1P_10x_650	0x1011
+#define PCI_DEVICE_ID_SIIG_1S1P_10x_850	0x1012
+#define PCI_DEVICE_ID_SIIG_1P_10x	0x1020
+#define PCI_DEVICE_ID_SIIG_2P_10x	0x1021
+#define PCI_DEVICE_ID_SIIG_2S_10x_550	0x1030
+#define PCI_DEVICE_ID_SIIG_2S_10x_650	0x1031
+#define PCI_DEVICE_ID_SIIG_2S_10x_850	0x1032
+#define PCI_DEVICE_ID_SIIG_2S1P_10x_550	0x1034
+#define PCI_DEVICE_ID_SIIG_2S1P_10x_650	0x1035
+#define PCI_DEVICE_ID_SIIG_2S1P_10x_850	0x1036
+#define PCI_DEVICE_ID_SIIG_4S_10x_550	0x1050
+#define PCI_DEVICE_ID_SIIG_4S_10x_650	0x1051
+#define PCI_DEVICE_ID_SIIG_4S_10x_850	0x1052
+#define PCI_DEVICE_ID_SIIG_1S_20x_550	0x2000
+#define PCI_DEVICE_ID_SIIG_1S_20x_650	0x2001
+#define PCI_DEVICE_ID_SIIG_1S_20x_850	0x2002
+#define PCI_DEVICE_ID_SIIG_1P_20x	0x2020
+#define PCI_DEVICE_ID_SIIG_2P_20x	0x2021
+#define PCI_DEVICE_ID_SIIG_2S_20x_550	0x2030
+#define PCI_DEVICE_ID_SIIG_2S_20x_650	0x2031
+#define PCI_DEVICE_ID_SIIG_2S_20x_850	0x2032
+#define PCI_DEVICE_ID_SIIG_2P1S_20x_550	0x2040
+#define PCI_DEVICE_ID_SIIG_2P1S_20x_650	0x2041
+#define PCI_DEVICE_ID_SIIG_2P1S_20x_850	0x2042
+#define PCI_DEVICE_ID_SIIG_1S1P_20x_550	0x2010
+#define PCI_DEVICE_ID_SIIG_1S1P_20x_650	0x2011
+#define PCI_DEVICE_ID_SIIG_1S1P_20x_850	0x2012
+#define PCI_DEVICE_ID_SIIG_4S_20x_550	0x2050
+#define PCI_DEVICE_ID_SIIG_4S_20x_650	0x2051
+#define PCI_DEVICE_ID_SIIG_4S_20x_850	0x2052
+#define PCI_DEVICE_ID_SIIG_2S1P_20x_550	0x2060
+#define PCI_DEVICE_ID_SIIG_2S1P_20x_650	0x2061
+#define PCI_DEVICE_ID_SIIG_2S1P_20x_850	0x2062
+#define PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL	0x2050
+
+#define PCI_VENDOR_ID_RADISYS		0x1331
+
+#define PCI_VENDOR_ID_DOMEX		0x134a
+#define PCI_DEVICE_ID_DOMEX_DMX3191D	0x0001
+
+#define PCI_VENDOR_ID_QUATECH		0x135C
+#define PCI_DEVICE_ID_QUATECH_QSC100	0x0010
+#define PCI_DEVICE_ID_QUATECH_DSC100	0x0020
+#define PCI_DEVICE_ID_QUATECH_ESC100D	0x0050
+#define PCI_DEVICE_ID_QUATECH_ESC100M	0x0060
+
+#define PCI_VENDOR_ID_SEALEVEL		0x135e
+#define PCI_DEVICE_ID_SEALEVEL_U530	0x7101
+#define PCI_DEVICE_ID_SEALEVEL_UCOMM2	0x7201
+#define PCI_DEVICE_ID_SEALEVEL_UCOMM422	0x7402
+#define PCI_DEVICE_ID_SEALEVEL_UCOMM232	0x7202
+#define PCI_DEVICE_ID_SEALEVEL_COMM4	0x7401
+#define PCI_DEVICE_ID_SEALEVEL_COMM8	0x7801
+#define PCI_DEVICE_ID_SEALEVEL_UCOMM8	0x7804
+
+#define PCI_VENDOR_ID_HYPERCOPE		0x1365
+#define PCI_DEVICE_ID_HYPERCOPE_PLX	0x9050
+#define PCI_SUBDEVICE_ID_HYPERCOPE_OLD_ERGO	0x0104
+#define PCI_SUBDEVICE_ID_HYPERCOPE_ERGO		0x0106
+#define PCI_SUBDEVICE_ID_HYPERCOPE_METRO	0x0107
+#define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2	0x0108
+
+#define PCI_VENDOR_ID_KAWASAKI		0x136b
+#define PCI_DEVICE_ID_MCHIP_KL5A72002	0xff01
+
+#define PCI_VENDOR_ID_CNET		0x1371
+#define PCI_DEVICE_ID_CNET_GIGACARD	0x434e
+
+#define PCI_VENDOR_ID_LMC		0x1376
+#define PCI_DEVICE_ID_LMC_HSSI		0x0003
+#define PCI_DEVICE_ID_LMC_DS3		0x0004
+#define PCI_DEVICE_ID_LMC_SSI		0x0005
+#define PCI_DEVICE_ID_LMC_T1		0x0006
+
+
+#define PCI_VENDOR_ID_NETGEAR		0x1385
+#define PCI_DEVICE_ID_NETGEAR_GA620	0x620a
+
+#define PCI_VENDOR_ID_APPLICOM		0x1389
+#define PCI_DEVICE_ID_APPLICOM_PCIGENERIC 0x0001
+#define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN 0x0002
+#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003
+
+#define PCI_VENDOR_ID_MOXA		0x1393
+#define PCI_DEVICE_ID_MOXA_RC7000	0x0001
+#define PCI_DEVICE_ID_MOXA_CP102	0x1020
+#define PCI_DEVICE_ID_MOXA_CP102UL	0x1021
+#define PCI_DEVICE_ID_MOXA_CP102U	0x1022
+#define PCI_DEVICE_ID_MOXA_C104		0x1040
+#define PCI_DEVICE_ID_MOXA_CP104U	0x1041
+#define PCI_DEVICE_ID_MOXA_CP104JU	0x1042
+#define PCI_DEVICE_ID_MOXA_CT114	0x1140
+#define PCI_DEVICE_ID_MOXA_CP114	0x1141
+#define PCI_DEVICE_ID_MOXA_CP118U	0x1180
+#define PCI_DEVICE_ID_MOXA_CP132	0x1320
+#define PCI_DEVICE_ID_MOXA_CP132U	0x1321
+#define PCI_DEVICE_ID_MOXA_CP134U	0x1340
+#define PCI_DEVICE_ID_MOXA_C168		0x1680
+#define PCI_DEVICE_ID_MOXA_CP168U	0x1681
+
+#define PCI_VENDOR_ID_CCD		0x1397
+#define PCI_DEVICE_ID_CCD_2BD0		0x2bd0
+#define PCI_DEVICE_ID_CCD_B000		0xb000
+#define PCI_DEVICE_ID_CCD_B006		0xb006
+#define PCI_DEVICE_ID_CCD_B007		0xb007
+#define PCI_DEVICE_ID_CCD_B008		0xb008
+#define PCI_DEVICE_ID_CCD_B009		0xb009
+#define PCI_DEVICE_ID_CCD_B00A		0xb00a
+#define PCI_DEVICE_ID_CCD_B00B		0xb00b
+#define PCI_DEVICE_ID_CCD_B00C		0xb00c
+#define PCI_DEVICE_ID_CCD_B100		0xb100
+
+#define PCI_VENDOR_ID_EXAR		0x13a8
+#define PCI_DEVICE_ID_EXAR_XR17C152	0x0152
+#define PCI_DEVICE_ID_EXAR_XR17C154	0x0154
+#define PCI_DEVICE_ID_EXAR_XR17C158	0x0158
+
+#define PCI_VENDOR_ID_MICROGATE		0x13c0
+#define PCI_DEVICE_ID_MICROGATE_USC	0x0010
+#define PCI_DEVICE_ID_MICROGATE_SCA	0x0030
+
+#define PCI_VENDOR_ID_3WARE		0x13C1
+#define PCI_DEVICE_ID_3WARE_1000	0x1000
+#define PCI_DEVICE_ID_3WARE_7000	0x1001
+#define PCI_DEVICE_ID_3WARE_9000	0x1002
+
+#define PCI_VENDOR_ID_IOMEGA		0x13ca
+#define PCI_DEVICE_ID_IOMEGA_BUZ	0x4231
+
+#define PCI_VENDOR_ID_ABOCOM		0x13D1
+#define PCI_DEVICE_ID_ABOCOM_2BD1       0x2BD1
+
+#define PCI_VENDOR_ID_CMEDIA		0x13f6
+#define PCI_DEVICE_ID_CMEDIA_CM8338A	0x0100
+#define PCI_DEVICE_ID_CMEDIA_CM8338B	0x0101
+#define PCI_DEVICE_ID_CMEDIA_CM8738	0x0111
+#define PCI_DEVICE_ID_CMEDIA_CM8738B	0x0112
+
+#define PCI_VENDOR_ID_LAVA		0x1407
+#define PCI_DEVICE_ID_LAVA_DSERIAL	0x0100 /* 2x 16550 */
+#define PCI_DEVICE_ID_LAVA_QUATRO_A	0x0101 /* 2x 16550, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_QUATRO_B	0x0102 /* 2x 16550, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_OCTO_A	0x0180 /* 4x 16550A, half of 8 port */
+#define PCI_DEVICE_ID_LAVA_OCTO_B	0x0181 /* 4x 16550A, half of 8 port */
+#define PCI_DEVICE_ID_LAVA_PORT_PLUS	0x0200 /* 2x 16650 */
+#define PCI_DEVICE_ID_LAVA_QUAD_A	0x0201 /* 2x 16650, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_QUAD_B	0x0202 /* 2x 16650, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_SSERIAL	0x0500 /* 1x 16550 */
+#define PCI_DEVICE_ID_LAVA_PORT_650	0x0600 /* 1x 16650 */
+#define PCI_DEVICE_ID_LAVA_PARALLEL	0x8000
+#define PCI_DEVICE_ID_LAVA_DUAL_PAR_A	0x8002 /* The Lava Dual Parallel is */
+#define PCI_DEVICE_ID_LAVA_DUAL_PAR_B	0x8003 /* two PCI devices on a card */
+#define PCI_DEVICE_ID_LAVA_BOCA_IOPPAR	0x8800
+
+#define PCI_VENDOR_ID_TIMEDIA		0x1409
+#define PCI_DEVICE_ID_TIMEDIA_1889	0x7168
+
+#define PCI_VENDOR_ID_ICE		0x1412
+#define PCI_DEVICE_ID_ICE_1712		0x1712
+#define PCI_DEVICE_ID_VT1724		0x1724
+
+#define PCI_VENDOR_ID_OXSEMI		0x1415
+#define PCI_DEVICE_ID_OXSEMI_12PCI840	0x8403
+#define PCI_DEVICE_ID_OXSEMI_16PCI954	0x9501
+#define PCI_DEVICE_ID_OXSEMI_16PCI95N	0x9511
+#define PCI_DEVICE_ID_OXSEMI_16PCI954PP	0x9513
+#define PCI_DEVICE_ID_OXSEMI_16PCI952	0x9521
+
+#define PCI_VENDOR_ID_SAMSUNG		0x144d
+
+
+#define PCI_VENDOR_ID_TITAN		0x14D2
+#define PCI_DEVICE_ID_TITAN_010L	0x8001
+#define PCI_DEVICE_ID_TITAN_100L	0x8010
+#define PCI_DEVICE_ID_TITAN_110L	0x8011
+#define PCI_DEVICE_ID_TITAN_200L	0x8020
+#define PCI_DEVICE_ID_TITAN_210L	0x8021
+#define PCI_DEVICE_ID_TITAN_400L	0x8040
+#define PCI_DEVICE_ID_TITAN_800L	0x8080
+#define PCI_DEVICE_ID_TITAN_100		0xA001
+#define PCI_DEVICE_ID_TITAN_200		0xA005
+#define PCI_DEVICE_ID_TITAN_400		0xA003
+#define PCI_DEVICE_ID_TITAN_800B	0xA004
+
+#define PCI_VENDOR_ID_PANACOM		0x14d4
+#define PCI_DEVICE_ID_PANACOM_QUADMODEM	0x0400
+#define PCI_DEVICE_ID_PANACOM_DUALMODEM	0x0402
+
+
+#define PCI_VENDOR_ID_AFAVLAB		0x14db
+#define PCI_DEVICE_ID_AFAVLAB_P028	0x2180
+#define PCI_DEVICE_ID_AFAVLAB_P030	0x2182
+
+#define PCI_VENDOR_ID_BROADCOM		0x14e4
+#define PCI_DEVICE_ID_TIGON3_5752	0x1600
+#define PCI_DEVICE_ID_TIGON3_5752M	0x1601
+#define PCI_DEVICE_ID_TIGON3_5700	0x1644
+#define PCI_DEVICE_ID_TIGON3_5701	0x1645
+#define PCI_DEVICE_ID_TIGON3_5702	0x1646
+#define PCI_DEVICE_ID_TIGON3_5703	0x1647
+#define PCI_DEVICE_ID_TIGON3_5704	0x1648
+#define PCI_DEVICE_ID_TIGON3_5704S_2	0x1649
+#define PCI_DEVICE_ID_NX2_5706		0x164a
+#define PCI_DEVICE_ID_NX2_5708		0x164c
+#define PCI_DEVICE_ID_TIGON3_5702FE	0x164d
+#define PCI_DEVICE_ID_TIGON3_5705	0x1653
+#define PCI_DEVICE_ID_TIGON3_5705_2	0x1654
+#define PCI_DEVICE_ID_TIGON3_5720	0x1658
+#define PCI_DEVICE_ID_TIGON3_5721	0x1659
+#define PCI_DEVICE_ID_TIGON3_5705M	0x165d
+#define PCI_DEVICE_ID_TIGON3_5705M_2	0x165e
+#define PCI_DEVICE_ID_TIGON3_5714	0x1668
+#define PCI_DEVICE_ID_TIGON3_5780	0x166a
+#define PCI_DEVICE_ID_TIGON3_5780S	0x166b
+#define PCI_DEVICE_ID_TIGON3_5705F	0x166e
+#define PCI_DEVICE_ID_TIGON3_5750	0x1676
+#define PCI_DEVICE_ID_TIGON3_5751	0x1677
+#define PCI_DEVICE_ID_TIGON3_5715	0x1678
+#define PCI_DEVICE_ID_TIGON3_5750M	0x167c
+#define PCI_DEVICE_ID_TIGON3_5751M	0x167d
+#define PCI_DEVICE_ID_TIGON3_5751F	0x167e
+#define PCI_DEVICE_ID_TIGON3_5782	0x1696
+#define PCI_DEVICE_ID_TIGON3_5788	0x169c
+#define PCI_DEVICE_ID_TIGON3_5789	0x169d
+#define PCI_DEVICE_ID_TIGON3_5702X	0x16a6
+#define PCI_DEVICE_ID_TIGON3_5703X	0x16a7
+#define PCI_DEVICE_ID_TIGON3_5704S	0x16a8
+#define PCI_DEVICE_ID_NX2_5706S		0x16aa
+#define PCI_DEVICE_ID_NX2_5708S		0x16ac
+#define PCI_DEVICE_ID_TIGON3_5702A3	0x16c6
+#define PCI_DEVICE_ID_TIGON3_5703A3	0x16c7
+#define PCI_DEVICE_ID_TIGON3_5781	0x16dd
+#define PCI_DEVICE_ID_TIGON3_5753	0x16f7
+#define PCI_DEVICE_ID_TIGON3_5753M	0x16fd
+#define PCI_DEVICE_ID_TIGON3_5753F	0x16fe
+#define PCI_DEVICE_ID_TIGON3_5901	0x170d
+#define PCI_DEVICE_ID_BCM4401B1		0x170c
+#define PCI_DEVICE_ID_TIGON3_5901_2	0x170e
+#define PCI_DEVICE_ID_BCM4401		0x4401
+#define PCI_DEVICE_ID_BCM4401B0		0x4402
+
+#define PCI_VENDOR_ID_TOPIC		0x151f
+#define PCI_DEVICE_ID_TOPIC_TP560	0x0000
+
+#define PCI_VENDOR_ID_ENE		0x1524
+#define PCI_DEVICE_ID_ENE_1211		0x1211
+#define PCI_DEVICE_ID_ENE_1225		0x1225
+#define PCI_DEVICE_ID_ENE_1410		0x1410
+#define PCI_DEVICE_ID_ENE_710		0x1411
+#define PCI_DEVICE_ID_ENE_712		0x1412
+#define PCI_DEVICE_ID_ENE_1420		0x1420
+#define PCI_DEVICE_ID_ENE_720		0x1421
+#define PCI_DEVICE_ID_ENE_722		0x1422
+
+#define PCI_VENDOR_ID_CHELSIO		0x1425
+
+
+#define PCI_VENDOR_ID_SYBA		0x1592
+#define PCI_DEVICE_ID_SYBA_2P_EPP	0x0782
+#define PCI_DEVICE_ID_SYBA_1P_ECP	0x0783
+
+#define PCI_VENDOR_ID_MORETON		0x15aa
+#define PCI_DEVICE_ID_RASTEL_2PORT	0x2000
+
+#define PCI_VENDOR_ID_ZOLTRIX		0x15b0
+#define PCI_DEVICE_ID_ZOLTRIX_2BD0	0x2bd0
+
+#define PCI_VENDOR_ID_MELLANOX		0x15b3
+#define PCI_DEVICE_ID_MELLANOX_TAVOR	0x5a44
+#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278
+#define PCI_DEVICE_ID_MELLANOX_ARBEL	0x6282
+#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c
+#define PCI_DEVICE_ID_MELLANOX_SINAI	0x6274
+
+#define PCI_VENDOR_ID_PDC		0x15e9
+
+
+#define PCI_VENDOR_ID_FARSITE           0x1619
+#define PCI_DEVICE_ID_FARSITE_T2P       0x0400
+#define PCI_DEVICE_ID_FARSITE_T4P       0x0440
+#define PCI_DEVICE_ID_FARSITE_T1U       0x0610
+#define PCI_DEVICE_ID_FARSITE_T2U       0x0620
+#define PCI_DEVICE_ID_FARSITE_T4U       0x0640
+#define PCI_DEVICE_ID_FARSITE_TE1       0x1610
+#define PCI_DEVICE_ID_FARSITE_TE1C      0x1612
+
+#define PCI_VENDOR_ID_SIBYTE		0x166d
+#define PCI_DEVICE_ID_BCM1250_HT	0x0002
+
+#define PCI_VENDOR_ID_NETCELL		0x169c
+#define PCI_DEVICE_ID_REVOLUTION	0x0044
+
+#define PCI_VENDOR_ID_LINKSYS		0x1737
+#define PCI_DEVICE_ID_LINKSYS_EG1064	0x1064
+
+#define PCI_VENDOR_ID_ALTIMA		0x173b
+#define PCI_DEVICE_ID_ALTIMA_AC1000	0x03e8
+#define PCI_DEVICE_ID_ALTIMA_AC1001	0x03e9
+#define PCI_DEVICE_ID_ALTIMA_AC9100	0x03ea
+#define PCI_DEVICE_ID_ALTIMA_AC1003	0x03eb
+
+#define PCI_VENDOR_ID_S2IO		0x17d5
+#define	PCI_DEVICE_ID_S2IO_WIN		0x5731
+#define	PCI_DEVICE_ID_S2IO_UNI		0x5831
+#define PCI_DEVICE_ID_HERC_WIN		0x5732
+#define PCI_DEVICE_ID_HERC_UNI		0x5832
+
+
+#define PCI_VENDOR_ID_SITECOM		0x182d
+#define PCI_DEVICE_ID_SITECOM_DC105V2	0x3069
+
+#define PCI_VENDOR_ID_TOPSPIN		0x1867
+
+#define PCI_VENDOR_ID_TDI               0x192E
+#define PCI_DEVICE_ID_TDI_EHCI          0x0101
+
+
+#define PCI_VENDOR_ID_TEKRAM		0x1de1
+#define PCI_DEVICE_ID_TEKRAM_DC290	0xdc29
+
+#define PCI_VENDOR_ID_HINT             0x3388
+#define PCI_DEVICE_ID_HINT_VXPROII_IDE 0x8013
+
+#define PCI_VENDOR_ID_3DLABS		0x3d3d
+#define PCI_DEVICE_ID_3DLABS_PERMEDIA2	0x0007
+#define PCI_DEVICE_ID_3DLABS_PERMEDIA2V	0x0009
+
+
+#define PCI_VENDOR_ID_AKS		0x416c
+#define PCI_DEVICE_ID_AKS_ALADDINCARD	0x0100
+
+#define PCI_VENDOR_ID_S3		0x5333
+#define PCI_DEVICE_ID_S3_TRIO		0x8811
+#define PCI_DEVICE_ID_S3_868		0x8880
+#define PCI_DEVICE_ID_S3_968		0x88f0
+#define PCI_DEVICE_ID_S3_SAVAGE4	0x8a25
+#define PCI_DEVICE_ID_S3_PROSAVAGE8	0x8d04
+#define PCI_DEVICE_ID_S3_SONICVIBES	0xca00
+
+#define PCI_VENDOR_ID_DUNORD		0x5544
+#define PCI_DEVICE_ID_DUNORD_I3000	0x0001
+
+
+#define PCI_VENDOR_ID_DCI		0x6666
+#define PCI_DEVICE_ID_DCI_PCCOM4	0x0001
+#define PCI_DEVICE_ID_DCI_PCCOM8	0x0002
+
+#define PCI_VENDOR_ID_INTEL		0x8086
+#define PCI_DEVICE_ID_INTEL_EESSC	0x0008
+#define PCI_DEVICE_ID_INTEL_PXHD_0	0x0320
+#define PCI_DEVICE_ID_INTEL_PXHD_1	0x0321
+#define PCI_DEVICE_ID_INTEL_PXH_0	0x0329
+#define PCI_DEVICE_ID_INTEL_PXH_1	0x032A
+#define PCI_DEVICE_ID_INTEL_PXHV	0x032C
+#define PCI_DEVICE_ID_INTEL_82375	0x0482
+#define PCI_DEVICE_ID_INTEL_82424	0x0483
+#define PCI_DEVICE_ID_INTEL_82378	0x0484
+#define PCI_DEVICE_ID_INTEL_I960	0x0960
+#define PCI_DEVICE_ID_INTEL_I960RM	0x0962
+#define PCI_DEVICE_ID_INTEL_82815_MC	0x1130
+#define PCI_DEVICE_ID_INTEL_82815_CGC	0x1132
+#define PCI_DEVICE_ID_INTEL_82092AA_0	0x1221
+#define PCI_DEVICE_ID_INTEL_7505_0	0x2550
+#define PCI_DEVICE_ID_INTEL_7205_0	0x255d
+#define PCI_DEVICE_ID_INTEL_82437	0x122d
+#define PCI_DEVICE_ID_INTEL_82371FB_0	0x122e
+#define PCI_DEVICE_ID_INTEL_82371FB_1	0x1230
+#define PCI_DEVICE_ID_INTEL_82371MX	0x1234
+#define PCI_DEVICE_ID_INTEL_82441	0x1237
+#define PCI_DEVICE_ID_INTEL_82380FB	0x124b
+#define PCI_DEVICE_ID_INTEL_82439	0x1250
+#define PCI_DEVICE_ID_INTEL_80960_RP	0x1960
+#define PCI_DEVICE_ID_INTEL_82840_HB	0x1a21
+#define PCI_DEVICE_ID_INTEL_82845_HB	0x1a30
+#define PCI_DEVICE_ID_INTEL_82801AA_0	0x2410
+#define PCI_DEVICE_ID_INTEL_82801AA_1	0x2411
+#define PCI_DEVICE_ID_INTEL_82801AA_3	0x2413
+#define PCI_DEVICE_ID_INTEL_82801AA_5	0x2415
+#define PCI_DEVICE_ID_INTEL_82801AA_6	0x2416
+#define PCI_DEVICE_ID_INTEL_82801AA_8	0x2418
+#define PCI_DEVICE_ID_INTEL_82801AB_0	0x2420
+#define PCI_DEVICE_ID_INTEL_82801AB_1	0x2421
+#define PCI_DEVICE_ID_INTEL_82801AB_3	0x2423
+#define PCI_DEVICE_ID_INTEL_82801AB_5	0x2425
+#define PCI_DEVICE_ID_INTEL_82801AB_6	0x2426
+#define PCI_DEVICE_ID_INTEL_82801AB_8	0x2428
+#define PCI_DEVICE_ID_INTEL_82801BA_0	0x2440
+#define PCI_DEVICE_ID_INTEL_82801BA_2	0x2443
+#define PCI_DEVICE_ID_INTEL_82801BA_4	0x2445
+#define PCI_DEVICE_ID_INTEL_82801BA_6	0x2448
+#define PCI_DEVICE_ID_INTEL_82801BA_8	0x244a
+#define PCI_DEVICE_ID_INTEL_82801BA_9	0x244b
+#define PCI_DEVICE_ID_INTEL_82801BA_10	0x244c
+#define PCI_DEVICE_ID_INTEL_82801BA_11	0x244e
+#define PCI_DEVICE_ID_INTEL_82801E_0	0x2450
+#define PCI_DEVICE_ID_INTEL_82801E_11	0x245b
+#define PCI_DEVICE_ID_INTEL_82801CA_0	0x2480
+#define PCI_DEVICE_ID_INTEL_82801CA_3	0x2483
+#define PCI_DEVICE_ID_INTEL_82801CA_5	0x2485
+#define PCI_DEVICE_ID_INTEL_82801CA_6	0x2486
+#define PCI_DEVICE_ID_INTEL_82801CA_10	0x248a
+#define PCI_DEVICE_ID_INTEL_82801CA_11	0x248b
+#define PCI_DEVICE_ID_INTEL_82801CA_12	0x248c
+#define PCI_DEVICE_ID_INTEL_82801DB_0	0x24c0
+#define PCI_DEVICE_ID_INTEL_82801DB_1	0x24c1
+#define PCI_DEVICE_ID_INTEL_82801DB_3	0x24c3
+#define PCI_DEVICE_ID_INTEL_82801DB_5	0x24c5
+#define PCI_DEVICE_ID_INTEL_82801DB_6	0x24c6
+#define PCI_DEVICE_ID_INTEL_82801DB_9	0x24c9
+#define PCI_DEVICE_ID_INTEL_82801DB_10	0x24ca
+#define PCI_DEVICE_ID_INTEL_82801DB_11	0x24cb
+#define PCI_DEVICE_ID_INTEL_82801DB_12  0x24cc
+#define PCI_DEVICE_ID_INTEL_82801EB_0	0x24d0
+#define PCI_DEVICE_ID_INTEL_82801EB_1	0x24d1
+#define PCI_DEVICE_ID_INTEL_82801EB_3	0x24d3
+#define PCI_DEVICE_ID_INTEL_82801EB_5	0x24d5
+#define PCI_DEVICE_ID_INTEL_82801EB_6	0x24d6
+#define PCI_DEVICE_ID_INTEL_82801EB_11	0x24db
+#define PCI_DEVICE_ID_INTEL_ESB_1	0x25a1
+#define PCI_DEVICE_ID_INTEL_ESB_2	0x25a2
+#define PCI_DEVICE_ID_INTEL_ESB_4	0x25a4
+#define PCI_DEVICE_ID_INTEL_ESB_5	0x25a6
+#define PCI_DEVICE_ID_INTEL_ESB_9	0x25ab
+#define PCI_DEVICE_ID_INTEL_82820_HB	0x2500
+#define PCI_DEVICE_ID_INTEL_82820_UP_HB	0x2501
+#define PCI_DEVICE_ID_INTEL_82850_HB	0x2530
+#define PCI_DEVICE_ID_INTEL_82860_HB	0x2531
+#define PCI_DEVICE_ID_INTEL_82845G_HB	0x2560
+#define PCI_DEVICE_ID_INTEL_82845G_IG	0x2562
+#define PCI_DEVICE_ID_INTEL_82865_HB	0x2570
+#define PCI_DEVICE_ID_INTEL_82865_IG	0x2572
+#define PCI_DEVICE_ID_INTEL_82875_HB	0x2578
+#define PCI_DEVICE_ID_INTEL_82915G_HB	0x2580
+#define PCI_DEVICE_ID_INTEL_82915G_IG	0x2582
+#define PCI_DEVICE_ID_INTEL_82915GM_HB	0x2590
+#define PCI_DEVICE_ID_INTEL_82915GM_IG	0x2592
+#define PCI_DEVICE_ID_INTEL_82945G_HB	0x2770
+#define PCI_DEVICE_ID_INTEL_82945G_IG	0x2772
+#define PCI_DEVICE_ID_INTEL_ICH6_0	0x2640
+#define PCI_DEVICE_ID_INTEL_ICH6_1	0x2641
+#define PCI_DEVICE_ID_INTEL_ICH6_2	0x2642
+#define PCI_DEVICE_ID_INTEL_ICH6_16	0x266a
+#define PCI_DEVICE_ID_INTEL_ICH6_17	0x266d
+#define PCI_DEVICE_ID_INTEL_ICH6_18	0x266e
+#define PCI_DEVICE_ID_INTEL_ICH6_19	0x266f
+#define PCI_DEVICE_ID_INTEL_ESB2_0	0x2670
+#define PCI_DEVICE_ID_INTEL_ESB2_14	0x2698
+#define PCI_DEVICE_ID_INTEL_ESB2_17	0x269b
+#define PCI_DEVICE_ID_INTEL_ESB2_18	0x269e
+#define PCI_DEVICE_ID_INTEL_ICH7_0	0x27b8
+#define PCI_DEVICE_ID_INTEL_ICH7_1	0x27b9
+#define PCI_DEVICE_ID_INTEL_ICH7_30	0x27b0
+#define PCI_DEVICE_ID_INTEL_ICH7_31	0x27bd
+#define PCI_DEVICE_ID_INTEL_ICH7_17	0x27da
+#define PCI_DEVICE_ID_INTEL_ICH7_19	0x27dd
+#define PCI_DEVICE_ID_INTEL_ICH7_20	0x27de
+#define PCI_DEVICE_ID_INTEL_ICH7_21	0x27df
+#define PCI_DEVICE_ID_INTEL_82855PM_HB	0x3340
+#define PCI_DEVICE_ID_INTEL_82830_HB	0x3575
+#define PCI_DEVICE_ID_INTEL_82830_CGC	0x3577
+#define PCI_DEVICE_ID_INTEL_82855GM_HB	0x3580
+#define PCI_DEVICE_ID_INTEL_82855GM_IG	0x3582
+#define PCI_DEVICE_ID_INTEL_E7520_MCH	0x3590
+#define PCI_DEVICE_ID_INTEL_E7320_MCH	0x3592
+#define PCI_DEVICE_ID_INTEL_MCH_PA	0x3595
+#define PCI_DEVICE_ID_INTEL_MCH_PA1	0x3596
+#define PCI_DEVICE_ID_INTEL_MCH_PB	0x3597
+#define PCI_DEVICE_ID_INTEL_MCH_PB1	0x3598
+#define PCI_DEVICE_ID_INTEL_MCH_PC	0x3599
+#define PCI_DEVICE_ID_INTEL_MCH_PC1	0x359a
+#define PCI_DEVICE_ID_INTEL_E7525_MCH	0x359e
+#define PCI_DEVICE_ID_INTEL_82371SB_0	0x7000
+#define PCI_DEVICE_ID_INTEL_82371SB_1	0x7010
+#define PCI_DEVICE_ID_INTEL_82371SB_2	0x7020
+#define PCI_DEVICE_ID_INTEL_82437VX	0x7030
+#define PCI_DEVICE_ID_INTEL_82439TX	0x7100
+#define PCI_DEVICE_ID_INTEL_82371AB_0	0x7110
+#define PCI_DEVICE_ID_INTEL_82371AB	0x7111
+#define PCI_DEVICE_ID_INTEL_82371AB_2	0x7112
+#define PCI_DEVICE_ID_INTEL_82371AB_3	0x7113
+#define PCI_DEVICE_ID_INTEL_82810_MC1	0x7120
+#define PCI_DEVICE_ID_INTEL_82810_IG1	0x7121
+#define PCI_DEVICE_ID_INTEL_82810_MC3	0x7122
+#define PCI_DEVICE_ID_INTEL_82810_IG3	0x7123
+#define PCI_DEVICE_ID_INTEL_82810E_MC	0x7124
+#define PCI_DEVICE_ID_INTEL_82810E_IG	0x7125
+#define PCI_DEVICE_ID_INTEL_82443LX_0	0x7180
+#define PCI_DEVICE_ID_INTEL_82443LX_1	0x7181
+#define PCI_DEVICE_ID_INTEL_82443BX_0	0x7190
+#define PCI_DEVICE_ID_INTEL_82443BX_1	0x7191
+#define PCI_DEVICE_ID_INTEL_82443BX_2	0x7192
+#define PCI_DEVICE_ID_INTEL_440MX	0x7195
+#define PCI_DEVICE_ID_INTEL_440MX_6	0x7196
+#define PCI_DEVICE_ID_INTEL_82443MX_0	0x7198
+#define PCI_DEVICE_ID_INTEL_82443MX_1	0x7199
+#define PCI_DEVICE_ID_INTEL_82443MX_3	0x719b
+#define PCI_DEVICE_ID_INTEL_82443GX_0	0x71a0
+#define PCI_DEVICE_ID_INTEL_82443GX_2	0x71a2
+#define PCI_DEVICE_ID_INTEL_82372FB_1	0x7601
+#define PCI_DEVICE_ID_INTEL_82454GX	0x84c4
+#define PCI_DEVICE_ID_INTEL_82451NX	0x84ca
+#define PCI_DEVICE_ID_INTEL_82454NX     0x84cb
+#define PCI_DEVICE_ID_INTEL_84460GX	0x84ea
+#define PCI_DEVICE_ID_INTEL_IXP4XX	0x8500
+#define PCI_DEVICE_ID_INTEL_IXP2800	0x9004
+#define PCI_DEVICE_ID_INTEL_S21152BB	0xb152
+
+#define PCI_VENDOR_ID_COMPUTONE		0x8e0e
+#define PCI_DEVICE_ID_COMPUTONE_IP2EX	0x0291
+#define PCI_DEVICE_ID_COMPUTONE_PG	0x0302
+#define PCI_SUBVENDOR_ID_COMPUTONE	0x8e0e
+#define PCI_SUBDEVICE_ID_COMPUTONE_PG4	0x0001
+#define PCI_SUBDEVICE_ID_COMPUTONE_PG8	0x0002
+#define PCI_SUBDEVICE_ID_COMPUTONE_PG6	0x0003
+
+#define PCI_VENDOR_ID_KTI		0x8e2e
+
+#define PCI_VENDOR_ID_ADAPTEC		0x9004
+#define PCI_DEVICE_ID_ADAPTEC_7810	0x1078
+#define PCI_DEVICE_ID_ADAPTEC_7821	0x2178
+#define PCI_DEVICE_ID_ADAPTEC_38602	0x3860
+#define PCI_DEVICE_ID_ADAPTEC_7850	0x5078
+#define PCI_DEVICE_ID_ADAPTEC_7855	0x5578
+#define PCI_DEVICE_ID_ADAPTEC_3860	0x6038
+#define PCI_DEVICE_ID_ADAPTEC_1480A	0x6075
+#define PCI_DEVICE_ID_ADAPTEC_7860	0x6078
+#define PCI_DEVICE_ID_ADAPTEC_7861	0x6178
+#define PCI_DEVICE_ID_ADAPTEC_7870	0x7078
+#define PCI_DEVICE_ID_ADAPTEC_7871	0x7178
+#define PCI_DEVICE_ID_ADAPTEC_7872	0x7278
+#define PCI_DEVICE_ID_ADAPTEC_7873	0x7378
+#define PCI_DEVICE_ID_ADAPTEC_7874	0x7478
+#define PCI_DEVICE_ID_ADAPTEC_7895	0x7895
+#define PCI_DEVICE_ID_ADAPTEC_7880	0x8078
+#define PCI_DEVICE_ID_ADAPTEC_7881	0x8178
+#define PCI_DEVICE_ID_ADAPTEC_7882	0x8278
+#define PCI_DEVICE_ID_ADAPTEC_7883	0x8378
+#define PCI_DEVICE_ID_ADAPTEC_7884	0x8478
+#define PCI_DEVICE_ID_ADAPTEC_7885	0x8578
+#define PCI_DEVICE_ID_ADAPTEC_7886	0x8678
+#define PCI_DEVICE_ID_ADAPTEC_7887	0x8778
+#define PCI_DEVICE_ID_ADAPTEC_7888	0x8878
+
+#define PCI_VENDOR_ID_ADAPTEC2		0x9005
+#define PCI_DEVICE_ID_ADAPTEC2_2940U2	0x0010
+#define PCI_DEVICE_ID_ADAPTEC2_2930U2	0x0011
+#define PCI_DEVICE_ID_ADAPTEC2_7890B	0x0013
+#define PCI_DEVICE_ID_ADAPTEC2_7890	0x001f
+#define PCI_DEVICE_ID_ADAPTEC2_3940U2	0x0050
+#define PCI_DEVICE_ID_ADAPTEC2_3950U2D	0x0051
+#define PCI_DEVICE_ID_ADAPTEC2_7896	0x005f
+#define PCI_DEVICE_ID_ADAPTEC2_7892A	0x0080
+#define PCI_DEVICE_ID_ADAPTEC2_7892B	0x0081
+#define PCI_DEVICE_ID_ADAPTEC2_7892D	0x0083
+#define PCI_DEVICE_ID_ADAPTEC2_7892P	0x008f
+#define PCI_DEVICE_ID_ADAPTEC2_7899A	0x00c0
+#define PCI_DEVICE_ID_ADAPTEC2_7899B	0x00c1
+#define PCI_DEVICE_ID_ADAPTEC2_7899D	0x00c3
+#define PCI_DEVICE_ID_ADAPTEC2_7899P	0x00cf
+#define PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN   0x0500
+#define PCI_DEVICE_ID_ADAPTEC2_SCAMP	0x0503
+
+
+#define PCI_VENDOR_ID_HOLTEK		0x9412
+#define PCI_DEVICE_ID_HOLTEK_6565	0x6565
+
+#define PCI_VENDOR_ID_NETMOS		0x9710
+#define PCI_DEVICE_ID_NETMOS_9705	0x9705
+#define PCI_DEVICE_ID_NETMOS_9715	0x9715
+#define PCI_DEVICE_ID_NETMOS_9735	0x9735
+#define PCI_DEVICE_ID_NETMOS_9745	0x9745
+#define PCI_DEVICE_ID_NETMOS_9755	0x9755
+#define PCI_DEVICE_ID_NETMOS_9805	0x9805
+#define PCI_DEVICE_ID_NETMOS_9815	0x9815
+#define PCI_DEVICE_ID_NETMOS_9835	0x9835
+#define PCI_DEVICE_ID_NETMOS_9845	0x9845
+#define PCI_DEVICE_ID_NETMOS_9855	0x9855
+
+#define PCI_SUBVENDOR_ID_EXSYS		0xd84d
+#define PCI_SUBDEVICE_ID_EXSYS_4014	0x4014
+#define PCI_SUBDEVICE_ID_EXSYS_4055	0x4055
+
+#define PCI_VENDOR_ID_TIGERJET		0xe159
+#define PCI_DEVICE_ID_TIGERJET_300	0x0001
+#define PCI_DEVICE_ID_TIGERJET_100	0x0002
+
+#define PCI_VENDOR_ID_TTTECH		0x0357
+#define PCI_DEVICE_ID_TTTECH_MC322	0x000A
+
+#define PCI_VENDOR_ID_XILINX_RME	0xea60
+#define PCI_DEVICE_ID_RME_DIGI32	0x9896
+#define PCI_DEVICE_ID_RME_DIGI32_PRO	0x9897
+#define PCI_DEVICE_ID_RME_DIGI32_8	0x9898
+
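For context on how such constants are consumed: a driver matches hardware by
putting vendor/device pairs into a pci_device_id table. A minimal sketch,
assuming a hypothetical driver for the D-Link DGE-510T defined above
(example_pci_tbl and its driver are invented names, not part of this patch):

	#include <linux/module.h>
	#include <linux/pci.h>

	/* Match the DGE-510T using the IDs defined in pci_ids.h above. */
	static struct pci_device_id example_pci_tbl[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T) },
		{ 0, }	/* terminating all-zero entry */
	};
	MODULE_DEVICE_TABLE(pci, example_pci_tbl);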
diff -urN linux-2.6.15/include/linux/poll.h linux-2.6.15-no1/include/linux/poll.h
--- linux-2.6.15/include/linux/poll.h	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/include/linux/poll.h	2006-01-26 15:41:14.008758112 +0000
@@ -11,6 +11,15 @@
 #include <linux/mm.h>
 #include <asm/uaccess.h>
 
+/* At most ~832 bytes of stack space are used in sys_select/sys_poll before
+   any additional memory is allocated. */
+#define MAX_STACK_ALLOC 832
+#define FRONTEND_STACK_ALLOC  256
+#define SELECT_STACK_ALLOC    FRONTEND_STACK_ALLOC
+#define POLL_STACK_ALLOC      FRONTEND_STACK_ALLOC
+#define WQUEUES_STACK_ALLOC   (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
+#define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))
+
 struct poll_table_struct;
 
 /* 
@@ -33,6 +42,12 @@
 	pt->qproc = qproc;
 }
 
+struct poll_table_entry {
+	struct file * filp;
+	wait_queue_t wait;
+	wait_queue_head_t * wait_address;
+};
+
 /*
  * Structures and helpers for sys_poll/sys_poll
  */
@@ -40,6 +55,8 @@
 	poll_table pt;
 	struct poll_table_page * table;
 	int error;
+	int inline_index;
+	struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
 };
 
 extern void poll_initwait(struct poll_wqueues *pwq);
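The new fields above serve the select/poll fast path: wait-queue entries for
the first few file descriptors can live in the on-stack inline_entries[]
array, and the old kmalloc()-backed poll_table_page machinery is only needed
once those run out. A simplified sketch of how an entry allocator can consume
them (alloc_table_entry() is a hypothetical stand-in for the existing slow
path, not this patch's exact code):

	static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
	{
		/* Fast path: hand out one of the N_INLINE_POLL_ENTRIES slots
		 * embedded in the caller's on-stack poll_wqueues. */
		if (p->inline_index < N_INLINE_POLL_ENTRIES)
			return p->inline_entries + p->inline_index++;

		/* Slots exhausted: fall back to the page-based allocator. */
		return alloc_table_entry(p);
	}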
diff -urN linux-2.6.15/include/linux/sched.h linux-2.6.15-no1/include/linux/sched.h
--- linux-2.6.15/include/linux/sched.h	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/include/linux/sched.h	2006-01-26 15:41:14.009757960 +0000
@@ -38,6 +38,7 @@
 #include <linux/auxvec.h>	/* For AT_VECTOR_SIZE */
 
 struct exec_domain;
+struct bio;
 
 /*
  * cloning flags:
@@ -823,6 +824,9 @@
 /* journalling filesystem info */
 	void *journal_info;
 
+/* stacked block device info */
+	struct bio *bio_list, **bio_tail;
+
 /* VM state */
 	struct reclaim_state *reclaim_state;
 
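For context: bio_list/bio_tail give stacked block drivers (md, dm) a per-task
queue so that the submission path can turn recursive bio submission into
iteration and keep kernel stack depth bounded. A simplified sketch of the
queuing step, under the assumption that generic_make_request() tests these
fields (defer_if_recursing() is an invented helper name):

	#include <linux/bio.h>
	#include <linux/sched.h>

	/* Returns 1 if the bio was queued on the per-task list because we
	 * are already inside a submission on this task; the top-level
	 * caller drains the list iteratively instead of recursing. */
	static int defer_if_recursing(struct bio *bio)
	{
		if (current->bio_list) {
			bio->bi_next = NULL;
			*(current->bio_tail) = bio;
			current->bio_tail = &bio->bi_next;
			return 1;
		}
		return 0;
	}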
diff -urN linux-2.6.15/scripts/lxdialog/colors.h linux-2.6.15-no1/scripts/lxdialog/colors.h
--- linux-2.6.15/scripts/lxdialog/colors.h	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/scripts/lxdialog/colors.h	2006-01-26 15:41:14.009757960 +0000
@@ -38,7 +38,7 @@
 #define DIALOG_BG                    COLOR_WHITE
 #define DIALOG_HL                    FALSE
 
-#define TITLE_FG                     COLOR_YELLOW
+#define TITLE_FG                     COLOR_BLUE
 #define TITLE_BG                     COLOR_WHITE
 #define TITLE_HL                     TRUE
 
@@ -110,7 +110,7 @@
 #define ITEM_SELECTED_BG             COLOR_BLUE
 #define ITEM_SELECTED_HL             TRUE
 
-#define TAG_FG                       COLOR_YELLOW
+#define TAG_FG                       COLOR_BLUE
 #define TAG_BG                       COLOR_WHITE
 #define TAG_HL                       TRUE
 
@@ -118,7 +118,7 @@
 #define TAG_SELECTED_BG              COLOR_BLUE
 #define TAG_SELECTED_HL              TRUE
 
-#define TAG_KEY_FG                   COLOR_YELLOW
+#define TAG_KEY_FG                   COLOR_BLUE
 #define TAG_KEY_BG                   COLOR_WHITE
 #define TAG_KEY_HL                   TRUE
 
diff -urN linux-2.6.15/sound/core/info.c linux-2.6.15-no1/sound/core/info.c
--- linux-2.6.15/sound/core/info.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/sound/core/info.c	2006-01-26 15:41:14.010757808 +0000
@@ -444,8 +444,8 @@
 	return mask;
 }
 
-static inline int _snd_info_entry_ioctl(struct inode *inode, struct file *file,
-					unsigned int cmd, unsigned long arg)
+static long snd_info_entry_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
 {
 	snd_info_private_data_t *data;
 	struct snd_info_entry *entry;
@@ -465,17 +465,6 @@
 	return -ENOTTY;
 }
 
-/* FIXME: need to unlock BKL to allow preemption */
-static int snd_info_entry_ioctl(struct inode *inode, struct file *file,
-				unsigned int cmd, unsigned long arg)
-{
-	int err;
-	unlock_kernel();
-	err = _snd_info_entry_ioctl(inode, file, cmd, arg);
-	lock_kernel();
-	return err;
-}
-
 static int snd_info_entry_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct inode *inode = file->f_dentry->d_inode;
@@ -499,15 +488,15 @@
 
 static struct file_operations snd_info_entry_operations =
 {
-	.owner =	THIS_MODULE,
-	.llseek =	snd_info_entry_llseek,
-	.read =		snd_info_entry_read,
-	.write =	snd_info_entry_write,
-	.poll =		snd_info_entry_poll,
-	.ioctl =	snd_info_entry_ioctl,
-	.mmap =		snd_info_entry_mmap,
-	.open =		snd_info_entry_open,
-	.release =	snd_info_entry_release,
+	.owner =		THIS_MODULE,
+	.llseek =		snd_info_entry_llseek,
+	.read =			snd_info_entry_read,
+	.write =		snd_info_entry_write,
+	.poll =			snd_info_entry_poll,
+	.unlocked_ioctl =	snd_info_entry_ioctl,
+	.mmap =			snd_info_entry_mmap,
+	.open =			snd_info_entry_open,
+	.release =		snd_info_entry_release,
 };
 
 /**
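For context: switching from .ioctl to .unlocked_ioctl, as done above, changes
the handler's shape (no inode argument, long return) and drops the implicit
Big Kernel Lock, so any serialization the handler needs becomes its own
responsibility. A minimal sketch of that shape, with an invented example_sem
providing the driver-private locking (2.6.15-era semaphore used as a mutex):

	#include <linux/fs.h>
	#include <asm/semaphore.h>

	static DECLARE_MUTEX(example_sem);

	static long example_unlocked_ioctl(struct file *file, unsigned int cmd,
					   unsigned long arg)
	{
		long err = -ENOTTY;

		down(&example_sem);	/* driver-private lock replaces the BKL */
		switch (cmd) {
		/* device-specific commands would be handled here */
		}
		up(&example_sem);
		return err;
	}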
diff -urN linux-2.6.15/sound/pci/hda/hda_intel.c linux-2.6.15-no1/sound/pci/hda/hda_intel.c
--- linux-2.6.15/sound/pci/hda/hda_intel.c	2006-01-03 03:21:10.000000000 +0000
+++ linux-2.6.15-no1/sound/pci/hda/hda_intel.c	2006-01-26 15:41:14.011757656 +0000
@@ -70,6 +70,7 @@
 			 "{Intel, ICH6M},"
 			 "{Intel, ICH7},"
 			 "{Intel, ESB2},"
+			 "{Intel, ICH8},"
 			 "{ATI, SB450},"
 			 "{VIA, VT8251},"
 			 "{VIA, VT8237A},"
@@ -1603,6 +1604,7 @@
 	{ 0x8086, 0x2668, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH6 */
 	{ 0x8086, 0x27d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH7 */
 	{ 0x8086, 0x269a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ESB2 */
+	{ 0x8086, 0x284b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH8 */
 	{ 0x1002, 0x437b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATI }, /* ATI SB450 */
 	{ 0x1106, 0x3288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_VIA }, /* VIA VT8251/VT8237A */
 	{ 0x1039, 0x7502, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_SIS }, /* SIS966 */
