diff -urN oldtree/arch/i386/Makefile newtree/arch/i386/Makefile
--- oldtree/arch/i386/Makefile	2006-06-18 01:49:35.000000000 +0000
+++ newtree/arch/i386/Makefile	2006-06-21 14:56:33.355812000 +0000
@@ -38,6 +38,10 @@
 include $(srctree)/arch/i386/Makefile.cpu
 
 cflags-$(CONFIG_REGPARM) += -mregparm=3
+#
+# Prevent tail-call optimizations, to get clearer backtraces:
+#
+cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
 
 # temporary until string.h is fixed
 cflags-y += -ffreestanding
diff -urN oldtree/arch/x86_64/Makefile newtree/arch/x86_64/Makefile
--- oldtree/arch/x86_64/Makefile	2006-06-18 01:49:35.000000000 +0000
+++ newtree/arch/x86_64/Makefile	2006-06-21 14:56:33.359812250 +0000
@@ -36,6 +36,11 @@
 cflags-y += -mcmodel=kernel
 cflags-y += -pipe
 cflags-$(CONFIG_REORDER) += -ffunction-sections
+#
+# Prevent tail-call optimizations, to get clearer backtraces:
+#
+cflags-$(CONFIG_FRAME_POINTER) += -fno-optimize-sibling-calls
+
 # this makes reading assembly source easier, but produces worse code
 # actually it makes the kernel smaller too.
 cflags-y += -fno-reorder-blocks
diff -urN oldtree/fs/super.c newtree/fs/super.c
--- oldtree/fs/super.c	2006-06-21 13:11:56.953739500 +0000
+++ newtree/fs/super.c	2006-06-21 14:56:33.367812750 +0000
@@ -71,7 +71,7 @@
 		INIT_LIST_HEAD(&s->s_instances);
 		INIT_HLIST_HEAD(&s->s_anon);
 		INIT_LIST_HEAD(&s->s_inodes);
-		init_rwsem_key(&s->s_umount, &type->s_umount_key);
+		init_rwsem_key(&s->s_umount, type->name, &type->s_umount_key);
 		/*
 		 * The locking rules for s_lock are up to the
 		 * filesystem. For example ext3fs has different
diff -urN oldtree/include/asm-i386/rwsem.h newtree/include/asm-i386/rwsem.h
--- oldtree/include/asm-i386/rwsem.h	2006-06-21 13:12:01.134000750 +0000
+++ newtree/include/asm-i386/rwsem.h	2006-06-21 14:56:33.371813000 +0000
@@ -62,23 +62,11 @@
 #define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
-#if RWSEM_DEBUG
-	int			debug;
-#endif
 #ifdef CONFIG_DEBUG_RWSEM_ALLOC
 	struct lockdep_map dep_map;
 #endif
 };
 
-/*
- * initialisation
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT      , 0
-#else
-#define __RWSEM_DEBUG_INIT	/* */
-#endif
-
 #ifdef CONFIG_DEBUG_RWSEM_ALLOC
 # define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
 #else
@@ -88,7 +76,7 @@
 
 #define __RWSEM_INITIALIZER(name) \
 { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
-	__RWSEM_DEBUG_INIT __RWSEM_DEP_MAP_INIT(name) }
+	__RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
diff -urN oldtree/include/linux/hardirq.h newtree/include/linux/hardirq.h
--- oldtree/include/linux/hardirq.h	2006-06-21 13:12:02.790104250 +0000
+++ newtree/include/linux/hardirq.h	2006-06-21 14:56:33.375813250 +0000
@@ -86,13 +86,6 @@
 # define synchronize_irq(irq)	barrier()
 #endif
 
-#define nmi_enter()		irq_enter()
-#define nmi_exit()					\
-	do {						\
-		sub_preempt_count(HARDIRQ_OFFSET);	\
-		trace_hardirq_exit();			\
-	} while (0)
-
 struct task_struct;
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
@@ -114,6 +107,22 @@
 		trace_hardirq_enter();			\
 	} while (0)
 
+/*
+ * Exit irq context without processing softirqs:
+ */
+#define __irq_exit()					\
+	do {						\
+		trace_hardirq_exit();			\
+		account_system_vtime(current);		\
+		sub_preempt_count(HARDIRQ_OFFSET);	\
+	} while (0)
+
+/*
+ * Exit irq context and process softirqs if needed:
+ */
 extern void irq_exit(void);
 
+#define nmi_enter()		irq_enter()
+#define nmi_exit()		__irq_exit()
+
 #endif /* LINUX_HARDIRQ_H */
diff -urN oldtree/include/linux/rwsem-spinlock.h newtree/include/linux/rwsem-spinlock.h
--- oldtree/include/linux/rwsem-spinlock.h	2006-06-21 13:12:03.178128500 +0000
+++ newtree/include/linux/rwsem-spinlock.h	2006-06-21 14:56:33.383813750 +0000
@@ -32,23 +32,11 @@
 	__s32			activity;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
-#if RWSEM_DEBUG
-	int			debug;
-#endif
 #ifdef CONFIG_DEBUG_RWSEM_ALLOC
 	struct lockdep_map dep_map;
 #endif
 };
 
-/*
- * initialisation
- */
-#if RWSEM_DEBUG
-#define __RWSEM_DEBUG_INIT      , 0
-#else
-#define __RWSEM_DEBUG_INIT	/* */
-#endif
-
 #ifdef CONFIG_DEBUG_RWSEM_ALLOC
 # define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
 #else
@@ -56,7 +44,7 @@
 #endif
 
 #define __RWSEM_INITIALIZER(name) \
-{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEBUG_INIT __RWSEM_DEP_MAP_INIT(name) }
+{ 0, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
diff -urN oldtree/include/linux/rwsem.h newtree/include/linux/rwsem.h
--- oldtree/include/linux/rwsem.h	2006-06-21 13:12:03.178128500 +0000
+++ newtree/include/linux/rwsem.h	2006-06-21 14:56:33.387814000 +0000
@@ -9,8 +9,6 @@
 
 #include <linux/linkage.h>
 
-#define RWSEM_DEBUG 0
-
 #ifdef __KERNEL__
 
 #include <linux/types.h>
@@ -31,157 +29,66 @@
  * example there are per-CPU dynamically allocated locks:
  */
 #ifdef CONFIG_DEBUG_RWSEM_ALLOC
-#define init_rwsem_key(sem, key)				\
-	__init_rwsem((sem), #sem, key)
+#define init_rwsem_key(sem, name, key)				\
+	__init_rwsem((sem), name, key)
 #else
-# define init_rwsem_key(sem, key)	init_rwsem(sem)
-#endif
-
-#ifndef rwsemtrace
-#if RWSEM_DEBUG
-extern void FASTCALL(rwsemtrace(struct rw_semaphore *sem, const char *str));
-#else
-#define rwsemtrace(SEM,FMT)
-#endif
+# define init_rwsem_key(sem, name, key)		init_rwsem(sem)
 #endif
 
 /*
  * lock for reading
  */
-static inline void down_read(struct rw_semaphore *sem)
-{
-	might_sleep();
-	rwsem_acquire_read(&sem->dep_map, 0, 0, _THIS_IP_);
-
-	rwsemtrace(sem,"Entering down_read");
-	__down_read(sem);
-	rwsemtrace(sem,"Leaving down_read");
-}
-
-/*
- * Take a lock when not the owner will release it:
- */
-static inline void down_read_non_owner(struct rw_semaphore *sem)
-{
-	might_sleep();
-
-	rwsemtrace(sem,"Entering down_read");
-	__down_read(sem);
-	rwsemtrace(sem,"Leaving down_read");
-}
+extern void down_read(struct rw_semaphore *sem);
 
 /*
  * trylock for reading -- returns 1 if successful, 0 if contention
  */
-static inline int down_read_trylock(struct rw_semaphore *sem)
-{
-	int ret;
-	rwsemtrace(sem,"Entering down_read_trylock");
-	ret = __down_read_trylock(sem);
-	if (ret == 1)
-		rwsem_acquire_read(&sem->dep_map, 0, 1, _THIS_IP_);
-	rwsemtrace(sem,"Leaving down_read_trylock");
-	return ret;
-}
+extern int down_read_trylock(struct rw_semaphore *sem);
 
 /*
  * lock for writing
  */
-static inline void down_write(struct rw_semaphore *sem)
-{
-	might_sleep();
-	rwsem_acquire(&sem->dep_map, 0, 0, _THIS_IP_);
-
-	rwsemtrace(sem,"Entering down_write");
-	__down_write(sem);
-	rwsemtrace(sem,"Leaving down_write");
-}
-
-/*
- * lock for writing
- */
-#ifdef CONFIG_DEBUG_RWSEM_ALLOC
-static inline void down_write_nested(struct rw_semaphore *sem, int subtype)
-{
-	might_sleep();
-	rwsem_acquire(&sem->dep_map, subtype, 0, _THIS_IP_);
-
-	rwsemtrace(sem,"Entering down_write_nested");
-	__down_write_nested(sem, subtype);
-	rwsemtrace(sem,"Leaving down_write_nested");
-}
-#else
-static inline void down_write_nested(struct rw_semaphore *sem, int subtype)
-{
-	down_write(sem);
-}
-#endif
+extern void down_write(struct rw_semaphore *sem);
 
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
-static inline int down_write_trylock(struct rw_semaphore *sem)
-{
-	int ret;
-	rwsemtrace(sem,"Entering down_write_trylock");
-	ret = __down_write_trylock(sem);
-	if (ret == 1)
-		rwsem_acquire(&sem->dep_map, 0, 0, _THIS_IP_);
-	rwsemtrace(sem,"Leaving down_write_trylock");
-	return ret;
-}
+extern int down_write_trylock(struct rw_semaphore *sem);
 
 /*
  * release a read lock
  */
-static inline void up_read(struct rw_semaphore *sem)
-{
-	rwsem_release(&sem->dep_map, 1, _THIS_IP_);
-
-	rwsemtrace(sem,"Entering up_read");
-	__up_read(sem);
-	rwsemtrace(sem,"Leaving up_read");
-}
-
-static inline void up_read_non_nested(struct rw_semaphore *sem)
-{
-	rwsem_release(&sem->dep_map, 0, _THIS_IP_);
-	__up_read(sem);
-}
-
-/*
- * Not the owner will release it:
- */
-static inline void up_read_non_owner(struct rw_semaphore *sem)
-{
-	__up_read(sem);
-}
+extern void up_read(struct rw_semaphore *sem);
 
 /*
  * release a write lock
  */
-static inline void up_write(struct rw_semaphore *sem)
-{
-	rwsem_release(&sem->dep_map, 1, _THIS_IP_);
-
-	rwsemtrace(sem,"Entering up_write");
-	__up_write(sem);
-	rwsemtrace(sem,"Leaving up_write");
-}
+extern void up_write(struct rw_semaphore *sem);
 
 /*
  * downgrade write lock to read lock
  */
-static inline void downgrade_write(struct rw_semaphore *sem)
-{
-	/*
-	 * lockdep: a downgraded write will live on as a write
-	 * dependency.
-	 */
-	rwsemtrace(sem,"Entering downgrade_write");
-	__downgrade_write(sem);
-	rwsemtrace(sem,"Leaving downgrade_write");
-}
+extern void downgrade_write(struct rw_semaphore *sem);
+
+#ifdef CONFIG_DEBUG_RWSEM_ALLOC
+/*
+ * nested locking:
+ */
+extern void down_read_nested(struct rw_semaphore *sem, int subtype);
+extern void down_write_nested(struct rw_semaphore *sem, int subtype);
+/*
+ * Take/release a lock when not the owner will release it:
+ */
+extern void down_read_non_owner(struct rw_semaphore *sem);
+extern void up_read_non_owner(struct rw_semaphore *sem);
+extern void up_read_non_nested(struct rw_semaphore *sem);
+#else
+# define down_read_nested(sem, subtype)		down_read(sem)
+# define down_write_nested(sem, subtype)	down_write(sem)
+# define down_read_non_owner(sem)		down_read(sem)
+# define up_read_non_owner(sem)			up_read(sem)
+# define up_read_non_nested(sem)		up_read(sem)
+#endif
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_RWSEM_H */
diff -urN oldtree/kernel/Makefile newtree/kernel/Makefile
--- oldtree/kernel/Makefile	2006-06-21 13:12:03.746164000 +0000
+++ newtree/kernel/Makefile	2006-06-21 14:56:33.391814250 +0000
@@ -8,7 +8,7 @@
 	    signal.o sys.o kmod.o workqueue.o pid.o \
 	    rcupdate.o extable.o params.o posix-timers.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
-	    hrtimer.o nsproxy.o stacktrace.o
+	    hrtimer.o nsproxy.o stacktrace.o rwsem.o
 
 obj-y += time/
 obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
diff -urN oldtree/kernel/printk.c newtree/kernel/printk.c
--- oldtree/kernel/printk.c	2006-06-21 13:12:03.782166250 +0000
+++ newtree/kernel/printk.c	2006-06-21 14:56:33.399814750 +0000
@@ -341,7 +341,10 @@
 static void _call_console_drivers(unsigned long start,
 				unsigned long end, int msg_log_level)
 {
-	if (msg_log_level < console_loglevel &&
+	if (
+#ifndef CONFIG_PRINTK_IGNORE_LOGLEVEL
+			msg_log_level < console_loglevel &&
+#endif
 			console_drivers && start != end) {
 		if ((start & LOG_BUF_MASK) > (end & LOG_BUF_MASK)) {
 			/* wrapped write */
diff -urN oldtree/kernel/rwsem.c newtree/kernel/rwsem.c
--- oldtree/kernel/rwsem.c	1970-01-01 00:00:00.000000000 +0000
+++ newtree/kernel/rwsem.c	2006-06-21 14:56:33.403815000 +0000
@@ -0,0 +1,155 @@
+/* kernel/rwsem.c: R/W semaphores, public implementation
+ *
+ * Written by David Howells (dhowells@redhat.com).
+ * Derived from asm-i386/semaphore.h
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/rwsem.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+
+/*
+ * lock for reading
+ */
+void down_read(struct rw_semaphore *sem)
+{
+	might_sleep();
+	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
+
+	__down_read(sem);
+}
+
+EXPORT_SYMBOL(down_read);
+
+/*
+ * trylock for reading -- returns 1 if successful, 0 if contention
+ */
+int down_read_trylock(struct rw_semaphore *sem)
+{
+	int ret = __down_read_trylock(sem);
+
+	if (ret == 1)
+		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
+	return ret;
+}
+
+EXPORT_SYMBOL(down_read_trylock);
+
+/*
+ * lock for writing
+ */
+void down_write(struct rw_semaphore *sem)
+{
+	might_sleep();
+	rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
+
+	__down_write(sem);
+}
+
+EXPORT_SYMBOL(down_write);
+
+/*
+ * trylock for writing -- returns 1 if successful, 0 if contention
+ */
+int down_write_trylock(struct rw_semaphore *sem)
+{
+	int ret = __down_write_trylock(sem);
+
+	if (ret == 1)
+		rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
+	return ret;
+}
+
+EXPORT_SYMBOL(down_write_trylock);
+
+/*
+ * release a read lock
+ */
+void up_read(struct rw_semaphore *sem)
+{
+	rwsem_release(&sem->dep_map, 1, _RET_IP_);
+
+	__up_read(sem);
+}
+
+EXPORT_SYMBOL(up_read);
+
+/*
+ * release a write lock
+ */
+void up_write(struct rw_semaphore *sem)
+{
+	rwsem_release(&sem->dep_map, 1, _RET_IP_);
+
+	__up_write(sem);
+}
+
+EXPORT_SYMBOL(up_write);
+
+/*
+ * downgrade write lock to read lock
+ */
+void downgrade_write(struct rw_semaphore *sem)
+{
+	/*
+	 * lockdep: a downgraded write will live on as a write
+	 * dependency.
+	 */
+	__downgrade_write(sem);
+}
+
+EXPORT_SYMBOL(downgrade_write);
+
+#ifdef CONFIG_DEBUG_RWSEM_ALLOC
+
+void down_read_nested(struct rw_semaphore *sem, int subtype)
+{
+	might_sleep();
+	rwsem_acquire_read(&sem->dep_map, subtype, 0, _RET_IP_);
+
+	__down_read(sem);
+}
+
+EXPORT_SYMBOL(down_read_nested);
+
+void down_read_non_owner(struct rw_semaphore *sem)
+{
+	might_sleep();
+
+	__down_read(sem);
+}
+
+EXPORT_SYMBOL(down_read_non_owner);
+
+void down_write_nested(struct rw_semaphore *sem, int subtype)
+{
+	might_sleep();
+	rwsem_acquire(&sem->dep_map, subtype, 0, _RET_IP_);
+
+	__down_write_nested(sem, subtype);
+}
+
+EXPORT_SYMBOL(down_write_nested);
+
+void up_read_non_nested(struct rw_semaphore *sem)
+{
+	rwsem_release(&sem->dep_map, 0, _RET_IP_);
+	__up_read(sem);
+}
+
+EXPORT_SYMBOL(up_read_non_nested);
+
+void up_read_non_owner(struct rw_semaphore *sem)
+{
+	__up_read(sem);
+}
+
+EXPORT_SYMBOL(up_read_non_owner);
+
+#endif
+
+
diff -urN oldtree/kernel/sysctl.c newtree/kernel/sysctl.c
--- oldtree/kernel/sysctl.c	2006-06-21 13:12:03.834169500 +0000
+++ newtree/kernel/sysctl.c	2006-06-21 14:56:33.407815250 +0000
@@ -556,6 +556,7 @@
 		.proc_handler	= &proc_do_ipc_string,
 	},
 #endif
+#if 0
 #ifdef CONFIG_MAGIC_SYSRQ
 	{
 		.ctl_name	= KERN_SYSRQ,
@@ -566,6 +567,7 @@
 		.proc_handler	= &proc_dointvec,
 	},
 #endif
+#endif
 	{
 		.ctl_name	= KERN_CADPID,
 		.procname	= "cad_pid",
diff -urN oldtree/lib/Kconfig.debug newtree/lib/Kconfig.debug
--- oldtree/lib/Kconfig.debug	2006-06-21 13:12:03.886172750 +0000
+++ newtree/lib/Kconfig.debug	2006-06-21 14:56:33.415815750 +0000
@@ -8,6 +8,22 @@
 	  operations.  This is useful for identifying long delays
 	  in kernel startup.
 
+config PRINTK_IGNORE_LOGLEVEL
+	bool "Ignore loglevel on printks"
+	default n
+	help
+	  Selecting this option causes all printk messages to go
+	  to the console.  This allows you to serial-log kernel
+	  messages, no matter what userspace does. (e.g. some
+	  distributions disable kernel log messages during
+	  certain phases of system startup.)
+
+	  NOTE: this option also makes printk non-preemptible,
+	  which might improve the output of debugging info or
+	  crash info, but it might also cause latencies if your
+	  kernel is printk-ing a lot.
+
+	  Normally you don't need or want this option.
 
 config MAGIC_SYSRQ
 	bool "Magic SysRq key"
diff -urN oldtree/lib/locking-selftest.c newtree/lib/locking-selftest.c
--- oldtree/lib/locking-selftest.c	2006-06-21 13:12:03.902173750 +0000
+++ newtree/lib/locking-selftest.c	2006-06-21 14:56:33.419816000 +0000
@@ -144,11 +144,11 @@
 
 #define HARDIRQ_ENTER()				\
 	local_irq_disable();			\
-	nmi_enter();				\
+	irq_enter();				\
 	WARN_ON(!in_irq());
 
 #define HARDIRQ_EXIT()				\
-	nmi_exit();				\
+	__irq_exit();				\
 	local_irq_enable();
 
 #define SOFTIRQ_DISABLE		local_bh_disable
diff -urN oldtree/lib/rwsem-spinlock.c newtree/lib/rwsem-spinlock.c
--- oldtree/lib/rwsem-spinlock.c	2006-06-21 13:12:03.954177000 +0000
+++ newtree/lib/rwsem-spinlock.c	2006-06-21 14:56:33.423816250 +0000
@@ -17,16 +17,6 @@
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
 
-#if RWSEM_DEBUG
-void rwsemtrace(struct rw_semaphore *sem, const char *str)
-{
-	if (sem->debug)
-		printk("[%d] %s({%d,%d})\n",
-		       current->pid, str, sem->activity,
-		       list_empty(&sem->wait_list) ? 0 : 1);
-}
-#endif
-
 /*
  * initialise the semaphore
  */
@@ -43,9 +33,6 @@
 	sem->activity = 0;
 	spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
 }
 
 /*
@@ -64,8 +51,6 @@
 	struct task_struct *tsk;
 	int woken;
 
-	rwsemtrace(sem, "Entering __rwsem_do_wake");
-
 	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
 
 	if (!wakewrite) {
@@ -112,7 +97,6 @@
 	sem->activity += woken;
 
  out:
-	rwsemtrace(sem, "Leaving __rwsem_do_wake");
 	return sem;
 }
 
@@ -146,8 +130,6 @@
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem, "Entering __down_read");
-
 	spin_lock_irq(&sem->wait_lock);
 
 	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
@@ -179,9 +161,8 @@
 	}
 
 	tsk->state = TASK_RUNNING;
-
  out:
-	rwsemtrace(sem, "Leaving __down_read");
+	;
 }
 
 /*
@@ -192,7 +173,6 @@
 	unsigned long flags;
 	int ret = 0;
 
-	rwsemtrace(sem, "Entering __down_read_trylock");
 
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
@@ -204,7 +184,6 @@
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving __down_read_trylock");
 	return ret;
 }
 
@@ -217,8 +196,6 @@
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
 
-	rwsemtrace(sem, "Entering __down_write");
-
 	spin_lock_irq(&sem->wait_lock);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -250,9 +227,8 @@
 	}
 
 	tsk->state = TASK_RUNNING;
-
  out:
-	rwsemtrace(sem, "Leaving __down_write");
+	;
 }
 
 void fastcall __sched __down_write(struct rw_semaphore *sem)
@@ -268,8 +244,6 @@
 	unsigned long flags;
 	int ret = 0;
 
-	rwsemtrace(sem, "Entering __down_write_trylock");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
@@ -280,7 +254,6 @@
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving __down_write_trylock");
 	return ret;
 }
 
@@ -291,16 +264,12 @@
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __up_read");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
 		sem = __rwsem_wake_one_writer(sem);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __up_read");
 }
 
 /*
@@ -310,8 +279,6 @@
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __up_write");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 0;
@@ -319,8 +286,6 @@
 		sem = __rwsem_do_wake(sem, 1);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __up_write");
 }
 
 /*
@@ -331,8 +296,6 @@
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering __downgrade_write");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	sem->activity = 1;
@@ -340,8 +303,6 @@
 		sem = __rwsem_do_wake(sem, 0);
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
-
-	rwsemtrace(sem, "Leaving __downgrade_write");
 }
 
 EXPORT_SYMBOL(__init_rwsem);
@@ -353,6 +314,3 @@
 EXPORT_SYMBOL(__up_read);
 EXPORT_SYMBOL(__up_write);
 EXPORT_SYMBOL(__downgrade_write);
-#if RWSEM_DEBUG
-EXPORT_SYMBOL(rwsemtrace);
-#endif
diff -urN oldtree/lib/rwsem.c newtree/lib/rwsem.c
--- oldtree/lib/rwsem.c	2006-06-21 13:12:03.950176750 +0000
+++ newtree/lib/rwsem.c	2006-06-21 14:56:33.431816750 +0000
@@ -24,9 +24,6 @@
 	sem->count = RWSEM_UNLOCKED_VALUE;
 	spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
-#if RWSEM_DEBUG
-	sem->debug = 0;
-#endif
 }
 
 EXPORT_SYMBOL(__init_rwsem);
@@ -39,17 +36,6 @@
 #define RWSEM_WAITING_FOR_WRITE	0x00000002
 };
 
-#if RWSEM_DEBUG
-#undef rwsemtrace
-void rwsemtrace(struct rw_semaphore *sem, const char *str)
-{
-	printk("sem=%p\n", sem);
-	printk("(sem)=%08lx\n", sem->count);
-	if (sem->debug)
-		printk("[%d] %s({%08lx})\n", current->pid, str, sem->count);
-}
-#endif
-
 /*
  * handle the lock release when processes blocked on it that can now run
  * - if we come here from up_xxxx(), then:
@@ -68,8 +54,6 @@
 	struct list_head *next;
 	signed long oldcount, woken, loop;
 
-	rwsemtrace(sem, "Entering __rwsem_do_wake");
-
 	if (downgrading)
 		goto dont_wake_writers;
 
@@ -150,7 +134,6 @@
 	next->prev = &sem->wait_list;
 
  out:
-	rwsemtrace(sem, "Leaving __rwsem_do_wake");
 	return sem;
 
 	/* undo the change to count, but check for a transition 1->0 */
@@ -209,13 +192,9 @@
 {
 	struct rwsem_waiter waiter;
 
-	rwsemtrace(sem, "Entering rwsem_down_read_failed");
-
 	waiter.flags = RWSEM_WAITING_FOR_READ;
 	rwsem_down_failed_common(sem, &waiter,
 				RWSEM_WAITING_BIAS - RWSEM_ACTIVE_BIAS);
-
-	rwsemtrace(sem, "Leaving rwsem_down_read_failed");
 	return sem;
 }
 
@@ -227,12 +206,9 @@
 {
 	struct rwsem_waiter waiter;
 
-	rwsemtrace(sem, "Entering rwsem_down_write_failed");
-
 	waiter.flags = RWSEM_WAITING_FOR_WRITE;
 	rwsem_down_failed_common(sem, &waiter, -RWSEM_ACTIVE_BIAS);
 
-	rwsemtrace(sem, "Leaving rwsem_down_write_failed");
 	return sem;
 }
 
@@ -244,8 +220,6 @@
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering rwsem_wake");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
@@ -254,8 +228,6 @@
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving rwsem_wake");
-
 	return sem;
 }
 
@@ -268,8 +240,6 @@
 {
 	unsigned long flags;
 
-	rwsemtrace(sem, "Entering rwsem_downgrade_wake");
-
 	spin_lock_irqsave(&sem->wait_lock, flags);
 
 	/* do nothing if list empty */
@@ -278,7 +248,6 @@
 
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 
-	rwsemtrace(sem, "Leaving rwsem_downgrade_wake");
 	return sem;
 }
 
@@ -286,6 +255,3 @@
 EXPORT_SYMBOL(rwsem_down_write_failed);
 EXPORT_SYMBOL(rwsem_wake);
 EXPORT_SYMBOL(rwsem_downgrade_wake);
-#if RWSEM_DEBUG
-EXPORT_SYMBOL(rwsemtrace);
-#endif
diff -urN oldtree/lib/spinlock_debug.c newtree/lib/spinlock_debug.c
--- oldtree/lib/spinlock_debug.c	2006-06-21 13:12:03.954177000 +0000
+++ newtree/lib/spinlock_debug.c	2006-06-21 14:56:33.435817000 +0000
@@ -104,7 +104,7 @@
 	u64 i;
 
 	for (;;) {
-		for (i = 0; i < loops_per_jiffy * HZ; i++) {
+		for (i = 0; i < (u64)loops_per_jiffy * HZ; i++) {
 			if (__raw_spin_trylock(&lock->raw_lock))
 				return;
 			__delay(1);
@@ -112,10 +112,10 @@
 		/* lockup suspected: */
 		if (print_once) {
 			print_once = 0;
-			printk(KERN_EMERG "BUG: spinlock lockup on CPU#%d, "
-					"%s/%d, %p\n",
+			printk(KERN_EMERG "BUG: possible spinlock lockup on CPU#%d, "
+					"%s/%d, %p [%Ld/%ld]\n",
 				raw_smp_processor_id(), current->comm,
-				current->pid, lock);
+				current->pid, lock, i, loops_per_jiffy);
 			dump_stack();
 		}
 	}
@@ -169,7 +169,7 @@
 	u64 i;
 
 	for (;;) {
-		for (i = 0; i < loops_per_jiffy * HZ; i++) {
+		for (i = 0; i < (u64)loops_per_jiffy * HZ; i++) {
 			if (__raw_read_trylock(&lock->raw_lock))
 				return;
 			__delay(1);
@@ -177,10 +177,10 @@
 		/* lockup suspected: */
 		if (print_once) {
 			print_once = 0;
-			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
-					"%s/%d, %p\n",
+			printk(KERN_EMERG "BUG: possible read-lock lockup on CPU#%d, "
+					"%s/%d, %p [%Ld/%ld]\n",
 				raw_smp_processor_id(), current->comm,
-				current->pid, lock);
+				current->pid, lock, i, loops_per_jiffy);
 			dump_stack();
 		}
 	}
@@ -242,7 +242,7 @@
 	u64 i;
 
 	for (;;) {
-		for (i = 0; i < loops_per_jiffy * HZ; i++) {
+		for (i = 0; i < (u64)loops_per_jiffy * HZ; i++) {
 			if (__raw_write_trylock(&lock->raw_lock))
 				return;
 			__delay(1);
@@ -250,10 +250,10 @@
 		/* lockup suspected: */
 		if (print_once) {
 			print_once = 0;
-			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
-					"%s/%d, %p\n",
+			printk(KERN_EMERG "BUG: possible write-lock lockup on CPU#%d, "
+					"%s/%d, %p [%Ld/%ld]\n",
 				raw_smp_processor_id(), current->comm,
-				current->pid, lock);
+				current->pid, lock, i, loops_per_jiffy);
 			dump_stack();
 		}
 	}
diff -urN oldtree/net/8021q/vlan.c newtree/net/8021q/vlan.c
--- oldtree/net/8021q/vlan.c	2006-06-18 01:49:35.000000000 +0000
+++ newtree/net/8021q/vlan.c	2006-06-21 14:56:33.439817250 +0000
@@ -463,6 +463,8 @@
 	if (new_dev == NULL)
 		goto out_unlock;
 
+	spin_lock_init(&new_dev->xmit_lock);
+
 #ifdef VLAN_DEBUG
 	printk(VLAN_DBG "Allocated new name -:%s:-\n", new_dev->name);
 #endif
diff -urN oldtree/net/core/sock.c newtree/net/core/sock.c
--- oldtree/net/core/sock.c	2006-06-21 13:12:04.170190500 +0000
+++ newtree/net/core/sock.c	2006-06-21 14:56:33.443817500 +0000
@@ -846,7 +846,7 @@
 		/* SANITY */
 		sk_node_init(&newsk->sk_node);
 		sock_lock_init(newsk);
-		bh_lock_sock(newsk);
+		bh_lock_sock_nested(newsk);
 
 		atomic_set(&newsk->sk_rmem_alloc, 0);
 		atomic_set(&newsk->sk_wmem_alloc, 0);
