diff -urN linux.orig/include/asm-i386/spinlock.h linux/include/asm-i386/spinlock.h
--- linux.orig/include/asm-i386/spinlock.h	Wed May  9 13:12:43 2001
+++ linux/include/asm-i386/spinlock.h	Wed May  9 14:44:21 2001
@@ -5,40 +5,21 @@
 #include <asm/rwlock.h>
 #include <asm/page.h>
 #include <linux/config.h>
+#include <linux/spinlock_debug.h>
 
 extern int printk(const char * fmt, ...)
 	__attribute__ ((format (printf, 1, 2)));
 
-/* It seems that people are forgetting to
- * initialize their spinlocks properly, tsk tsk.
- * Remember to turn this off in 2.4. -ben
- */
-#if defined(CONFIG_DEBUG_SPINLOCK)
-#define SPINLOCK_DEBUG	1
-#else
-#define SPINLOCK_DEBUG	0
-#endif
-
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
 
 typedef struct {
 	volatile unsigned int lock;
-#if SPINLOCK_DEBUG
-	unsigned magic;
-#endif
+	DECLARE_SPIN_LOCK_DEBUG
 } spinlock_t;
 
-#define SPINLOCK_MAGIC	0xdead4ead
-
-#if SPINLOCK_DEBUG
-#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
-#else
-#define SPINLOCK_MAGIC_INIT	/* */
-#endif
-
-#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPIN_LOCK_DEBUG_INIT }
 
 #define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
 
@@ -73,39 +54,32 @@
 static inline int spin_trylock(spinlock_t *lock)
 {
 	char oldval;
+	debug_spin_lock_pre(lock);
 	__asm__ __volatile__(
 		"xchgb %b0,%1"
 		:"=q" (oldval), "=m" (lock->lock)
 		:"0" (0) : "memory");
+	if (oldval > 0)
+		debug_spin_lock_post(lock);
 	return oldval > 0;
 }
 
 static inline void spin_lock(spinlock_t *lock)
 {
-#if SPINLOCK_DEBUG
-	__label__ here;
-here:
-	if (lock->magic != SPINLOCK_MAGIC) {
-printk("eip: %p\n", &&here);
-		BUG();
-	}
-#endif
+	debug_spin_lock_pre(lock);
 	__asm__ __volatile__(
 		spin_lock_string
 		:"=m" (lock->lock) : : "memory");
+	debug_spin_lock_post(lock);
 }
 
 static inline void spin_unlock(spinlock_t *lock)
 {
-#if SPINLOCK_DEBUG
-	if (lock->magic != SPINLOCK_MAGIC)
-		BUG();
-	if (!spin_is_locked(lock))
-		BUG();
-#endif
+	debug_spin_unlock_pre(lock);
 	__asm__ __volatile__(
 		spin_unlock_string
 		:"=m" (lock->lock) : : "memory");
+	debug_spin_unlock_post(lock);
 }
 
 /*
@@ -120,20 +94,10 @@
  */
 typedef struct {
 	volatile unsigned int lock;
-#if SPINLOCK_DEBUG
-	unsigned magic;
-#endif
+	DECLARE_RW_LOCK_DEBUG
 } rwlock_t;
 
-#define RWLOCK_MAGIC	0xdeaf1eed
-
-#if SPINLOCK_DEBUG
-#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
-#else
-#define RWLOCK_MAGIC_INIT	/* */
-#endif
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
+#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RW_LOCK_DEBUG_INIT }
 
 #define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)
 
@@ -150,20 +114,16 @@
 
 static inline void read_lock(rwlock_t *rw)
 {
-#if SPINLOCK_DEBUG
-	if (rw->magic != RWLOCK_MAGIC)
-		BUG();
-#endif
+	debug_read_lock_pre(rw);
 	__build_read_lock(rw, "__read_lock_failed");
+	debug_read_lock_post(rw);
 }
 
 static inline void write_lock(rwlock_t *rw)
 {
-#if SPINLOCK_DEBUG
-	if (rw->magic != RWLOCK_MAGIC)
-		BUG();
-#endif
+	debug_write_lock_pre(rw);
 	__build_write_lock(rw, "__write_lock_failed");
+	debug_write_lock_post(rw);
 }
 
 #define read_unlock(rw)		asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
@@ -172,8 +132,11 @@
 static inline int write_trylock(rwlock_t *lock)
 {
 	atomic_t *count = (atomic_t *)lock;
-	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+	debug_write_lock_pre(lock);
+	if (atomic_sub_and_test(RW_LOCK_BIAS, count)) {
+		debug_write_lock_post(lock);
 		return 1;
+	}
 	atomic_add(RW_LOCK_BIAS, count);
 	return 0;
 }
diff -urN linux.orig/include/linux/spinlock_debug.h linux/include/linux/spinlock_debug.h
--- linux.orig/include/linux/spinlock_debug.h	Wed Dec 31 19:00:00 1969
+++ linux/include/linux/spinlock_debug.h	Wed May  9 14:44:17 2001
@@ -0,0 +1,61 @@
+#ifndef __LINUX_SPINLOCK_DEBUG_H
+#define __LINUX_SPINLOCK_DEBUG_H
+/* linux/spinlock_debug.h
+ *	Copyright 2001 Red Hat.
+ *	written by Benjamin LaHaise <bcrl@redhat.com>
+ *
+ *	Second generation debugging code for spinlocks.  Provides the
+ *	following runtime tests:
+ *		1. halts when schedule() is called while a spinlock is held
+ *		2. basic deadlock detection
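+ *
+ *	For example, a spin_lock()ed section that calls schedule()
+ *	(directly, or via a sleeping memory allocation) trips test 1
+ *	immediately instead of deadlocking at some later point.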
+ */
+
+#if !defined(CONFIG_DEBUG_SPINLOCK)
+/* --- dummy non-debug definitions for spinlock debugging --- */
+#define DECLARE_SPIN_LOCK_DEBUG		/**/
+#define SPIN_LOCK_DEBUG_INIT		/**/
+#define debug_spin_lock_pre(lock)	do { } while (0)
+#define debug_spin_lock_post(lock)	do { } while (0)
+#define debug_spin_unlock_pre(lock)	do { } while (0)
+#define debug_spin_unlock_post(lock)	do { } while (0)
+
+#define DECLARE_RW_LOCK_DEBUG		/**/
+#define RW_LOCK_DEBUG_INIT		/**/
+#define debug_read_lock_pre(lock)	do { } while (0)
+#define debug_read_lock_post(lock)	do { } while (0)
+#define debug_write_lock_pre(lock)	do { } while (0)
+#define debug_write_lock_post(lock)	do { } while (0)
+
+#else
+/* --- definitions for spinlock debugging --- */
+
+/*
+ * check_magic() and debug_enter_spinlock() are provided by the
+ * out-of-line debugging code (not part of this header);
+ * debug_exit_spinlock() is assumed here as the counterpart that
+ * decrements the held-lock count on unlock.
+ */
+#define SPINLOCK_MAGIC	0xdead4ead
+#define RWLOCK_MAGIC	0xdeaf1eed
+
+#define DECLARE_SPIN_LOCK_DEBUG		unsigned long magic;
+#define SPIN_LOCK_DEBUG_INIT		, SPINLOCK_MAGIC
+#define debug_spin_lock_pre(lock)	check_magic((lock)->magic, SPINLOCK_MAGIC)
+#define debug_spin_lock_post(lock)	debug_enter_spinlock()
+#define debug_spin_unlock_pre(lock)	check_magic((lock)->magic, SPINLOCK_MAGIC)
+#define debug_spin_unlock_post(lock)	debug_exit_spinlock()
+
+#define DECLARE_RW_LOCK_DEBUG		unsigned long magic;
+#define RW_LOCK_DEBUG_INIT		, RWLOCK_MAGIC
+#define debug_read_lock_pre(lock)	check_magic((lock)->magic, RWLOCK_MAGIC)
+#define debug_write_lock_pre(lock)	check_magic((lock)->magic, RWLOCK_MAGIC)
+#define debug_read_lock_post(lock)	debug_enter_spinlock()
+#define debug_write_lock_post(lock)	debug_enter_spinlock()
+
+#endif
+
+#endif
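
The header above only references check_magic() and debug_enter_spinlock();
their out-of-line definitions live elsewhere in the full patch.  A minimal
sketch of the shape they might take, assuming a simple held-lock counter
(the spinlocks_held counter and debug_exit_spinlock() are illustrative
names, not necessarily what the real code uses):

	/* Illustrative only -- one plausible implementation. */
	static inline void check_magic(unsigned long magic,
				       unsigned long expected)
	{
		if (magic != expected)
			BUG();	/* lock used before initialization */
	}

	/* Assumed per-CPU in a real implementation. */
	static int spinlocks_held;

	static inline void debug_enter_spinlock(void)
	{
		spinlocks_held++;	/* schedule() can BUG() if nonzero */
	}

	static inline void debug_exit_spinlock(void)
	{
		spinlocks_held--;
	}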