# This is a BitKeeper generated patch for the following project:
# Project Name: Linux kernel tree
# This patch format is intended for GNU patch command version 2.5 or higher.
# This patch includes the following deltas:
#	           ChangeSet	1.502   -> 1.503  
#	include/linux/smp_lock.h	1.4     -> 1.5    
#
# The following is the BitKeeper ChangeSet Log
# --------------------------------------------
# 02/08/21	mulix@alhambra.merseine.nu	1.503
# cleanups for smp_lock.h: convert the BKL helper macros to inline
# functions and add a kernel_locked_for_task() helper
# --------------------------------------------
#
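#
# The conversion from macros to inline functions gives the BKL helpers
# proper type checking of their task_struct argument, and the new
# kernel_locked_for_task() helper makes it possible to ask whether an
# arbitrary task (not just current) holds the big kernel lock.  A
# minimal usage sketch follows; report_bkl() is a hypothetical caller
# written for illustration, not part of this patch:
#
#	#include <linux/kernel.h>
#	#include <linux/sched.h>
#	#include <linux/smp_lock.h>
#
#	static void report_bkl(struct task_struct *task)
#	{
#		/* lock_depth >= 0 means task holds the BKL */
#		if (kernel_locked_for_task(task))
#			printk(KERN_DEBUG "pid %d holds the BKL\n",
#				task->pid);
#	}
#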
diff -Nru a/include/linux/smp_lock.h b/include/linux/smp_lock.h
--- a/include/linux/smp_lock.h	Wed Aug 21 12:44:06 2002
+++ b/include/linux/smp_lock.h	Wed Aug 21 12:44:06 2002
@@ -9,9 +9,10 @@
 #define unlock_kernel()				do { } while(0)
 #define release_kernel_lock(task)		do { } while(0)
 #define reacquire_kernel_lock(task)		do { } while(0)
-#define kernel_locked() 1
+#define kernel_locked()                         (1)
+#define kernel_locked_for_task(task)            (1)
 
-#else
+#else /* defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) */
 
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
@@ -20,29 +21,36 @@
 
 extern spinlock_t kernel_flag;
 
-#define kernel_locked()		(current->lock_depth >= 0)
+static inline int kernel_locked_for_task(struct task_struct *task)
+{
+	return task->lock_depth >= 0;
+}
+
+static inline int kernel_locked(void)
+{
+	return kernel_locked_for_task(current);
+}
 
-#define get_kernel_lock()	spin_lock(&kernel_flag)
-#define put_kernel_lock()	spin_unlock(&kernel_flag)
+#define get_kernel_lock()	do { spin_lock(&kernel_flag); } while (0)
+#define put_kernel_lock()	do { spin_unlock(&kernel_flag); } while (0)
 
 /*
  * Release global kernel lock and global interrupt lock
  */
-#define release_kernel_lock(task)		\
-do {						\
-	if (unlikely(task->lock_depth >= 0))	\
-		put_kernel_lock();		\
-} while (0)
+static inline void release_kernel_lock(struct task_struct *task)
+{
+	if (unlikely(kernel_locked_for_task(task)))
+		put_kernel_lock();
+}
 
 /*
  * Re-acquire the kernel lock
  */
-#define reacquire_kernel_lock(task)		\
-do {						\
-	if (unlikely(task->lock_depth >= 0))	\
-		get_kernel_lock();		\
-} while (0)
-
+static inline void reacquire_kernel_lock(struct task_struct *task)
+{
+	if (unlikely(kernel_locked_for_task(task)))
+		get_kernel_lock();
+}
 
 /*
  * Getting the big kernel lock.
@@ -51,7 +59,7 @@
  * so we only need to worry about other
  * CPU's.
  */
-static __inline__ void lock_kernel(void)
+static inline void lock_kernel(void)
 {
 	int depth = current->lock_depth+1;
 	if (!depth)
@@ -59,14 +67,14 @@
 	current->lock_depth = depth;
 }
 
-static __inline__ void unlock_kernel(void)
+static inline void unlock_kernel(void)
 {
-	if (current->lock_depth < 0)
+	if (!kernel_locked())
 		BUG();
 	if (--current->lock_depth < 0)
 		put_kernel_lock();
 }
 
-#endif /* CONFIG_SMP */
+#endif /* CONFIG_SMP || CONFIG_PREEMPT */
 
-#endif
+#endif /* __LINUX_SMPLOCK_H */
