patch-2.4.8 linux/include/asm-ia64/spinlock.h
- Lines: 40
- Date: Tue Jul 31 10:30:09 2001
- Orig file: v2.4.7/linux/include/asm-ia64/spinlock.h
- Orig date: Thu Jan 4 12:50:18 2001
diff -u --recursive --new-file v2.4.7/linux/include/asm-ia64/spinlock.h linux/include/asm-ia64/spinlock.h
@@ -19,12 +19,12 @@
#ifdef NEW_LOCK
-typedef struct {
+typedef struct {
volatile unsigned int lock;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
-#define spin_lock_init(x) ((x)->lock = 0)
+#define spin_lock_init(x) ((x)->lock = 0)
/*
* Streamlined test_and_set_bit(0, (x)). We use test-and-test-and-set
@@ -62,12 +62,12 @@
})
#define spin_is_locked(x) ((x)->lock != 0)
-#define spin_unlock(x) do {((spinlock_t *) x)->lock = 0;} while (0)
-#define spin_unlock_wait(x) do {} while ((x)->lock)
+#define spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0;} while (0)
+#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
#else /* !NEW_LOCK */
-typedef struct {
+typedef struct {
volatile unsigned int lock;
} spinlock_t;
@@ -96,7 +96,7 @@
:: "r"(&(x)->lock) : "r2", "r29", "memory")
#define spin_is_locked(x) ((x)->lock != 0)
-#define spin_unlock(x) do {((spinlock_t *) x)->lock = 0; barrier(); } while (0)
+#define spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0; } while (0)
#define spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
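The functional change in both branches is the placement of barrier(), which in the 2.4 kernel is a compiler-only memory barrier. Putting it before the store that clears the lock word keeps the compiler from sinking critical-section stores below the unlock, and putting it inside the spin_unlock_wait() loop forces the lock word to be re-read on every iteration. A minimal sketch of the same release pattern, using hypothetical toy_* names outside the kernel and assuming GCC's inline-asm compiler barrier:

/* Hypothetical stand-in for the kernel's spinlock_t. */
typedef struct {
	volatile unsigned int lock;
} toy_spinlock_t;

/* Compiler-only barrier, same idea as the kernel's barrier(). */
#define toy_barrier()	__asm__ __volatile__("" ::: "memory")

/*
 * Release pattern matching the patched spin_unlock(): the barrier
 * precedes the store, so the compiler cannot reorder stores made
 * inside the critical section past the release of the lock.
 */
static inline void toy_spin_unlock(toy_spinlock_t *x)
{
	toy_barrier();
	x->lock = 0;
}

/*
 * Busy-wait matching the patched spin_unlock_wait(): the barrier in
 * the loop body makes the compiler reload x->lock each iteration
 * instead of caching it in a register.
 */
static inline void toy_spin_unlock_wait(toy_spinlock_t *x)
{
	do {
		toy_barrier();
	} while (x->lock);
}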