patch-2.4.22 linux-2.4.22/include/asm-arm/proc-armv/system.h
- Lines: 146
- Date: 2003-08-25 04:44:43.000000000 -0700
- Orig file: linux-2.4.21/include/asm-arm/proc-armv/system.h
- Orig date: 2003-06-13 07:51:38.000000000 -0700
diff -urN linux-2.4.21/include/asm-arm/proc-armv/system.h linux-2.4.22/include/asm-arm/proc-armv/system.h
@@ -36,24 +36,20 @@
extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
extern unsigned long cr_alignment; /* defined in entry-armv.S */
-#ifdef __ARM_ARCH_4__
+#if __LINUX_ARM_ARCH__ >= 4
#define vectors_base() ((cr_alignment & CR_V) ? 0xffff0000 : 0)
#else
#define vectors_base() (0)
#endif
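The guard changes from an exact __ARM_ARCH_4__ test to __LINUX_ARM_ARCH__ >= 4, so ARMv5 and later parts also honour the CR_V "high vectors" bit. Purely as an illustration, not part of this patch and with hypothetical names, vectors_base() gives the address at which the exception vector stubs have to be installed:

	#include <linux/string.h>
	#include <asm/system.h>	/* normally pulls in asm/proc/system.h */

	/* hypothetical helper: copy the vector stubs to wherever the CPU
	 * fetches them -- 0x00000000, or 0xffff0000 when CR_V is set */
	static void install_vectors(const void *stubs, unsigned long size)
	{
		memcpy((void *)vectors_base(), stubs, size);
	}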
/*
- * A couple of speedups for the ARM
- */
-
-/*
* Save the current interrupt enable state & disable IRQs
*/
-#define __save_flags_cli(x) \
+#define local_irq_save(x) \
({ \
unsigned long temp; \
__asm__ __volatile__( \
- "mrs %0, cpsr @ save_flags_cli\n" \
+ "mrs %0, cpsr @ local_irq_save\n" \
" orr %1, %0, #128\n" \
" msr cpsr_c, %1" \
: "=r" (x), "=r" (temp) \
@@ -61,14 +57,26 @@
: "memory"); \
})
+#define local_irq_set(x) \
+ ({ \
+ unsigned long temp; \
+ __asm__ __volatile__( \
+ "mrs %0, cpsr @ local_irq_set\n" \
+" bic %1, %0, #128\n" \
+" msr cpsr_c, %1" \
+ : "=r" (x), "=r" (temp) \
+ : \
+ : "memory"); \
+ })
+
/*
* Enable IRQs
*/
-#define __sti() \
+#define local_irq_enable() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
- "mrs %0, cpsr @ sti\n" \
+ "mrs %0, cpsr @ local_irq_enable\n" \
" bic %0, %0, #128\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
@@ -79,11 +87,11 @@
/*
* Disable IRQs
*/
-#define __cli() \
+#define local_irq_disable() \
({ \
unsigned long temp; \
__asm__ __volatile__( \
- "mrs %0, cpsr @ cli\n" \
+ "mrs %0, cpsr @ local_irq_disable\n" \
" orr %0, %0, #128\n" \
" msr cpsr_c, %0" \
: "=r" (temp) \
@@ -122,27 +130,27 @@
})
/*
- * save current IRQ & FIQ state
+ * Save the current interrupt enable state.
*/
-#define __save_flags(x) \
+#define local_save_flags(x) \
+ ({ \
__asm__ __volatile__( \
- "mrs %0, cpsr @ save_flags\n" \
+ "mrs %0, cpsr @ local_save_flags\n" \
: "=r" (x) \
: \
- : "memory")
+ : "memory"); \
+ })
/*
* restore saved IRQ & FIQ state
*/
-#define __restore_flags(x) \
+#define local_irq_restore(x) \
__asm__ __volatile__( \
- "msr cpsr_c, %0 @ restore_flags\n" \
+ "msr cpsr_c, %0 @ local_irq_restore\n" \
: \
: "r" (x) \
: "memory")
-#define __save_and_sti(x) ({__save_flags(x);__sti();})
-
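Taken together, the hunks above rename the ARM-private flag macros (__save_flags_cli, __sti, __cli, __save_flags, __restore_flags) to the local_irq_* names used elsewhere in 2.4.22, and the new local_irq_set() -- save the flags, then enable IRQs -- takes over from the removed __save_and_sti(). A minimal usage sketch, not part of the patch and with hypothetical names:

	#include <asm/system.h>	/* local_irq_save() and friends */

	static unsigned int pending_events;

	static void queue_event(void)
	{
		unsigned long flags;

		local_irq_save(flags);		/* save CPSR, set the I bit (IRQs off) */
		pending_events++;		/* short critical section */
		local_irq_restore(flags);	/* put CPSR back exactly as saved */
	}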
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
* On the StrongARM, "swp" is terminally broken since it bypasses the
@@ -170,31 +178,31 @@
switch (size) {
#ifdef swp_is_buggy
case 1:
- __save_flags_cli(flags);
+ local_irq_save(flags);
ret = *(volatile unsigned char *)ptr;
*(volatile unsigned char *)ptr = x;
- __restore_flags(flags);
+ local_irq_restore(flags);
break;
case 4:
- __save_flags_cli(flags);
+ local_irq_save(flags);
ret = *(volatile unsigned long *)ptr;
*(volatile unsigned long *)ptr = x;
- __restore_flags(flags);
+ local_irq_restore(flags);
break;
#else
case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]"
- : "=r" (ret)
+ : "=&r" (ret)
: "r" (x), "r" (ptr)
: "memory");
break;
case 4: __asm__ __volatile__ ("swp %0, %1, [%2]"
- : "=r" (ret)
+ : "=&r" (ret)
: "r" (x), "r" (ptr)
: "memory");
break;
#endif
- default: __bad_xchg(ptr, size);
+ default: __bad_xchg(ptr, size), ret = 0;
}
return ret;
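Two details change in __xchg(). The swp/swpb output operands gain an earlyclobber ("=&r"), which keeps GCC from allocating ret to the same register as x or ptr -- with swp, reusing the pointer register as the destination is architecturally unpredictable. And the default case now also sets ret = 0 after calling __bad_xchg(), presumably so an unsupported size does not return (or warn about) an uninitialised value. A rough caller-side sketch, not from the patch and with hypothetical names, of the xchg() wrapper that ends up in this switch:

	#include <asm/system.h>	/* xchg() expands to __xchg(..., sizeof(*ptr)) */

	static volatile unsigned long lock_word;

	/* hypothetical test-and-set: returns the previous value, so a
	 * return of 0 means the caller has just claimed the flag */
	static unsigned long test_and_set_flag(void)
	{
		return xchg(&lock_word, 1);
	}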