/*
 *  linux/include/asm-arm/proc-armv/system.h
 *
 *  Copyright (C) 1996 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H

#include <linux/config.h>
/*
 * Save the current interrupt enable state & disable IRQs.
 *
 * Reads CPSR into x (the caller's flags variable), then writes CPSR
 * back with the I bit (bit 7, #128) set via a scratch register so the
 * saved value in x is the *pre-disable* state.  Only cpsr_c (the
 * control field) is written, so condition flags are untouched.
 * "memory" clobber orders this against surrounding memory accesses.
 */
#define local_irq_save(x)					\
	do {							\
	  unsigned long temp;					\
	  __asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_save\n"	\
"	orr	%1, %0, #128\n"					\
"	msr	cpsr_c, %1"					\
	  : "=r" (x), "=r" (temp)				\
	  :							\
	  : "memory");						\
	} while (0)
/*
 * Enable IRQs.
 *
 * Clears the I bit (bit 7, #128) in CPSR.  Uses a scratch register for
 * the read-modify-write; only the control field (cpsr_c) is written.
 */
#define local_irq_enable()					\
	do {							\
	  unsigned long temp;					\
	  __asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_enable\n"	\
"	bic	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	  : "=r" (temp)						\
	  :							\
	  : "memory");						\
	} while (0)
/*
 * Disable IRQs.
 *
 * Sets the I bit (bit 7, #128) in CPSR, discarding the previous state
 * (use local_irq_save() if the state must be restored later).
 */
#define local_irq_disable()					\
	do {							\
	  unsigned long temp;					\
	  __asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_irq_disable\n"	\
"	orr	%0, %0, #128\n"					\
"	msr	cpsr_c, %0"					\
	  : "=r" (temp)						\
	  :							\
	  : "memory");						\
	} while (0)
/*
 * Enable FIQs.
 *
 * Clears the F bit (bit 6, #64) in CPSR via a read-modify-write of the
 * control field.
 */
#define __stf()							\
	do {							\
	  unsigned long temp;					\
	  __asm__ __volatile__(					\
	"mrs	%0, cpsr		@ stf\n"		\
"	bic	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	  : "=r" (temp)						\
	  :							\
	  : "memory");						\
	} while (0)
/*
 * Disable FIQs.
 *
 * Sets the F bit (bit 6, #64) in CPSR via a read-modify-write of the
 * control field.
 */
#define __clf()							\
	do {							\
	  unsigned long temp;					\
	  __asm__ __volatile__(					\
	"mrs	%0, cpsr		@ clf\n"		\
"	orr	%0, %0, #64\n"					\
"	msr	cpsr_c, %0"					\
	  : "=r" (temp)						\
	  :							\
	  : "memory");						\
	} while (0)
/*
 * Save the current interrupt enable state.
 *
 * Reads CPSR into x without modifying it; pair with
 * local_irq_restore() to put the state back.
 */
#define local_save_flags(x)					\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr		@ local_save_flags\n"	\
	  : "=r" (x)						\
	  :							\
	  : "memory")
/*
 * Restore saved IRQ & FIQ state.
 *
 * Writes x (as saved by local_irq_save()/local_save_flags()) back into
 * the CPSR control field, restoring the I and F bits together.
 */
#define local_irq_restore(x)					\
	__asm__ __volatile__(					\
	"msr	cpsr_c, %0		@ local_irq_restore\n"	\
	  :							\
	  : "r" (x)						\
	  : "memory")
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
128 static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
130 extern void __bad_xchg(volatile void *, int);
139 local_irq_save(flags);
140 ret = *(volatile unsigned char *)ptr;
141 *(volatile unsigned char *)ptr = x;
142 local_irq_restore(flags);
146 local_irq_save(flags);
147 ret = *(volatile unsigned long *)ptr;
148 *(volatile unsigned long *)ptr = x;
149 local_irq_restore(flags);
152 case 1: __asm__ __volatile__ ("swpb %0, %1, [%2]"
157 case 4: __asm__ __volatile__ ("swp %0, %1, [%2]"
163 default: __bad_xchg(ptr, size), ret = 0;