/*
 * linux/include/asm-arm/proc-armv/system.h
 *
 * Copyright (C) 1996 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_SYSTEM_H
#define __ASM_PROC_SYSTEM_H
/*
 * Save the current interrupt enable state & disable IRQs
 */
#ifdef CONFIG_ARM64

/*
 * Save the current interrupt enable state
 * and disable IRQs/FIQs
 */
#define local_irq_save(flags)					\
	__asm__ __volatile__(					\
	"mrs	%0, daif\n"					\
	"msr	daifset, #3"					\
	: "=r" (flags) : : "memory")
/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(flags)				\
	__asm__ __volatile__(					\
	"msr	daif, %0"					\
	: : "r" (flags) : "memory")
/*
 * Enable IRQs/FIQs
 */
#define local_irq_enable()					\
	__asm__ __volatile__(					\
	"msr	daifclr, #3" : : : "memory")
/*
 * Disable IRQs/FIQs
 */
#define local_irq_disable()					\
	__asm__ __volatile__(					\
	"msr	daifset, #3" : : : "memory")

#else	/* CONFIG_ARM64 */
#define local_irq_save(x)					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr	@ local_irq_save\n"		\
	"	orr	%1, %0, #128\n"				\
	"	msr	cpsr_c, %1"				\
	: "=r" (x), "=r" (temp) : : "memory");			\
	})
#define local_irq_enable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr	@ local_irq_enable\n"		\
	"	bic	%0, %0, #128\n"				\
	"	msr	cpsr_c, %0"				\
	: "=r" (temp) : : "memory");				\
	})
#define local_irq_disable()					\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr	@ local_irq_disable\n"		\
	"	orr	%0, %0, #128\n"				\
	"	msr	cpsr_c, %0"				\
	: "=r" (temp) : : "memory");				\
	})
/*
 * Enable FIQs
 */
#define __stf()							\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr	@ stf\n"			\
	"	bic	%0, %0, #64\n"				\
	"	msr	cpsr_c, %0"				\
	: "=r" (temp) : : "memory");				\
	})
/*
 * Disable FIQs
 */
#define __clf()							\
	({							\
		unsigned long temp;				\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr	@ clf\n"			\
	"	orr	%0, %0, #64\n"				\
	"	msr	cpsr_c, %0"				\
	: "=r" (temp) : : "memory");				\
	})
/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)					\
	__asm__ __volatile__(					\
	"mrs	%0, cpsr	@ local_save_flags\n"		\
	: "=r" (x) : : "memory")
/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)					\
	__asm__ __volatile__(					\
	"msr	cpsr_c, %0	@ local_irq_restore\n"		\
	: : "r" (x) : "memory")

#endif	/* CONFIG_ARM64 */
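
/*
 * Illustrative sketch, not part of the original header: a typical caller
 * brackets a short critical section with local_irq_save()/local_irq_restore()
 * so the code is correct whether or not interrupts were already disabled on
 * entry.  The counter update below is a hypothetical example.
 */
static inline void __example_counter_add(volatile unsigned long *counter,
					 unsigned long val)
{
	unsigned long flags;

	local_irq_save(flags);		/* mask IRQs, remember previous state */
	*counter += val;		/* non-atomic read-modify-write, now IRQ-safe */
	local_irq_restore(flags);	/* restore the previous interrupt mask */
}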
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110) || \
	defined(CONFIG_ARM64)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *	1. Disable interrupts and emulate the atomic swap
 *	2. Clean the cache, perform the atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 */
#define swp_is_buggy
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif

	switch (size) {
#ifdef swp_is_buggy
	case 1:
		local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		local_irq_restore(flags);
		break;
	case 4:
		local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		local_irq_restore(flags);
		break;
#else
	case 1:	__asm__ __volatile__ ("swpb	%0, %1, [%2]"
			: "=&r" (ret) : "r" (x), "r" (ptr) : "memory");
		break;
	case 4:	__asm__ __volatile__ ("swp	%0, %1, [%2]"
			: "=&r" (ret) : "r" (x), "r" (ptr) : "memory");
		break;
#endif
	default: __bad_xchg(ptr, size), ret = 0;
	}

	return ret;
}
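
/*
 * Illustrative sketch, not taken from this excerpt: callers normally reach
 * __xchg() through a type-generic xchg() wrapper along these lines, which
 * passes the operand size so the matching case above is selected.  Guarded
 * with #ifndef in case the real wrapper is defined elsewhere.
 */
#ifndef xchg
#define xchg(ptr, x)						\
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#endif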