/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2013-2014, 2020 Synopsys, Inc. All rights reserved.
 */

#ifndef __ASM_ARC_IO_H
#define __ASM_ARC_IO_H

#include <linux/types.h>
#include <asm/byteorder.h>

/*
 * Compiler barrier. It prevents the compiler from reordering instructions
 * before and after it. It doesn't prevent the HW (CPU) from any reordering
 * though.
 */
#define __comp_b()		asm volatile("" : : : "memory")
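
/*
 * Example (illustrative sketch, hypothetical variables): __comp_b() pins
 * compile-time ordering only; the CPU may still reorder the two stores:
 *
 *	shared_data = 42;
 *	__comp_b();		// compiler must emit the data store first
 *	shared_flag = 1;
 */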

#ifdef __ARCHS__

/*
 * ARCv2 based HS38 cores are in-order issue, but still weakly ordered
 * due to micro-arch buffering/queuing of load/store, cache hit vs. miss ...
 *
 * Explicit barrier provided by DMB instruction
 * - Operand supports fine grained load/store/load+store semantics
 * - Ensures that selected memory operation issued before it will complete
 *   before any subsequent memory operation of same type
 * - DMB guarantees SMP as well as local barrier semantics
 *   (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
 *   UP: barrier(), SMP: smp_*mb == *mb)
 * - DSYNC provides DMB + completion of cache/BPU maintenance ops, hence it
 *   is not needed in the general case. Plus it only provides a full barrier.
 */

#define mb()	asm volatile("dmb 3\n" : : : "memory")
#define rmb()	asm volatile("dmb 1\n" : : : "memory")
#define wmb()	asm volatile("dmb 2\n" : : : "memory")
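
/*
 * Example (illustrative sketch, hypothetical names): wmb() orders filling
 * a DMA descriptor against the store that hands it to the device:
 *
 *	desc->buf = dma_addr;
 *	wmb();			// descriptor contents visible first
 *	desc->owner = OWNER_HW;	// only then pass ownership to HW
 */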

#else

/*
 * ARCompact based cores (ARC700) only have the SYNC instruction, which is
 * super heavy weight as it flushes the pipeline as well.
 * There are no real SMP implementations of such cores.
 */

#define mb()	asm volatile("sync\n" : : : "memory")

#endif
#ifdef __ARCHS__
#define __iormb()		rmb()
#define __iowmb()		wmb()
#else
#define __iormb()		__comp_b()
#define __iowmb()		__comp_b()
#endif

static inline void sync(void)
{
	/* Not yet implemented */
}

/*
 * We must use 'volatile' in the C-version read/write IO accessors
 * implementation to avoid merging several reads (writes) into one read
 * (write), or having them optimized out by the compiler.
 * We must also use compiler barriers before and after the operation (read
 * or write) so it won't be reordered by the compiler.
 */
#define __arch_getb(a)		({ u8 __v; __comp_b(); __v = *(volatile u8 *)(a); __comp_b(); __v; })
#define __arch_getw(a)		({ u16 __v; __comp_b(); __v = *(volatile u16 *)(a); __comp_b(); __v; })
#define __arch_getl(a)		({ u32 __v; __comp_b(); __v = *(volatile u32 *)(a); __comp_b(); __v; })
#define __arch_getq(a)		({ u64 __v; __comp_b(); __v = *(volatile u64 *)(a); __comp_b(); __v; })

#define __arch_putb(v, a)	({ __comp_b(); *(volatile u8 *)(a) = (v); __comp_b(); })
#define __arch_putw(v, a)	({ __comp_b(); *(volatile u16 *)(a) = (v); __comp_b(); })
#define __arch_putl(v, a)	({ __comp_b(); *(volatile u32 *)(a) = (v); __comp_b(); })
#define __arch_putq(v, a)	({ __comp_b(); *(volatile u64 *)(a) = (v); __comp_b(); })
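
/*
 * Example (illustrative sketch, hypothetical address): each low-level
 * accessor compiles to a single volatile access fenced by compiler
 * barriers on both sides:
 *
 *	u32 id = __arch_getl(0xf0001000);	// one 32-bit MMIO load
 */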

#define __raw_writeb(v, a)	__arch_putb(v, a)
#define __raw_writew(v, a)	__arch_putw(v, a)
#define __raw_writel(v, a)	__arch_putl(v, a)
#define __raw_writeq(v, a)	__arch_putq(v, a)

#define __raw_readb(a)		__arch_getb(a)
#define __raw_readw(a)		__arch_getw(a)
#define __raw_readl(a)		__arch_getl(a)
#define __raw_readq(a)		__arch_getq(a)

static inline void __raw_writesb(unsigned long addr, const void *data,
				 int bytelen)
{
	const u8 *buf = data;

	while (bytelen--)
		__arch_putb(*buf++, addr);
}

static inline void __raw_writesw(unsigned long addr, const void *data,
				 int wordlen)
{
	const u16 *buf = data;

	while (wordlen--)
		__arch_putw(*buf++, addr);
}

static inline void __raw_writesl(unsigned long addr, const void *data,
				 int longlen)
{
	const u32 *buf = data;

	while (longlen--)
		__arch_putl(*buf++, addr);
}

static inline void __raw_readsb(unsigned long addr, void *data, int bytelen)
{
	u8 *buf = data;

	while (bytelen--)
		*buf++ = __arch_getb(addr);
}

static inline void __raw_readsw(unsigned long addr, void *data, int wordlen)
{
	u16 *buf = data;

	while (wordlen--)
		*buf++ = __arch_getw(addr);
}

static inline void __raw_readsl(unsigned long addr, void *data, int longlen)
{
	u32 *buf = data;

	while (longlen--)
		*buf++ = __arch_getl(addr);
}
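
/*
 * Example (illustrative sketch, hypothetical register name): drain 16
 * words from an RX FIFO exposed as a single 32-bit register; every read
 * targets the same address:
 *
 *	u32 rxbuf[16];
 *
 *	__raw_readsl(UART_RX_FIFO, rxbuf, 16);
 */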

/*
 * Relaxed I/O memory access primitives. These follow the Device memory
 * ordering rules but do not guarantee any ordering relative to Normal memory
 * accesses.
 */
#define readb_relaxed(c)	({ u8 __r = __raw_readb(c); __r; })
#define readw_relaxed(c)	({ u16 __r = le16_to_cpu((__force __le16) \
				  __raw_readw(c)); __r; })
#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32) \
				  __raw_readl(c)); __r; })
#define readq_relaxed(c)	({ u64 __r = le64_to_cpu((__force __le64) \
				  __raw_readq(c)); __r; })

#define writeb_relaxed(v, c)	((void)__raw_writeb((v), (c)))
#define writew_relaxed(v, c)	((void)__raw_writew((__force u16) \
				 cpu_to_le16(v), (c)))
#define writel_relaxed(v, c)	((void)__raw_writel((__force u32) \
				 cpu_to_le32(v), (c)))
#define writeq_relaxed(v, c)	((void)__raw_writeq((__force u64) \
				 cpu_to_le64(v), (c)))
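
/*
 * Example (illustrative sketch, hypothetical names): the relaxed forms are
 * sufficient when ordering only matters against the same device, e.g.
 * streaming a buffer into a device FIFO register:
 *
 *	for (i = 0; i < len; i++)
 *		writel_relaxed(buf[i], FIFO_REG);
 */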

/*
 * MMIO can also get buffered/optimized in micro-arch, so barriers needed
 * Based on ARM model for the typical use case
 *
 *	<ST [DMA buffer]>
 *	<writel MMIO "go" reg>
 *
 *	<readl MMIO "status" reg>
 *	<LD [DMA buffer]>
 *
 * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
 */
#define readb(c)	({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c)	({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c)	({ u32 __v = readl_relaxed(c); __iormb(); __v; })
#define readq(c)	({ u64 __v = readq_relaxed(c); __iormb(); __v; })

#define writeb(v, c)	({ __iowmb(); writeb_relaxed(v, c); })
#define writew(v, c)	({ __iowmb(); writew_relaxed(v, c); })
#define writel(v, c)	({ __iowmb(); writel_relaxed(v, c); })
#define writeq(v, c)	({ __iowmb(); writeq_relaxed(v, c); })
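
/*
 * Example (illustrative sketch, hypothetical registers): the use case from
 * the comment above; writel() fences prior DMA-buffer stores before the
 * doorbell, readl() fences the status check before the buffer loads:
 *
 *	writel(DMA_GO, &regs->ctrl);
 *	while (!(readl(&regs->status) & DMA_DONE))
 *		;
 */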

#define out_arch(type, endian, a, v)	__raw_write##type(cpu_to_##endian(v), a)
#define in_arch(type, endian, a)	endian##_to_cpu(__raw_read##type(a))

#define out_le32(a, v)	out_arch(l, le32, a, v)
#define out_le16(a, v)	out_arch(w, le16, a, v)

#define in_le32(a)	in_arch(l, le32, a)
#define in_le16(a)	in_arch(w, le16, a)

#define out_be32(a, v)	out_arch(l, be32, a, v)
#define out_be16(a, v)	out_arch(w, be16, a, v)

#define in_be32(a)	in_arch(l, be32, a)
#define in_be16(a)	in_arch(w, be16, a)

#define out_8(a, v)	__raw_writeb(v, a)
#define in_8(a)		__raw_readb(a)
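
/*
 * Example (illustrative sketch, hypothetical names): a device with
 * big-endian registers; in_be32()/out_be32() perform whatever byte
 * swapping the CPU endianness requires:
 *
 *	u32 stat = in_be32(&regs->status);
 *
 *	out_be32(&regs->ctrl, stat | CTRL_EN);
 */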

/*
 * Clear and set bits in one shot. These macros can be used to clear and
 * set multiple bits in a register using a single call. These macros can
 * also be used to set a multiple-bit bit pattern using a mask, by
 * specifying the mask in the 'clear' parameter and the new bit pattern
 * in the 'set' parameter. See the example below the sized variants.
 */

#define clrbits(type, addr, clear) \
	out_##type((addr), in_##type(addr) & ~(clear))

#define setbits(type, addr, set) \
	out_##type((addr), in_##type(addr) | (set))

#define clrsetbits(type, addr, clear, set) \
	out_##type((addr), (in_##type(addr) & ~(clear)) | (set))

#define clrbits_be32(addr, clear) clrbits(be32, addr, clear)
#define setbits_be32(addr, set) setbits(be32, addr, set)
#define clrsetbits_be32(addr, clear, set) clrsetbits(be32, addr, clear, set)

#define clrbits_le32(addr, clear) clrbits(le32, addr, clear)
#define setbits_le32(addr, set) setbits(le32, addr, set)
#define clrsetbits_le32(addr, clear, set) clrsetbits(le32, addr, clear, set)

#define clrbits_be16(addr, clear) clrbits(be16, addr, clear)
#define setbits_be16(addr, set) setbits(be16, addr, set)
#define clrsetbits_be16(addr, clear, set) clrsetbits(be16, addr, clear, set)

#define clrbits_le16(addr, clear) clrbits(le16, addr, clear)
#define setbits_le16(addr, set) setbits(le16, addr, set)
#define clrsetbits_le16(addr, clear, set) clrsetbits(le16, addr, clear, set)

#define clrbits_8(addr, clear) clrbits(8, addr, clear)
#define setbits_8(addr, set) setbits(8, addr, set)
#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
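
/*
 * Example (illustrative sketch, hypothetical field names): programming a
 * multi-bit field in one shot; the field mask goes in 'clear', the new
 * value in 'set':
 *
 *	clrsetbits_le32(&regs->clk_div, DIV_MASK, 4 << DIV_SHIFT);
 */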

#include <asm-generic/io.h>

#endif /* __ASM_ARC_IO_H */