+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
- *
- * SPDX-License-Identifier: GPL-2.0+
+ * Copyright (C) 2013-2014, 2020 Synopsys, Inc. All rights reserved.
*/
#ifndef __ASM_ARC_IO_H
#define __ASM_ARC_IO_H
#include <linux/types.h>
#include <asm/byteorder.h>
+#ifdef __ARCHS__
+
/*
- * Given a physical address and a length, return a virtual address
- * that can be used to access the memory range with the caching
- * properties specified by "flags".
+ * ARCv2 based HS38 cores are in-order issue, but still weakly ordered
+ * due to micro-architectural buffering/queuing of loads/stores and
+ * cache hit vs. miss handling.
+ *
+ * An explicit barrier is provided by the DMB instruction:
+ * - Its operand selects fine grained load/store/load+store semantics
+ * - It ensures that memory operations of the selected type issued before
+ *   the barrier complete before any subsequent memory operation of the
+ *   same type
+ * - DMB guarantees SMP as well as local barrier semantics
+ *   (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
+ *   UP: barrier(), SMP: smp_*mb == *mb)
+ * - DSYNC provides DMB plus completion of cache/BPU maintenance ops, hence
+ *   it is not needed in the general case; moreover it only acts as a full
+ *   barrier
*/
-#define MAP_NOCACHE (0)
-#define MAP_WRCOMBINE (0)
-#define MAP_WRBACK (0)
-#define MAP_WRTHROUGH (0)
-static inline void *
-map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags)
-{
- return (void *)((unsigned long)paddr);
-}
+#define mb() asm volatile("dmb 3\n" : : : "memory")
+#define rmb() asm volatile("dmb 1\n" : : : "memory")
+#define wmb() asm volatile("dmb 2\n" : : : "memory")
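+
+/*
+ * Illustrative sketch (not part of this header; "desc" is a hypothetical
+ * in-memory descriptor): the typical producer sequence these barriers
+ * enable:
+ *
+ *	desc->buf = buf;	// fill the descriptor in memory
+ *	wmb();			// stores above complete first ...
+ *	desc->ready = 1;	// ... then publish the descriptor
+ */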
+
+#else
/*
- * Take down a mapping set up by map_physmem().
+ * ARCompact based cores (ARC700) only have the SYNC instruction, which is
+ * very heavyweight since it also flushes the pipeline.
+ * There are no real SMP implementations of such cores.
*/
-static inline void unmap_physmem(void *vaddr, unsigned long flags)
-{
-}
+#define mb() asm volatile("sync\n" : : : "memory")
+#endif
+
+#ifdef __ARCHS__
+#define __iormb() rmb()
+#define __iowmb() wmb()
+#else
+#define __iormb() asm volatile("" : : : "memory")
+#define __iowmb() asm volatile("" : : : "memory")
+#endif
static inline void sync(void)
{
/* Not yet implemented */
}
-static inline u8 __raw_readb(const volatile void __iomem *addr)
-{
- u8 b;
+/*
+ * We must use 'volatile' in the C implementation of the read/write IO
+ * accessors to keep the compiler from merging several reads (writes) into
+ * a single one, or from optimizing them out altogether.
+ */
+#define __arch_getb(a) (*(volatile u8 *)(a))
+#define __arch_getw(a) (*(volatile u16 *)(a))
+#define __arch_getl(a) (*(volatile u32 *)(a))
+#define __arch_getq(a) (*(volatile u64 *)(a))
- __asm__ __volatile__("ldb%U1 %0, %1\n"
- : "=r" (b)
- : "m" (*(volatile u8 __force *)addr)
- : "memory");
- return b;
-}
+#define __arch_putb(v, a) (*(volatile u8 *)(a) = (v))
+#define __arch_putw(v, a) (*(volatile u16 *)(a) = (v))
+#define __arch_putl(v, a) (*(volatile u32 *)(a) = (v))
+#define __arch_putq(v, a) (*(volatile u64 *)(a) = (v))
-static inline u16 __raw_readw(const volatile void __iomem *addr)
-{
- u16 s;
- __asm__ __volatile__("ldw%U1 %0, %1\n"
- : "=r" (s)
- : "m" (*(volatile u16 __force *)addr)
- : "memory");
- return s;
-}
+#define __raw_writeb(v, a) __arch_putb(v, a)
+#define __raw_writew(v, a) __arch_putw(v, a)
+#define __raw_writel(v, a) __arch_putl(v, a)
+#define __raw_writeq(v, a) __arch_putq(v, a)
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+#define __raw_readb(a) __arch_getb(a)
+#define __raw_readw(a) __arch_getw(a)
+#define __raw_readl(a) __arch_getl(a)
+#define __raw_readq(a) __arch_getq(a)
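+
+/*
+ * Illustrative sketch (BASE, STATUS and BUSY are hypothetical): each loop
+ * iteration must re-read the device register; without volatile the
+ * compiler could hoist the load out of the loop and spin on a stale value:
+ *
+ *	while (__arch_getl(BASE + STATUS) & BUSY)
+ *		;
+ */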
+
+static inline void __raw_writesb(unsigned long addr, const void *data,
+				 int bytelen)
{
- u32 w;
+	const u8 *buf = (const u8 *)data;
+
- __asm__ __volatile__("ld%U1 %0, %1\n"
- : "=r" (w)
- : "m" (*(volatile u32 __force *)addr)
- : "memory");
- return w;
+	while (bytelen--)
+		__arch_putb(*buf++, addr);
}
-#define readb __raw_readb
-
-static inline u16 readw(const volatile void __iomem *addr)
+static inline void __raw_writesw(unsigned long addr, const void *data,
+				 int wordlen)
{
- return __le16_to_cpu(__raw_readw(addr));
-}
+	const u16 *buf = (const u16 *)data;
+
-static inline u32 readl(const volatile void __iomem *addr)
-{
- return __le32_to_cpu(__raw_readl(addr));
+	while (wordlen--)
+		__arch_putw(*buf++, addr);
}
-static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+static inline void __raw_writesl(unsigned long addr, const void *data,
+				 int longlen)
{
- __asm__ __volatile__("stb%U1 %0, %1\n"
- :
- : "r" (b), "m" (*(volatile u8 __force *)addr)
- : "memory");
-}
+	const u32 *buf = (const u32 *)data;
+
-static inline void __raw_writew(u16 s, volatile void __iomem *addr)
-{
- __asm__ __volatile__("stw%U1 %0, %1\n"
- :
- : "r" (s), "m" (*(volatile u16 __force *)addr)
- : "memory");
+	while (longlen--)
+		__arch_putl(*buf++, addr);
}
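+
+/*
+ * Usage sketch (FIFO_ADDR is a hypothetical device FIFO): the
+ * __raw_writes* helpers store every element to the same address, as
+ * FIFO-style registers expect:
+ *
+ *	u16 txbuf[8];
+ *	__raw_writesw(FIFO_ADDR, txbuf, ARRAY_SIZE(txbuf));
+ */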
-static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+static inline void __raw_readsb(unsigned long addr, void *data, int bytelen)
{
- __asm__ __volatile__("st%U1 %0, %1\n"
- :
- : "r" (w), "m" (*(volatile u32 __force *)addr)
- : "memory");
-}
-
-#define writeb __raw_writeb
-#define writew(b, addr) __raw_writew(__cpu_to_le16(b), addr)
-#define writel(b, addr) __raw_writel(__cpu_to_le32(b), addr)
+	u8 *buf = (u8 *)data;
+
-static inline int __raw_readsb(unsigned int addr, void *data, int bytelen)
-{
- __asm__ __volatile__ ("1:ld.di r8, [r0]\n"
- "sub.f r2, r2, 1\n"
- "bnz.d 1b\n"
- "stb.ab r8, [r1, 1]\n"
- :
- : "r" (addr), "r" (data), "r" (bytelen)
- : "r8");
- return bytelen;
+	while (bytelen--)
+		*buf++ = __arch_getb(addr);
}
-static inline int __raw_readsw(unsigned int addr, void *data, int wordlen)
+static inline void __raw_readsw(unsigned long addr, void *data, int wordlen)
{
- __asm__ __volatile__ ("1:ld.di r8, [r0]\n"
- "sub.f r2, r2, 1\n"
- "bnz.d 1b\n"
- "stw.ab r8, [r1, 2]\n"
- :
- : "r" (addr), "r" (data), "r" (wordlen)
- : "r8");
- return wordlen;
-}
+	u16 *buf = (u16 *)data;
+
-static inline int __raw_readsl(unsigned int addr, void *data, int longlen)
-{
- __asm__ __volatile__ ("1:ld.di r8, [r0]\n"
- "sub.f r2, r2, 1\n"
- "bnz.d 1b\n"
- "st.ab r8, [r1, 4]\n"
- :
- : "r" (addr), "r" (data), "r" (longlen)
- : "r8");
- return longlen;
+	while (wordlen--)
+		*buf++ = __arch_getw(addr);
}
-static inline int __raw_writesb(unsigned int addr, void *data, int bytelen)
+static inline void __raw_readsl(unsigned long addr, void *data, int longlen)
{
- __asm__ __volatile__ ("1:ldb.ab r8, [r1, 1]\n"
- "sub.f r2, r2, 1\n"
- "bnz.d 1b\n"
- "st.di r8, [r0, 0]\n"
- :
- : "r" (addr), "r" (data), "r" (bytelen)
- : "r8");
- return bytelen;
-}
+	u32 *buf = (u32 *)data;
+
-static inline int __raw_writesw(unsigned int addr, void *data, int wordlen)
-{
- __asm__ __volatile__ ("1:ldw.ab r8, [r1, 2]\n"
- "sub.f r2, r2, 1\n"
- "bnz.d 1b\n"
- "st.ab.di r8, [r0, 0]\n"
- :
- : "r" (addr), "r" (data), "r" (wordlen)
- : "r8");
- return wordlen;
+	while (longlen--)
+		*buf++ = __arch_getl(addr);
}
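+
+/*
+ * Usage sketch (FIFO_ADDR again hypothetical): drain 32-bit words from a
+ * device FIFO into a CPU buffer:
+ *
+ *	u32 rxbuf[16];
+ *	__raw_readsl(FIFO_ADDR, rxbuf, ARRAY_SIZE(rxbuf));
+ */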
-static inline int __raw_writesl(unsigned int addr, void *data, int longlen)
-{
- __asm__ __volatile__ ("1:ld.ab r8, [r1, 4]\n"
- "sub.f r2, r2, 1\n"
- "bnz.d 1b\n"
- "st.ab.di r8, [r0, 0]\n"
- :
- : "r" (addr), "r" (data), "r" (longlen)
- : "r8");
- return longlen;
-}
+/*
+ * Relaxed I/O memory access primitives. These follow the Device memory
+ * ordering rules but do not guarantee any ordering relative to Normal memory
+ * accesses.
+ */
+#define readb_relaxed(c) ({ u8 __r = __raw_readb(c); __r; })
+#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
+				    __raw_readw(c)); __r; })
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
+				    __raw_readl(c)); __r; })
+#define readq_relaxed(c) ({ u64 __r = le64_to_cpu((__force __le64) \
+				    __raw_readq(c)); __r; })
+
+#define writeb_relaxed(v, c) ((void)__raw_writeb((v), (c)))
+#define writew_relaxed(v, c) ((void)__raw_writew((__force u16) \
+				     cpu_to_le16(v), (c)))
+#define writel_relaxed(v, c) ((void)__raw_writel((__force u32) \
+				     cpu_to_le32(v), (c)))
+#define writeq_relaxed(v, c) ((void)__raw_writeq((__force u64) \
+				     cpu_to_le64(v), (c)))
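+
+/*
+ * Illustrative sketch (base, ID_LO and ID_HI are hypothetical): the
+ * _relaxed accessors fit register-only sequences where no ordering
+ * against Normal memory is required:
+ *
+ *	id = readl_relaxed(base + ID_LO);
+ *	rev = readl_relaxed(base + ID_HI);
+ */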
+
+/*
+ * MMIO can also get buffered/optimized in the micro-arch, so barriers are
+ * needed. Based on the ARM model for the typical use case:
+ *
+ * <ST [DMA buffer]>
+ * <writel MMIO "go" reg>
+ * or:
+ * <readl MMIO "status" reg>
+ * <LD [DMA buffer]>
+ *
+ * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
+ */
+#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
+#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
+#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; })
+
+#define writeb(v, c) ({ __iowmb(); writeb_relaxed(v, c); })
+#define writew(v, c) ({ __iowmb(); writew_relaxed(v, c); })
+#define writel(v, c) ({ __iowmb(); writel_relaxed(v, c); })
+#define writeq(v, c) ({ __iowmb(); writeq_relaxed(v, c); })
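+
+/*
+ * Sketch of the DMA pattern above with hypothetical names (dma_buf, cmd,
+ * base, GO): the __iowmb() inside writel() orders the buffer fill before
+ * the doorbell store:
+ *
+ *	dma_buf[0] = cmd;	// plain store to the DMA buffer
+ *	writel(1, base + GO);	// barrier, then MMIO "go" write
+ */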
#define out_arch(type, endian, a, v) __raw_write##type(cpu_to_##endian(v), a)
#define in_arch(type, endian, a) endian##_to_cpu(__raw_read##type(a))
#define setbits_8(addr, set) setbits(8, addr, set)
#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
+#include <asm-generic/io.h>
+
#endif /* __ASM_ARC_IO_H */