target/linux/lantiq/patches-4.4/0170-MIPS-lantiq-lock-DMA-register-accesses-for-SMP.patch
From 58078a30038b578c26c532545448fe3746648390 Mon Sep 17 00:00:00 2001
From: Hauke Mehrtens <hauke@hauke-m.de>
Date: Thu, 29 Dec 2016 21:02:57 +0100
Subject: [PATCH] MIPS: lantiq: lock DMA register accesses for SMP

The DMA controller channel and port configuration is changed by
selecting the port or channel in one register and then updating the
configuration through other registers. This has to be done as one
atomic operation. Previously only the local interrupts were
deactivated, which is sufficient on single-CPU systems. On SMP
systems stronger locking is needed, so use spinlocks instead.

On more recent SoCs (at least xrx200 and later) there are two memory
regions for changing the configuration; there we could use one area
per CPU and would not have to synchronize between the CPUs any more.

ltq_dma_open() and ltq_dma_close() now set or clear the channel bit
in LTQ_DMA_IRNEN directly instead of calling ltq_dma_enable_irq() and
ltq_dma_disable_irq(), since those helpers take the same non-recursive
spinlock and calling them with the lock held would deadlock.
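
A minimal sketch of the locked access pattern (mirroring
ltq_dma_enable_irq() in the diff below; shown here only for
illustration, not part of the change itself): the LTQ_DMA_CS write
selects a channel and the writes that follow apply to that selection,
so two CPUs interleaving their CS writes would clobber each other's
configuration, and disabling local interrupts cannot prevent that.

        static DEFINE_SPINLOCK(ltq_dma_lock);

        void
        ltq_dma_enable_irq(struct ltq_dma_channel *ch)
        {
                unsigned long flags;

                /* Serialize select-then-configure across all CPUs. */
                spin_lock_irqsave(&ltq_dma_lock, flags);
                /* Select the channel ... */
                ltq_dma_w32(ch->nr, LTQ_DMA_CS);
                /* ... then enable its interrupt line. */
                ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
                spin_unlock_irqrestore(&ltq_dma_lock, flags);
        }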

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
---
 arch/mips/lantiq/xway/dma.c | 38 ++++++++++++++++++++------------------
 1 file changed, 20 insertions(+), 18 deletions(-)

--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 
@@ -59,16 +60,17 @@
                                                ltq_dma_membase + (z))
 
 static void __iomem *ltq_dma_membase;
+static DEFINE_SPINLOCK(ltq_dma_lock);
 
 void
 ltq_dma_enable_irq(struct ltq_dma_channel *ch)
 {
        unsigned long flags;
 
-       local_irq_save(flags);
+       spin_lock_irqsave(&ltq_dma_lock, flags);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);
 
@@ -77,10 +79,10 @@ ltq_dma_disable_irq(struct ltq_dma_chann
 {
        unsigned long flags;
 
-       local_irq_save(flags);
+       spin_lock_irqsave(&ltq_dma_lock, flags);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);
 
@@ -89,10 +91,10 @@ ltq_dma_ack_irq(struct ltq_dma_channel *
 {
        unsigned long flags;
 
-       local_irq_save(flags);
+       spin_lock_irqsave(&ltq_dma_lock, flags);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);
 
@@ -101,11 +103,11 @@ ltq_dma_open(struct ltq_dma_channel *ch)
 {
        unsigned long flag;
 
-       local_irq_save(flag);
+       spin_lock_irqsave(&ltq_dma_lock, flag);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
-       ltq_dma_enable_irq(ch);
-       local_irq_restore(flag);
+       ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+       spin_unlock_irqrestore(&ltq_dma_lock, flag);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_open);
 
@@ -114,11 +116,11 @@ ltq_dma_close(struct ltq_dma_channel *ch
 {
        unsigned long flag;
 
-       local_irq_save(flag);
+       spin_lock_irqsave(&ltq_dma_lock, flag);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
-       ltq_dma_disable_irq(ch);
-       local_irq_restore(flag);
+       ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
+       spin_unlock_irqrestore(&ltq_dma_lock, flag);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_close);
 
@@ -133,7 +135,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch
                                &ch->phys, GFP_ATOMIC);
        memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE);
 
-       local_irq_save(flags);
+       spin_lock_irqsave(&ltq_dma_lock, flags);
        ltq_dma_w32(ch->nr, LTQ_DMA_CS);
        ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
        ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
@@ -142,7 +144,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch
        ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
        while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
                ;
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 
 void
@@ -152,11 +154,11 @@ ltq_dma_alloc_tx(struct ltq_dma_channel
 
        ltq_dma_alloc(ch);
 
-       local_irq_save(flags);
+       spin_lock_irqsave(&ltq_dma_lock, flags);
        ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
        ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
        ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);
 
@@ -167,11 +169,11 @@ ltq_dma_alloc_rx(struct ltq_dma_channel
 
        ltq_dma_alloc(ch);
 
-       local_irq_save(flags);
+       spin_lock_irqsave(&ltq_dma_lock, flags);
        ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
        ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
        ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
-       local_irq_restore(flags);
+       spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);
152