From ed56e6322b067a898a25bda1774eb1180a832246 Mon Sep 17 00:00:00 2001
From: Andy Gross <andy.gross@linaro.org>
Date: Tue, 2 Feb 2016 17:00:53 -0600
Subject: [PATCH] spi: qup: Fix DMA mode to work correctly

This patch fixes a few issues with the DMA mode. The QUP must be
placed in the RUN state before DMA transactions are executed. The
conditions for being able to use DMA also vary between revisions of
the QUP: v1.1.1 uses ADM DMA, while later revisions use BAM.

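For clarity, the reshaped DMA path is sketched below. This is a
condensed illustration of the hunks that follow, with the tx
descriptor setup and most error reporting elided; it is not a
drop-in function body:

    static int spi_qup_do_dma(struct spi_master *master,
                              struct spi_transfer *xfer,
                              unsigned long timeout)
    {
            struct spi_qup *qup = spi_master_get_devdata(master);
            int ret;

            /* the QUP must be in the RUN state before any
             * descriptors are issued to the DMA engine
             */
            ret = spi_qup_set_state(qup, QUP_STATE_RUN);
            if (ret)
                    return ret;

            /* rx completion is signalled on &qup->done */
            if (xfer->rx_buf) {
                    ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM,
                                          spi_qup_dma_done, &qup->done);
                    if (ret)
                            return ret;
                    dma_async_issue_pending(master->dma_rx);
            }

            /* ... tx follows the same pattern with &qup->dma_tx_done ... */

            /* the wait and timeout now live in the DMA path itself */
            if (xfer->rx_buf &&
                !wait_for_completion_timeout(&qup->done, timeout))
                    return -ETIMEDOUT;

            return ret;
    }

spi_qup_can_dma() is relaxed in the same spirit: the block-size
multiple restriction on the transfer length only applies to the v1
(ADM) hardware, so it is now guarded by qup->qup_v1, while the
alignment and channel checks still apply to both revisions.
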
Change-Id: Ib1f876eaa05d079e0bca4358d2b25b2940986089
Signed-off-by: Andy Gross <andy.gross@linaro.org>
---
 drivers/spi/spi-qup.c | 95 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 63 insertions(+), 32 deletions(-)

--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -143,6 +143,7 @@ struct spi_qup {
 
        struct spi_transfer     *xfer;
        struct completion       done;
+       struct completion       dma_tx_done;
        int                     error;
        int                     w_size; /* bytes per SPI word */
        int                     n_words;
@@ -285,16 +286,16 @@ static void spi_qup_fifo_write(struct sp
 
 static void spi_qup_dma_done(void *data)
 {
-       struct spi_qup *qup = data;
+       struct completion *done = data;
 
-       complete(&qup->done);
+       complete(done);
 }
 
 static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
                           enum dma_transfer_direction dir,
-                          dma_async_tx_callback callback)
+                          dma_async_tx_callback callback,
+                          void *data)
 {
-       struct spi_qup *qup = spi_master_get_devdata(master);
        unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
        struct dma_async_tx_descriptor *desc;
        struct scatterlist *sgl;
@@ -313,11 +314,11 @@ static int spi_qup_prep_sg(struct spi_ma
        }
 
        desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
-       if (!desc)
-               return -EINVAL;
+       if (IS_ERR_OR_NULL(desc))
+               return desc ? PTR_ERR(desc) : -EINVAL;
 
        desc->callback = callback;
-       desc->callback_param = qup;
+       desc->callback_param = data;
 
        cookie = dmaengine_submit(desc);
 
@@ -333,18 +334,29 @@ static void spi_qup_dma_terminate(struct
                dmaengine_terminate_all(master->dma_rx);
 }
 
-static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer)
+static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer,
+                          unsigned long timeout)
 {
+       struct spi_qup *qup = spi_master_get_devdata(master);
        dma_async_tx_callback rx_done = NULL, tx_done = NULL;
        int ret;
 
+       /* before issuing the descriptors, set the QUP to run */
+       ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+       if (ret) {
+               dev_warn(qup->dev, "cannot set RUN state\n");
+               return ret;
+       }
+
        if (xfer->rx_buf)
                rx_done = spi_qup_dma_done;
-       else if (xfer->tx_buf)
+
+       if (xfer->tx_buf)
                tx_done = spi_qup_dma_done;
 
        if (xfer->rx_buf) {
-               ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done);
+               ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done,
+                                     &qup->done);
                if (ret)
                        return ret;
 
@@ -352,17 +364,26 @@ static int spi_qup_do_dma(struct spi_mas
        }
 
        if (xfer->tx_buf) {
-               ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done);
+               ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done,
+                                     &qup->dma_tx_done);
                if (ret)
                        return ret;
 
                dma_async_issue_pending(master->dma_tx);
        }
 
-       return 0;
+       if (xfer->rx_buf && !wait_for_completion_timeout(&qup->done, timeout))
+               return -ETIMEDOUT;
+
+       if (xfer->tx_buf &&
+           !wait_for_completion_timeout(&qup->dma_tx_done, timeout))
+               ret = -ETIMEDOUT;
+
+       return ret;
 }
 
-static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer)
+static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer,
+                          unsigned long timeout)
 {
        struct spi_qup *qup = spi_master_get_devdata(master);
        int ret;
@@ -382,6 +403,15 @@ static int spi_qup_do_pio(struct spi_mas
        if (qup->mode == QUP_IO_M_MODE_FIFO)
                spi_qup_fifo_write(qup, xfer);
 
+       ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+       if (ret) {
+               dev_warn(qup->dev, "cannot set RUN state\n");
+               return ret;
+       }
+
+       if (!wait_for_completion_timeout(&qup->done, timeout))
+               return -ETIMEDOUT;
+
        return 0;
 }
 
@@ -430,7 +460,6 @@ static irqreturn_t spi_qup_qup_irq(int i
                        dev_warn(controller->dev, "CLK_OVER_RUN\n");
                if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
                        dev_warn(controller->dev, "CLK_UNDER_RUN\n");
-
                error = -EIO;
        }
 
@@ -619,6 +648,7 @@ static int spi_qup_transfer_one(struct s
        timeout = 100 * msecs_to_jiffies(timeout);
 
        reinit_completion(&controller->done);
+       reinit_completion(&controller->dma_tx_done);
 
        spin_lock_irqsave(&controller->lock, flags);
        controller->xfer     = xfer;
@@ -628,21 +658,13 @@ static int spi_qup_transfer_one(struct s
        spin_unlock_irqrestore(&controller->lock, flags);
 
        if (spi_qup_is_dma_xfer(controller->mode))
-               ret = spi_qup_do_dma(master, xfer);
+               ret = spi_qup_do_dma(master, xfer, timeout);
        else
-               ret = spi_qup_do_pio(master, xfer);
+               ret = spi_qup_do_pio(master, xfer, timeout);
 
        if (ret)
                goto exit;
 
-       if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
-               dev_warn(controller->dev, "cannot set EXECUTE state\n");
-               goto exit;
-       }
-
-       if (!wait_for_completion_timeout(&controller->done, timeout))
-               ret = -ETIMEDOUT;
-
 exit:
        spi_qup_set_state(controller, QUP_STATE_RESET);
        spin_lock_irqsave(&controller->lock, flags);
@@ -664,15 +686,23 @@ static bool spi_qup_can_dma(struct spi_m
        size_t dma_align = dma_get_cache_alignment();
        int n_words;
 
-       if (xfer->rx_buf && (xfer->len % qup->in_blk_sz ||
-           IS_ERR_OR_NULL(master->dma_rx) ||
-           !IS_ALIGNED((size_t)xfer->rx_buf, dma_align)))
-               return false;
+       if (xfer->rx_buf) {
+               if (!IS_ALIGNED((size_t)xfer->rx_buf, dma_align) ||
+                   IS_ERR_OR_NULL(master->dma_rx))
+                       return false;
 
-       if (xfer->tx_buf && (xfer->len % qup->out_blk_sz ||
-           IS_ERR_OR_NULL(master->dma_tx) ||
-           !IS_ALIGNED((size_t)xfer->tx_buf, dma_align)))
-               return false;
+               if (qup->qup_v1 && (xfer->len % qup->in_blk_sz))
+                       return false;
+       }
+
+       if (xfer->tx_buf) {
+               if (!IS_ALIGNED((size_t)xfer->tx_buf, dma_align) ||
+                   IS_ERR_OR_NULL(master->dma_tx))
+                       return false;
+
+               if (qup->qup_v1 && (xfer->len % qup->out_blk_sz))
+                       return false;
+       }
 
        n_words = xfer->len / DIV_ROUND_UP(xfer->bits_per_word, 8);
        if (n_words <= (qup->in_fifo_sz / sizeof(u32)))
@@ -875,6 +905,7 @@ static int spi_qup_probe(struct platform
 
        spin_lock_init(&controller->lock);
        init_completion(&controller->done);
+       init_completion(&controller->dma_tx_done);
 
        iomode = readl_relaxed(base + QUP_IO_M_MODES);