target/linux/layerscape/patches-4.9/815-spi-support-layerscape.patch
From a12f522b48a8cb637c1c026b46a76b2ef7983f8d Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
Date: Mon, 25 Sep 2017 12:12:41 +0800
Subject: [PATCH] spi: support layerscape

This is an integrated patch for layerscape dspi support; a condensed
sketch of the dmaengine pattern it relies on follows the diffstat below.

Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Sanchayan Maity <maitysanchayan@gmail.com>
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Sanchayan Maity <maitysanchayan@gmail.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
 drivers/spi/Kconfig        |   1 +
 drivers/spi/spi-fsl-dspi.c | 309 ++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 305 insertions(+), 5 deletions(-)
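For readers coming to this from the dmaengine side: every DMA chunk in the
driver below follows the same slave-API sequence, namely prepare one
descriptor per direction, attach a completion callback, submit, kick the
channel, then wait with a timeout. The following condensed sketch shows that
pattern in isolation; example_done() and example_xfer() are invented names
for illustration and are not part of this patch.

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/jiffies.h>

/* Completion callback: runs in the DMA driver's context, so it only
 * signals the sleeping submitter.
 */
static void example_done(void *arg)
{
	complete(arg);
}

/* Push one pre-mapped buffer to the device and wait for it to drain. */
static int example_xfer(struct dma_chan *chan, dma_addr_t phys, size_t len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_single(chan, phys, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EIO;

	desc->callback = example_done;
	desc->callback_param = &done;
	if (dma_submit_error(dmaengine_submit(desc)))
		return -EINVAL;

	/* Nothing moves until the pending queue is kicked. */
	dma_async_issue_pending(chan);

	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(3000))) {
		dmaengine_terminate_all(chan);
		return -ETIMEDOUT;
	}
	return 0;
}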
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -15,6 +15,8 @@
 
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
@@ -40,6 +42,7 @@
 #define TRAN_STATE_WORD_ODD_NUM        0x04
 
 #define DSPI_FIFO_SIZE                 4
+#define DSPI_DMA_BUFSIZE               (DSPI_FIFO_SIZE * 1024)
 
 #define SPI_MCR                0x00
 #define SPI_MCR_MASTER         (1 << 31)
@@ -72,6 +75,11 @@
 #define SPI_SR_TCFQF           0x80000000
 #define SPI_SR_CLEAR           0xdaad0000
 
+#define SPI_RSER_TFFFE         BIT(25)
+#define SPI_RSER_TFFFD         BIT(24)
+#define SPI_RSER_RFDFE         BIT(17)
+#define SPI_RSER_RFDFD         BIT(16)
+
 #define SPI_RSER               0x30
 #define SPI_RSER_EOQFE         0x10000000
 #define SPI_RSER_TCFQE         0x80000000
@@ -109,6 +117,8 @@
 
 #define SPI_TCR_TCNT_MAX       0x10000
 
+#define DMA_COMPLETION_TIMEOUT msecs_to_jiffies(3000)
+
 struct chip_data {
        u32 mcr_val;
        u32 ctar_val;
@@ -118,6 +128,7 @@ struct chip_data {
 enum dspi_trans_mode {
        DSPI_EOQ_MODE = 0,
        DSPI_TCFQ_MODE,
+       DSPI_DMA_MODE,
 };
 
 struct fsl_dspi_devtype_data {
@@ -126,7 +137,7 @@ struct fsl_dspi_devtype_data {
 };
 
 static const struct fsl_dspi_devtype_data vf610_data = {
-       .trans_mode = DSPI_EOQ_MODE,
+       .trans_mode = DSPI_DMA_MODE,
        .max_clock_factor = 2,
 };
 
@@ -140,6 +151,23 @@ static const struct fsl_dspi_devtype_dat
        .max_clock_factor = 8,
 };
 
+struct fsl_dspi_dma {
+       /* Length of transfer in words of DSPI_FIFO_SIZE */
+       u32 curr_xfer_len;
+
+       u32 *tx_dma_buf;
+       struct dma_chan *chan_tx;
+       dma_addr_t tx_dma_phys;
+       struct completion cmd_tx_complete;
+       struct dma_async_tx_descriptor *tx_desc;
+
+       u32 *rx_dma_buf;
+       struct dma_chan *chan_rx;
+       dma_addr_t rx_dma_phys;
+       struct completion cmd_rx_complete;
+       struct dma_async_tx_descriptor *rx_desc;
+};
+
 struct fsl_dspi {
        struct spi_master       *master;
        struct platform_device  *pdev;
@@ -166,8 +194,11 @@ struct fsl_dspi {
        u32                     waitflags;
 
        u32                     spi_tcnt;
+       struct fsl_dspi_dma     *dma;
 };
 
+static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word);
+
 static inline int is_double_byte_mode(struct fsl_dspi *dspi)
 {
        unsigned int val;
@@ -177,6 +208,255 @@ static inline int is_double_byte_mode(st
        return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
 }
 
+static void dspi_tx_dma_callback(void *arg)
+{
+       struct fsl_dspi *dspi = arg;
+       struct fsl_dspi_dma *dma = dspi->dma;
+
+       complete(&dma->cmd_tx_complete);
+}
+
+static void dspi_rx_dma_callback(void *arg)
+{
+       struct fsl_dspi *dspi = arg;
+       struct fsl_dspi_dma *dma = dspi->dma;
+       int rx_word;
+       int i;
+       u16 d;
+
+       rx_word = is_double_byte_mode(dspi);
+
+       if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) {
+               for (i = 0; i < dma->curr_xfer_len; i++) {
+                       d = dspi->dma->rx_dma_buf[i];
+                       rx_word ? (*(u16 *)dspi->rx = d) :
+                                               (*(u8 *)dspi->rx = d);
+                       dspi->rx += rx_word + 1;
+               }
+       }
+
+       complete(&dma->cmd_rx_complete);
+}
+
+static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
+{
+       struct fsl_dspi_dma *dma = dspi->dma;
+       struct device *dev = &dspi->pdev->dev;
+       int time_left;
+       int tx_word;
+       int i;
+
+       tx_word = is_double_byte_mode(dspi);
+
+       for (i = 0; i < dma->curr_xfer_len; i++) {
+               dspi->dma->tx_dma_buf[i] = dspi_data_to_pushr(dspi, tx_word);
+               if ((dspi->cs_change) && (!dspi->len))
+                       dspi->dma->tx_dma_buf[i] &= ~SPI_PUSHR_CONT;
+       }
+
+       dma->tx_desc = dmaengine_prep_slave_single(dma->chan_tx,
+                                       dma->tx_dma_phys,
+                                       dma->curr_xfer_len *
+                                       DMA_SLAVE_BUSWIDTH_4_BYTES,
+                                       DMA_MEM_TO_DEV,
+                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       if (!dma->tx_desc) {
+               dev_err(dev, "Not able to get desc for DMA xfer\n");
+               return -EIO;
+       }
+
+       dma->tx_desc->callback = dspi_tx_dma_callback;
+       dma->tx_desc->callback_param = dspi;
+       if (dma_submit_error(dmaengine_submit(dma->tx_desc))) {
+               dev_err(dev, "DMA submit failed\n");
+               return -EINVAL;
+       }
+
+       dma->rx_desc = dmaengine_prep_slave_single(dma->chan_rx,
+                                       dma->rx_dma_phys,
+                                       dma->curr_xfer_len *
+                                       DMA_SLAVE_BUSWIDTH_4_BYTES,
+                                       DMA_DEV_TO_MEM,
+                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       if (!dma->rx_desc) {
+               dev_err(dev, "Not able to get desc for DMA xfer\n");
+               return -EIO;
+       }
+
+       dma->rx_desc->callback = dspi_rx_dma_callback;
+       dma->rx_desc->callback_param = dspi;
+       if (dma_submit_error(dmaengine_submit(dma->rx_desc))) {
+               dev_err(dev, "DMA submit failed\n");
+               return -EINVAL;
+       }
+
+       reinit_completion(&dspi->dma->cmd_rx_complete);
+       reinit_completion(&dspi->dma->cmd_tx_complete);
+
+       dma_async_issue_pending(dma->chan_rx);
+       dma_async_issue_pending(dma->chan_tx);
+
+       time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
+                                       DMA_COMPLETION_TIMEOUT);
+       if (time_left == 0) {
+               dev_err(dev, "DMA tx timeout\n");
+               dmaengine_terminate_all(dma->chan_tx);
+               dmaengine_terminate_all(dma->chan_rx);
+               return -ETIMEDOUT;
+       }
+
+       time_left = wait_for_completion_timeout(&dspi->dma->cmd_rx_complete,
+                                       DMA_COMPLETION_TIMEOUT);
+       if (time_left == 0) {
+               dev_err(dev, "DMA rx timeout\n");
+               dmaengine_terminate_all(dma->chan_tx);
+               dmaengine_terminate_all(dma->chan_rx);
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static int dspi_dma_xfer(struct fsl_dspi *dspi)
+{
+       struct fsl_dspi_dma *dma = dspi->dma;
+       struct device *dev = &dspi->pdev->dev;
+       int curr_remaining_bytes;
+       int bytes_per_buffer;
+       int word = 1;
+       int ret = 0;
+
+       if (is_double_byte_mode(dspi))
+               word = 2;
+       curr_remaining_bytes = dspi->len;
+       bytes_per_buffer = DSPI_DMA_BUFSIZE / DSPI_FIFO_SIZE;
+       while (curr_remaining_bytes) {
+               /* Check if current transfer fits the DMA buffer */
+               dma->curr_xfer_len = curr_remaining_bytes / word;
+               if (dma->curr_xfer_len > bytes_per_buffer)
+                       dma->curr_xfer_len = bytes_per_buffer;
+
+               ret = dspi_next_xfer_dma_submit(dspi);
+               if (ret) {
+                       dev_err(dev, "DMA transfer failed\n");
+                       goto exit;
+
+               } else {
+                       curr_remaining_bytes -= dma->curr_xfer_len * word;
+                       if (curr_remaining_bytes < 0)
+                               curr_remaining_bytes = 0;
+               }
+       }
+
+exit:
+       return ret;
+}
+
+static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
+{
+       struct fsl_dspi_dma *dma;
+       struct dma_slave_config cfg;
+       struct device *dev = &dspi->pdev->dev;
+       int ret;
+
+       dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+       if (!dma)
+               return -ENOMEM;
+
+       dma->chan_rx = dma_request_slave_channel(dev, "rx");
+       if (!dma->chan_rx) {
+               dev_err(dev, "rx dma channel not available\n");
+               ret = -ENODEV;
+               return ret;
+       }
+
+       dma->chan_tx = dma_request_slave_channel(dev, "tx");
+       if (!dma->chan_tx) {
+               dev_err(dev, "tx dma channel not available\n");
+               ret = -ENODEV;
+               goto err_tx_channel;
+       }
+
+       dma->tx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
+                                       &dma->tx_dma_phys, GFP_KERNEL);
+       if (!dma->tx_dma_buf) {
+               ret = -ENOMEM;
+               goto err_tx_dma_buf;
+       }
+
+       dma->rx_dma_buf = dma_alloc_coherent(dev, DSPI_DMA_BUFSIZE,
+                                       &dma->rx_dma_phys, GFP_KERNEL);
+       if (!dma->rx_dma_buf) {
+               ret = -ENOMEM;
+               goto err_rx_dma_buf;
+       }
+
+       cfg.src_addr = phy_addr + SPI_POPR;
+       cfg.dst_addr = phy_addr + SPI_PUSHR;
+       cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       cfg.src_maxburst = 1;
+       cfg.dst_maxburst = 1;
+
+       cfg.direction = DMA_DEV_TO_MEM;
+       ret = dmaengine_slave_config(dma->chan_rx, &cfg);
+       if (ret) {
+               dev_err(dev, "can't configure rx dma channel\n");
+               ret = -EINVAL;
+               goto err_slave_config;
+       }
+
+       cfg.direction = DMA_MEM_TO_DEV;
+       ret = dmaengine_slave_config(dma->chan_tx, &cfg);
+       if (ret) {
+               dev_err(dev, "can't configure tx dma channel\n");
+               ret = -EINVAL;
+               goto err_slave_config;
+       }
+
+       dspi->dma = dma;
+       init_completion(&dma->cmd_tx_complete);
+       init_completion(&dma->cmd_rx_complete);
+
+       return 0;
+
+err_slave_config:
+       dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
+                       dma->rx_dma_buf, dma->rx_dma_phys);
+err_rx_dma_buf:
+       dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
+                       dma->tx_dma_buf, dma->tx_dma_phys);
+err_tx_dma_buf:
+       dma_release_channel(dma->chan_tx);
+err_tx_channel:
+       dma_release_channel(dma->chan_rx);
+
+       devm_kfree(dev, dma);
+       dspi->dma = NULL;
+
+       return ret;
+}
+
+static void dspi_release_dma(struct fsl_dspi *dspi)
+{
+       struct fsl_dspi_dma *dma = dspi->dma;
+       struct device *dev = &dspi->pdev->dev;
+
+       if (dma) {
+               if (dma->chan_tx) {
+                       dma_unmap_single(dev, dma->tx_dma_phys,
+                                       DSPI_DMA_BUFSIZE, DMA_TO_DEVICE);
+                       dma_release_channel(dma->chan_tx);
+               }
+
+               if (dma->chan_rx) {
+                       dma_unmap_single(dev, dma->rx_dma_phys,
+                                       DSPI_DMA_BUFSIZE, DMA_FROM_DEVICE);
+                       dma_release_channel(dma->chan_rx);
+               }
+       }
+}
+
 static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
                unsigned long clkrate)
 {
@@ -425,6 +705,12 @@ static int dspi_transfer_one_message(str
                        regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
                        dspi_tcfq_write(dspi);
                        break;
+               case DSPI_DMA_MODE:
+                       regmap_write(dspi->regmap, SPI_RSER,
+                               SPI_RSER_TFFFE | SPI_RSER_TFFFD |
+                               SPI_RSER_RFDFE | SPI_RSER_RFDFD);
+                       status = dspi_dma_xfer(dspi);
+                       break;
                default:
                        dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
                                trans_mode);
@@ -432,9 +718,13 @@ static int dspi_transfer_one_message(str
                        goto out;
                }
 
-               if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
-                       dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
-               dspi->waitflags = 0;
+               if (trans_mode != DSPI_DMA_MODE) {
+                       if (wait_event_interruptible(dspi->waitq,
+                                               dspi->waitflags))
+                               dev_err(&dspi->pdev->dev,
+                                       "wait transfer complete fail!\n");
+                       dspi->waitflags = 0;
+               }
 
                if (transfer->delay_usecs)
                        udelay(transfer->delay_usecs);
@@ -712,7 +1002,8 @@ static int dspi_probe(struct platform_de
        if (IS_ERR(dspi->regmap)) {
                dev_err(&pdev->dev, "failed to init regmap: %ld\n",
                                PTR_ERR(dspi->regmap));
-               return PTR_ERR(dspi->regmap);
+               ret = PTR_ERR(dspi->regmap);
+               goto out_master_put;
        }
 
        dspi_init(dspi);
@@ -740,6 +1031,13 @@ static int dspi_probe(struct platform_de
        if (ret)
                goto out_master_put;
 
+       if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
+               if (dspi_request_dma(dspi, res->start)) {
+                       dev_err(&pdev->dev, "can't get dma channels\n");
+                       goto out_clk_put;
+               }
+       }
+
        master->max_speed_hz =
                clk_get_rate(dspi->clk) / dspi->devtype_data->max_clock_factor;
 
@@ -768,6 +1066,7 @@ static int dspi_remove(struct platform_d
        struct fsl_dspi *dspi = spi_master_get_devdata(master);
 
        /* Disconnect from the SPI framework */
+       dspi_release_dma(dspi);
        clk_disable_unprepare(dspi->clk);
        spi_unregister_master(dspi->master);
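One asymmetry worth noting when reading the teardown: dspi_request_dma()
obtains both buffers with dma_alloc_coherent(), whose usual counterpart is
dma_free_coherent(), while dspi_release_dma() above hands them to
dma_unmap_single(). A minimal sketch of a symmetric release path, reusing
the fields declared in the patch (illustrative only, not part of the patch):

/* Hypothetical symmetric counterpart to dspi_request_dma() above:
 * coherent buffers go back via dma_free_coherent(), then the
 * channels are released.
 */
static void example_release_dma(struct fsl_dspi *dspi)
{
	struct fsl_dspi_dma *dma = dspi->dma;
	struct device *dev = &dspi->pdev->dev;

	if (!dma)
		return;

	if (dma->chan_tx) {
		dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
				  dma->tx_dma_buf, dma->tx_dma_phys);
		dma_release_channel(dma->chan_tx);
	}

	if (dma->chan_rx) {
		dma_free_coherent(dev, DSPI_DMA_BUFSIZE,
				  dma->rx_dma_buf, dma->rx_dma_phys);
		dma_release_channel(dma->chan_rx);
	}
}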