target/linux/layerscape/patches-5.4/819-uart-0005-tty-serial-fsl_lpuart-enable-dma-mode-for-imx8qxp.patch
From 0d6e214f5a257f9b53619ef8aa3b6e767189bdcf Mon Sep 17 00:00:00 2001
From: Fugang Duan <fugang.duan@nxp.com>
Date: Wed, 11 Sep 2019 16:21:06 +0800
Subject: [PATCH] tty: serial: fsl_lpuart: enable dma mode for imx8qxp

The imx8qxp lpuart uses eDMA for DMA mode and supports the EOP
(end-of-packet) feature. However, eDMA cannot detect the correct DADDR
for the current major loop in cyclic mode, so cyclic mode cannot be
used.

Enable the lpuart prep_slave_sg DMA mode for imx8qxp instead.

Signed-off-by: Fugang Duan <fugang.duan@nxp.com>
---
 drivers/tty/serial/fsl_lpuart.c | 280 +++++++++++++++++++++++++++++++---------
 1 file changed, 219 insertions(+), 61 deletions(-)

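Reviewer note (illustrative only, not part of the diff): the hunks below split
the old cyclic-only RX setup into lpuart_sched_rxdma_cyclic() and
lpuart_sched_rxdma_slave_sg(). With slave_sg there is no hardware refill, so
each completed transfer must be re-queued from the DMA completion callback. A
minimal sketch of that pattern using the standard dmaengine API follows; the
helper names rx_queue_one()/rx_done() are stand-ins, not driver functions.

    static void rx_done(void *arg);

    static int rx_queue_one(struct lpuart_port *sport)
    {
            struct dma_async_tx_descriptor *desc;

            /* make the single RX scatterlist entry device-visible again */
            dma_sync_sg_for_device(sport->port.dev, &sport->rx_sgl, 1,
                                   DMA_FROM_DEVICE);
            desc = dmaengine_prep_slave_sg(sport->dma_rx_chan, &sport->rx_sgl,
                                           1, DMA_DEV_TO_MEM,
                                           DMA_PREP_INTERRUPT);
            if (!desc)
                    return -EFAULT;

            desc->callback = rx_done;       /* drain + re-arm on completion */
            desc->callback_param = sport;
            dmaengine_submit(desc);
            dma_async_issue_pending(sport->dma_rx_chan);
            return 0;
    }

    static void rx_done(void *arg)
    {
            struct lpuart_port *sport = arg;

            /* push received bytes to the tty layer, then queue the next
             * transfer; a cyclic descriptor would instead keep running
             */
            lpuart_copy_rx_to_tty(sport);
            rx_queue_one(sport);
    }
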
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -131,6 +131,7 @@
 #define UARTBAUD_M10           0x20000000
 #define UARTBAUD_TDMAE         0x00800000
 #define UARTBAUD_RDMAE         0x00200000
+#define UARTBAUD_RIDMAE                0x00100000
 #define UARTBAUD_MATCFG                0x00400000
 #define UARTBAUD_BOTHEDGE      0x00020000
 #define UARTBAUD_RESYNCDIS     0x00010000
@@ -179,7 +180,7 @@
 #define UARTCTRL_SBK           0x00010000
 #define UARTCTRL_MA1IE         0x00008000
 #define UARTCTRL_MA2IE         0x00004000
-#define UARTCTRL_IDLECFG       0x00000100
+#define UARTCTRL_IDLECFG_OFF   0x8
 #define UARTCTRL_LOOPS         0x00000080
 #define UARTCTRL_DOZEEN                0x00000040
 #define UARTCTRL_RSRC          0x00000020
@@ -197,6 +198,7 @@
 #define UARTDATA_MASK          0x3ff

 #define UARTMODIR_IREN         0x00020000
+#define UARTMODIR_RTSWATER_S   0x8
 #define UARTMODIR_TXCTSSRC     0x00000020
 #define UARTMODIR_TXCTSC       0x00000010
 #define UARTMODIR_RXRTSE       0x00000008
@@ -210,6 +212,8 @@
 #define UARTFIFO_RXUF          0x00010000
 #define UARTFIFO_TXFLUSH       0x00008000
 #define UARTFIFO_RXFLUSH       0x00004000
+#define UARTFIFO_RXIDEN_MASK   0x7
+#define UARTFIFO_RXIDEN_OFF    10
 #define UARTFIFO_TXOFE         0x00000200
 #define UARTFIFO_RXUFE         0x00000100
 #define UARTFIFO_TXFE          0x00000080
@@ -226,6 +230,9 @@
 #define UARTWATER_TXWATER_OFF  0
 #define UARTWATER_RXWATER_OFF  16

+#define UARTFIFO_RXIDEN_RDRF   0x3
+#define UARTCTRL_IDLECFG       0x7
+
 /* Rx DMA timeout in ms, which is used to calculate Rx ring buffer size */
 #define DMA_RX_TIMEOUT         (10)

@@ -253,6 +260,9 @@ struct lpuart_port {
        unsigned int            txfifo_size;
        unsigned int            rxfifo_size;

+       u8                      rx_watermark;
+       bool                    dma_eeop;
+       bool                    rx_dma_cyclic;
        bool                    lpuart_dma_tx_use;
        bool                    lpuart_dma_rx_use;
        struct dma_chan         *dma_tx_chan;
@@ -278,28 +288,38 @@ struct lpuart_soc_data {
        enum lpuart_type devtype;
        char iotype;
        u8 reg_off;
+       u8 rx_watermark;
+       bool rx_dma_cyclic;
 };

 static const struct lpuart_soc_data vf_data = {
        .devtype = VF610_LPUART,
        .iotype = UPIO_MEM,
+       .rx_watermark = 1,
+       .rx_dma_cyclic = true,
 };

 static const struct lpuart_soc_data ls_data = {
        .devtype = LS1021A_LPUART,
        .iotype = UPIO_MEM32BE,
+       .rx_watermark = 0,
+       .rx_dma_cyclic = true,
 };

 static struct lpuart_soc_data imx7ulp_data = {
        .devtype = IMX7ULP_LPUART,
        .iotype = UPIO_MEM32,
        .reg_off = IMX_REG_OFF,
+       .rx_watermark = 0,
+       .rx_dma_cyclic = true,
 };

 static struct lpuart_soc_data imx8qxp_data = {
        .devtype = IMX8QXP_LPUART,
        .iotype = UPIO_MEM32,
        .reg_off = IMX_REG_OFF,
+       .rx_watermark = 31,
+       .rx_dma_cyclic = false,
 };

 static const struct of_device_id lpuart_dt_ids[] = {
@@ -313,6 +333,7 @@ MODULE_DEVICE_TABLE(of, lpuart_dt_ids);

 /* Forward declare this for the dma callbacks*/
 static void lpuart_dma_tx_complete(void *arg);
+static int lpuart_sched_rx_dma(struct lpuart_port *sport);

 static inline bool is_imx8qxp_lpuart(struct lpuart_port *sport)
 {
@@ -1000,19 +1021,15 @@ static irqreturn_t lpuart32_int(int irq,
        if ((sts & UARTSTAT_TDRE) && !sport->lpuart_dma_tx_use)
                lpuart32_txint(sport);

+       if (sport->lpuart_dma_rx_use && sport->dma_eeop)
+               sts &= ~UARTSTAT_IDLE;
+
        lpuart32_write(&sport->port, sts, UARTSTAT);
        return IRQ_HANDLED;
 }

-static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
+static void lpuart_rx_error_stat(struct lpuart_port *sport)
 {
-       struct tty_port *port = &sport->port.state->port;
-       struct dma_tx_state state;
-       enum dma_status dmastat;
-       struct circ_buf *ring = &sport->rx_ring;
-       unsigned long flags;
-       int count = 0;
-
        if (lpuart_is_32(sport)) {
                unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);

@@ -1064,8 +1081,21 @@ static void lpuart_copy_rx_to_tty(struct
                        writeb(cr2, sport->port.membase + UARTCR2);
                }
        }
+}
+
+static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
+{
+       struct tty_port *port = &sport->port.state->port;
+       struct dma_tx_state state;
+       enum dma_status dmastat;
+       struct circ_buf *ring = &sport->rx_ring;
+       unsigned long flags;
+       int count = 0;

-       async_tx_ack(sport->dma_rx_desc);
+       if (!is_imx8qxp_lpuart(sport)) {
+               lpuart_rx_error_stat(sport);
+               async_tx_ack(sport->dma_rx_desc);
+       }

        spin_lock_irqsave(&sport->port.lock, flags);

@@ -1128,7 +1158,33 @@ static void lpuart_copy_rx_to_tty(struct
        spin_unlock_irqrestore(&sport->port.lock, flags);

        tty_flip_buffer_push(port);
-       mod_timer(&sport->lpuart_timer, jiffies + sport->dma_rx_timeout);
+
+       if (!sport->dma_eeop)
+               mod_timer(&sport->lpuart_timer,
+                         jiffies + sport->dma_rx_timeout);
+}
+
+static void lpuart_dma_rx_post_handler(struct lpuart_port *sport)
+{
+       unsigned long flags;
+       unsigned long rxcount;
+
+       spin_lock_irqsave(&sport->port.lock, flags);
+
+       /* For end of packet, clear the idle flag to avoid triggering
+        * the next transfer. Only i.MX8x lpuart supports EEOP.
+        */
+       if (sport->dma_eeop && lpuart_is_32(sport)) {
+               rxcount = lpuart32_read(&sport->port, UARTWATER);
+               rxcount = rxcount >> UARTWATER_RXCNT_OFF;
+               if (!rxcount)
+                       lpuart32_write(&sport->port, UARTSTAT_IDLE, UARTSTAT);
+       }
+
+       lpuart_sched_rx_dma(sport);
+
+       spin_unlock_irqrestore(&sport->port.lock, flags);
+
 }

 static void lpuart_dma_rx_complete(void *arg)
@@ -1136,6 +1192,8 @@ static void lpuart_dma_rx_complete(void
        struct lpuart_port *sport = arg;

        lpuart_copy_rx_to_tty(sport);
+       if (!sport->rx_dma_cyclic)
+               lpuart_dma_rx_post_handler(sport);
 }

 static void lpuart_timer_func(struct timer_list *t)
@@ -1143,13 +1201,78 @@ static void lpuart_timer_func(struct tim
        struct lpuart_port *sport = from_timer(sport, t, lpuart_timer);

        lpuart_copy_rx_to_tty(sport);
+       if (!sport->rx_dma_cyclic) {
+               dmaengine_terminate_async(sport->dma_rx_chan);
+               lpuart_dma_rx_post_handler(sport);
+       }
 }

-static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
+static int lpuart_sched_rxdma_cyclic(struct lpuart_port *sport)
+{
+       sport->dma_rx_desc = dmaengine_prep_dma_cyclic(sport->dma_rx_chan,
+                                sg_dma_address(&sport->rx_sgl),
+                                sport->rx_sgl.length,
+                                sport->rx_sgl.length / 2,
+                                DMA_DEV_TO_MEM,
+                                DMA_PREP_INTERRUPT);
+       if (!sport->dma_rx_desc) {
+               dev_err(sport->port.dev, "Cannot prepare cyclic DMA\n");
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+static int lpuart_sched_rxdma_slave_sg(struct lpuart_port *sport)
+{
+       dma_sync_sg_for_device(sport->port.dev, &sport->rx_sgl, 1,
+                              DMA_FROM_DEVICE);
+       sport->dma_rx_desc = dmaengine_prep_slave_sg(sport->dma_rx_chan,
+                               &sport->rx_sgl,
+                               1,
+                               DMA_DEV_TO_MEM,
+                               DMA_PREP_INTERRUPT);
+       if (!sport->dma_rx_desc) {
+               dev_err(sport->port.dev, "Cannot prepare slave_sg DMA\n");
+               return -EFAULT;
+       }
+       sport->rx_ring.tail = 0;
+       sport->rx_ring.head = 0;
+
+       return 0;
+}
+
+static int lpuart_sched_rx_dma(struct lpuart_port *sport)
+{
+       unsigned long temp;
+       int ret;
+
+       if (sport->rx_dma_cyclic)
+               ret = lpuart_sched_rxdma_cyclic(sport);
+       else
+               ret = lpuart_sched_rxdma_slave_sg(sport);
+
+       sport->dma_rx_desc->callback = lpuart_dma_rx_complete;
+       sport->dma_rx_desc->callback_param = sport;
+       sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc);
+       dma_async_issue_pending(sport->dma_rx_chan);
+
+       if (lpuart_is_32(sport)) {
+               temp = lpuart32_read(&sport->port, UARTBAUD);
+               if (sport->dma_eeop)
+                       temp |= UARTBAUD_RIDMAE;
+               temp |= UARTBAUD_RDMAE;
+               lpuart32_write(&sport->port, temp, UARTBAUD);
+       } else {
+               writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_RDMAS,
+                      sport->port.membase + UARTCR5);
+       }
+
+       return ret;
+}
+
+static void lpuart_get_rx_dma_rng_len(struct lpuart_port *sport)
 {
-       struct dma_slave_config dma_rx_sconfig = {};
-       struct circ_buf *ring = &sport->rx_ring;
-       int ret, nent;
        int bits, baud;
        struct tty_port *port = &sport->port.state->port;
        struct tty_struct *tty = port->tty;
@@ -1169,6 +1292,18 @@ static inline int lpuart_start_rx_dma(st
        sport->rx_dma_rng_buf_len = (1 << (fls(sport->rx_dma_rng_buf_len) - 1));
        if (sport->rx_dma_rng_buf_len < 16)
                sport->rx_dma_rng_buf_len = 16;
+}
+
+static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
+{
+       struct dma_slave_config dma_rx_sconfig = {};
+       struct circ_buf *ring = &sport->rx_ring;
+       int ret, nent;
+
+       if (!sport->dma_eeop)
+               lpuart_get_rx_dma_rng_len(sport);
+       else
+               sport->rx_dma_rng_buf_len = PAGE_SIZE;

        ring->buf = kzalloc(sport->rx_dma_rng_buf_len, GFP_ATOMIC);
        if (!ring->buf)
@@ -1194,32 +1329,7 @@ static inline int lpuart_start_rx_dma(st
                return ret;
        }

-       sport->dma_rx_desc = dmaengine_prep_dma_cyclic(sport->dma_rx_chan,
-                                sg_dma_address(&sport->rx_sgl),
-                                sport->rx_sgl.length,
-                                sport->rx_sgl.length / 2,
-                                DMA_DEV_TO_MEM,
-                                DMA_PREP_INTERRUPT);
-       if (!sport->dma_rx_desc) {
-               dev_err(sport->port.dev, "Cannot prepare cyclic DMA\n");
-               return -EFAULT;
-       }
-
-       sport->dma_rx_desc->callback = lpuart_dma_rx_complete;
-       sport->dma_rx_desc->callback_param = sport;
-       sport->dma_rx_cookie = dmaengine_submit(sport->dma_rx_desc);
-       dma_async_issue_pending(sport->dma_rx_chan);
-
-       if (lpuart_is_32(sport)) {
-               unsigned long temp = lpuart32_read(&sport->port, UARTBAUD);
-
-               lpuart32_write(&sport->port, temp | UARTBAUD_RDMAE, UARTBAUD);
-       } else {
-               writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_RDMAS,
-                      sport->port.membase + UARTCR5);
-       }
-
-       return 0;
+       return lpuart_sched_rx_dma(sport);
 }

 static void lpuart_dma_rx_free(struct uart_port *port)
@@ -1405,8 +1515,10 @@ static void lpuart_setup_watermark(struc
                writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO);
        }

+       if (uart_console(&sport->port))
+               sport->rx_watermark = 1;
        writeb(0, sport->port.membase + UARTTWFIFO);
-       writeb(1, sport->port.membase + UARTRWFIFO);
+       writeb(sport->rx_watermark, sport->port.membase + UARTRWFIFO);

        /* Restore cr2 */
        writeb(cr2_saved, sport->port.membase + UARTCR2);
@@ -1427,6 +1539,7 @@ static void lpuart32_setup_watermark(str
 {
        unsigned long val, ctrl;
        unsigned long ctrl_saved;
+       unsigned long rxiden_cnt;

        ctrl = lpuart32_read(&sport->port, UARTCTRL);
        ctrl_saved = ctrl;
@@ -1438,12 +1551,26 @@ static void lpuart32_setup_watermark(str
        val = lpuart32_read(&sport->port, UARTFIFO);
        val |= UARTFIFO_TXFE | UARTFIFO_RXFE;
        val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
+       val &= ~(UARTFIFO_RXIDEN_MASK << UARTFIFO_RXIDEN_OFF);
+       rxiden_cnt = sport->dma_eeop ? 0 : UARTFIFO_RXIDEN_RDRF;
+       val |= ((rxiden_cnt & UARTFIFO_RXIDEN_MASK) <<
+               UARTFIFO_RXIDEN_OFF);
        lpuart32_write(&sport->port, val, UARTFIFO);

        /* set the watermark */
-       val = (0x1 << UARTWATER_RXWATER_OFF) | (0x0 << UARTWATER_TXWATER_OFF);
+       if (uart_console(&sport->port))
+               sport->rx_watermark = 1;
+       val = (sport->rx_watermark << UARTWATER_RXWATER_OFF) |
+             (0x0 << UARTWATER_TXWATER_OFF);
        lpuart32_write(&sport->port, val, UARTWATER);

+       /* set RTS watermark */
+       if (!uart_console(&sport->port)) {
+               val = lpuart32_read(&sport->port, UARTMODIR);
+               val = (sport->rxfifo_size >> 1) << UARTMODIR_RTSWATER_S;
+               lpuart32_write(&sport->port, val, UARTMODIR);
+       }
+
        /* Restore cr2 */
        lpuart32_write(&sport->port, ctrl_saved, UARTCTRL);
 }
@@ -1455,17 +1582,29 @@ static void lpuart32_setup_watermark_ena
        lpuart32_setup_watermark(sport);

        temp = lpuart32_read(&sport->port, UARTCTRL);
-       temp |= UARTCTRL_RE | UARTCTRL_TE | UARTCTRL_ILIE;
+       temp |= UARTCTRL_RE | UARTCTRL_TE;
+       temp |= UARTCTRL_IDLECFG << UARTCTRL_IDLECFG_OFF;
        lpuart32_write(&sport->port, temp, UARTCTRL);
 }

 static void rx_dma_timer_init(struct lpuart_port *sport)
 {
+       if (sport->dma_eeop)
+               return;
+
        timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0);
        sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
        add_timer(&sport->lpuart_timer);
 }

+static void lpuart_del_timer_sync(struct lpuart_port *sport)
+{
+       if (sport->dma_eeop)
+               return;
+
+       del_timer_sync(&sport->lpuart_timer);
+}
+
 static void lpuart_tx_dma_startup(struct lpuart_port *sport)
 {
        u32 uartbaud;
@@ -1529,19 +1668,23 @@ static int lpuart_startup(struct uart_po
        return 0;
 }

+static void lpuart32_hw_disable(struct lpuart_port *sport)
+{
+       unsigned long temp;
+
+       temp = lpuart32_read(&sport->port, UARTCTRL);
+       temp &= ~(UARTCTRL_RIE | UARTCTRL_ILIE | UARTCTRL_RE |
+                 UARTCTRL_TIE | UARTCTRL_TE);
+       lpuart32_write(&sport->port, temp, UARTCTRL);
+}
+
 static void lpuart32_configure(struct lpuart_port *sport)
 {
        unsigned long temp;

-       if (sport->lpuart_dma_rx_use) {
-               /* RXWATER must be 0 */
-               temp = lpuart32_read(&sport->port, UARTWATER);
-               temp &= ~(UARTWATER_WATER_MASK << UARTWATER_RXWATER_OFF);
-               lpuart32_write(&sport->port, temp, UARTWATER);
-       }
        temp = lpuart32_read(&sport->port, UARTCTRL);
        if (!sport->lpuart_dma_rx_use)
-               temp |= UARTCTRL_RIE;
+               temp |= UARTCTRL_RIE | UARTCTRL_ILIE;
        if (!sport->lpuart_dma_tx_use)
                temp |= UARTCTRL_TIE;
        lpuart32_write(&sport->port, temp, UARTCTRL);
@@ -1574,12 +1717,12 @@ static int lpuart32_startup(struct uart_

        spin_lock_irqsave(&sport->port.lock, flags);

-       lpuart32_setup_watermark_enable(sport);
-
+       lpuart32_hw_disable(sport);

        lpuart_rx_dma_startup(sport);
        lpuart_tx_dma_startup(sport);

+       lpuart32_setup_watermark_enable(sport);
        lpuart32_configure(sport);

        spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -1589,7 +1732,7 @@ static int lpuart32_startup(struct uart_
 static void lpuart_dma_shutdown(struct lpuart_port *sport)
 {
        if (sport->lpuart_dma_rx_use) {
-               del_timer_sync(&sport->lpuart_timer);
+               lpuart_del_timer_sync(sport);
                lpuart_dma_rx_free(&sport->port);
        }

@@ -1630,11 +1773,22 @@ static void lpuart32_shutdown(struct uar

        spin_lock_irqsave(&port->lock, flags);

+       /* clear status */
+       temp = lpuart32_read(&sport->port, UARTSTAT);
+       lpuart32_write(&sport->port, temp, UARTSTAT);
+
+       /* disable Rx/Tx DMA */
+       temp = lpuart32_read(port, UARTBAUD);
+       temp &= ~(UARTBAUD_TDMAE | UARTBAUD_RDMAE | UARTBAUD_RIDMAE);
+       lpuart32_write(port, temp, UARTBAUD);
+
        /* disable Rx/Tx and interrupts */
        temp = lpuart32_read(port, UARTCTRL);
-       temp &= ~(UARTCTRL_TE | UARTCTRL_RE |
-                       UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE);
+       temp &= ~(UARTCTRL_TE | UARTCTRL_RE | UARTCTRL_TIE |
+               UARTCTRL_TCIE | UARTCTRL_RIE | UARTCTRL_ILIE |
+               UARTCTRL_LOOPS);
        lpuart32_write(port, temp, UARTCTRL);
+       lpuart32_write(port, 0, UARTMODIR);

        spin_unlock_irqrestore(&port->lock, flags);

@@ -1731,10 +1885,10 @@ lpuart_set_termios(struct uart_port *por
         * baud rate and restart Rx DMA path.
         *
         * Since timer function acqures sport->port.lock, need to stop before
-        * acquring same lock because otherwise del_timer_sync() can deadlock.
+        * acquring same lock because otherwise lpuart_del_timer_sync() can deadlock.
         */
        if (old && sport->lpuart_dma_rx_use) {
-               del_timer_sync(&sport->lpuart_timer);
+               lpuart_del_timer_sync(sport);
                lpuart_dma_rx_free(&sport->port);
        }

@@ -1946,10 +2100,10 @@ lpuart32_set_termios(struct uart_port *p
         * baud rate and restart Rx DMA path.
         *
         * Since timer function acqures sport->port.lock, need to stop before
-        * acquring same lock because otherwise del_timer_sync() can deadlock.
        */
        if (old && sport->lpuart_dma_rx_use) {
-               del_timer_sync(&sport->lpuart_timer);
+               lpuart_del_timer_sync(sport);
                lpuart_dma_rx_free(&sport->port);
        }

@@ -2458,6 +2612,10 @@ static int lpuart_probe(struct platform_
        sport->port.dev = &pdev->dev;
        sport->port.type = PORT_LPUART;
        sport->devtype = sdata->devtype;
+       sport->rx_dma_cyclic = sdata->rx_dma_cyclic;
+       sport->rx_watermark = sdata->rx_watermark;
+       sport->dma_eeop = is_imx8qxp_lpuart(sport);
+
        ret = platform_get_irq(pdev, 0);
        if (ret < 0)
                return ret;
@@ -2620,7 +2778,7 @@ static int lpuart_suspend(struct device
                 * Rx DMA path before suspend and start Rx DMA path on resume.
                 */
                if (irq_wake) {
-                       del_timer_sync(&sport->lpuart_timer);
+                       lpuart_del_timer_sync(sport);
                        lpuart_dma_rx_free(&sport->port);
                }
