From ba39b344e9240a4a5fd4ab8178200b85cd1809da Mon Sep 17 00:00:00 2001
From: Christian Marangi <ansuelsmth@gmail.com>
Date: Sat, 23 Jul 2022 16:29:32 +0200
Subject: [PATCH 4/5] net: ethernet: stmicro: stmmac: generate stmmac dma conf
 before open

Rework the driver to generate the stmmac dma_conf before stmmac_open.
This permits a caller to first check whether a new dma_conf can be
allocated and, only if that succeeds, pass it directly to __stmmac_open
to "open" the interface with the new configuration.

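This enables, for example, a reconfiguration path such as an MTU change
to build the new rings up front and leave the running setup untouched
if allocation fails. A sketch of such a caller (hypothetical here, not
part of this patch; new_mtu is an assumed parameter):

        /* Hypothetical user: swap in a freshly validated DMA config. */
        dma_conf = stmmac_setup_dma_desc(priv, new_mtu);
        if (IS_ERR(dma_conf))
                return PTR_ERR(dma_conf);   /* old rings still intact */

        stmmac_release(dev);                /* tear down the old setup */
        ret = __stmmac_open(dev, dma_conf); /* reopen with the new conf */
        kfree(dma_conf);                    /* __stmmac_open copies it */
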
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 .../net/ethernet/stmicro/stmmac/stmmac_main.c | 462 +++++++++++-------
 1 file changed, 289 insertions(+), 173 deletions(-)

--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1301,7 +1301,8 @@ static int stmmac_phy_setup(struct stmma
        return 0;
 }

-static void stmmac_display_rx_rings(struct stmmac_priv *priv)
+static void stmmac_display_rx_rings(struct stmmac_priv *priv,
+                                   struct stmmac_dma_conf *dma_conf)
 {
        u32 rx_cnt = priv->plat->rx_queues_to_use;
        unsigned int desc_size;
@@ -1310,7 +1311,7 @@ static void stmmac_display_rx_rings(stru

        /* Display RX rings */
        for (queue = 0; queue < rx_cnt; queue++) {
-               struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+               struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

                pr_info("\tRX Queue %u rings\n", queue);

@@ -1323,12 +1324,13 @@ static void stmmac_display_rx_rings(stru
                }

                /* Display RX ring */
-               stmmac_display_ring(priv, head_rx, priv->dma_conf.dma_rx_size, true,
+               stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
                                    rx_q->dma_rx_phy, desc_size);
        }
 }

-static void stmmac_display_tx_rings(struct stmmac_priv *priv)
+static void stmmac_display_tx_rings(struct stmmac_priv *priv,
+                                   struct stmmac_dma_conf *dma_conf)
 {
        u32 tx_cnt = priv->plat->tx_queues_to_use;
        unsigned int desc_size;
@@ -1337,7 +1339,7 @@ static void stmmac_display_tx_rings(stru

        /* Display TX rings */
        for (queue = 0; queue < tx_cnt; queue++) {
-               struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+               struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

                pr_info("\tTX Queue %d rings\n", queue);

@@ -1352,18 +1354,19 @@ static void stmmac_display_tx_rings(stru
                        desc_size = sizeof(struct dma_desc);
                }

-               stmmac_display_ring(priv, head_tx, priv->dma_conf.dma_tx_size, false,
+               stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
                                    tx_q->dma_tx_phy, desc_size);
        }
 }

-static void stmmac_display_rings(struct stmmac_priv *priv)
+static void stmmac_display_rings(struct stmmac_priv *priv,
+                                struct stmmac_dma_conf *dma_conf)
 {
        /* Display RX ring */
-       stmmac_display_rx_rings(priv);
+       stmmac_display_rx_rings(priv, dma_conf);

        /* Display TX ring */
-       stmmac_display_tx_rings(priv);
+       stmmac_display_tx_rings(priv, dma_conf);
 }

 static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -1387,44 +1390,50 @@ static int stmmac_set_bfsize(int mtu, in
 /**
  * stmmac_clear_rx_descriptors - clear RX descriptors
  * @priv: driver private structure
+ * @dma_conf: structure to take the dma data
  * @queue: RX queue index
  * Description: this function is called to clear the RX descriptors
  * in case of both basic and extended descriptors are used.
  */
-static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
+static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
+                                       struct stmmac_dma_conf *dma_conf,
+                                       u32 queue)
 {
-       struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+       struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
        int i;

        /* Clear the RX descriptors */
-       for (i = 0; i < priv->dma_conf.dma_rx_size; i++)
+       for (i = 0; i < dma_conf->dma_rx_size; i++)
                if (priv->extend_desc)
                        stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
                                        priv->use_riwt, priv->mode,
-                                       (i == priv->dma_conf.dma_rx_size - 1),
-                                       priv->dma_conf.dma_buf_sz);
+                                       (i == dma_conf->dma_rx_size - 1),
+                                       dma_conf->dma_buf_sz);
                else
                        stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
                                        priv->use_riwt, priv->mode,
-                                       (i == priv->dma_conf.dma_rx_size - 1),
-                                       priv->dma_conf.dma_buf_sz);
+                                       (i == dma_conf->dma_rx_size - 1),
+                                       dma_conf->dma_buf_sz);
 }

 /**
  * stmmac_clear_tx_descriptors - clear tx descriptors
  * @priv: driver private structure
+ * @dma_conf: structure to take the dma data
  * @queue: TX queue index.
  * Description: this function is called to clear the TX descriptors
  * in case of both basic and extended descriptors are used.
  */
-static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
+static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
+                                       struct stmmac_dma_conf *dma_conf,
+                                       u32 queue)
 {
-       struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+       struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
        int i;

        /* Clear the TX descriptors */
-       for (i = 0; i < priv->dma_conf.dma_tx_size; i++) {
-               int last = (i == (priv->dma_conf.dma_tx_size - 1));
+       for (i = 0; i < dma_conf->dma_tx_size; i++) {
+               int last = (i == (dma_conf->dma_tx_size - 1));
                struct dma_desc *p;

                if (priv->extend_desc)
@@ -1441,10 +1450,12 @@ static void stmmac_clear_tx_descriptors(
 /**
  * stmmac_clear_descriptors - clear descriptors
  * @priv: driver private structure
+ * @dma_conf: structure to take the dma data
  * Description: this function is called to clear the TX and RX descriptors
  * in case of both basic and extended descriptors are used.
  */
-static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+static void stmmac_clear_descriptors(struct stmmac_priv *priv,
+                                    struct stmmac_dma_conf *dma_conf)
 {
        u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
        u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
@@ -1452,16 +1463,17 @@ static void stmmac_clear_descriptors(str

        /* Clear the RX descriptors */
        for (queue = 0; queue < rx_queue_cnt; queue++)
-               stmmac_clear_rx_descriptors(priv, queue);
+               stmmac_clear_rx_descriptors(priv, dma_conf, queue);

        /* Clear the TX descriptors */
        for (queue = 0; queue < tx_queue_cnt; queue++)
-               stmmac_clear_tx_descriptors(priv, queue);
+               stmmac_clear_tx_descriptors(priv, dma_conf, queue);
 }

 /**
  * stmmac_init_rx_buffers - init the RX descriptor buffer.
  * @priv: driver private structure
+ * @dma_conf: structure to take the dma data
  * @p: descriptor pointer
  * @i: descriptor index
  * @flags: gfp flag
@@ -1469,10 +1481,12 @@ static void stmmac_clear_descriptors(str
  * Description: this function is called to allocate a receive buffer, perform
  * the DMA mapping and init the descriptor.
  */
-static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
+static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
+                                 struct stmmac_dma_conf *dma_conf,
+                                 struct dma_desc *p,
                                  int i, gfp_t flags, u32 queue)
 {
-       struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+       struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
        struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

        if (!buf->page) {
@@ -1497,7 +1511,7 @@ static int stmmac_init_rx_buffers(struct
        buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

        stmmac_set_desc_addr(priv, p, buf->addr);
-       if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB)
+       if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
                stmmac_init_desc3(priv, p);

        return 0;
@@ -1506,12 +1520,13 @@ static int stmmac_init_rx_buffers(struct
 /**
  * stmmac_free_rx_buffer - free RX dma buffers
  * @priv: private structure
- * @queue: RX queue index
+ * @rx_q: RX queue
  * @i: buffer index.
  */
-static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
+                                 struct stmmac_rx_queue *rx_q,
+                                 int i)
 {
-       struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
        struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

        if (buf->page)
@@ -1526,12 +1541,15 @@ static void stmmac_free_rx_buffer(struct
 /**
  * stmmac_free_tx_buffer - free RX dma buffers
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * @queue: RX queue index
  * @i: buffer index.
  */
-static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
+                                 struct stmmac_dma_conf *dma_conf,
+                                 u32 queue, int i)
 {
-       struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+       struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

        if (tx_q->tx_skbuff_dma[i].buf &&
            tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
@@ -1570,23 +1588,28 @@ static void stmmac_free_tx_buffer(struct
 /**
  * dma_free_rx_skbufs - free RX dma buffers
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * @queue: RX queue index
  */
-static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_rx_skbufs(struct stmmac_priv *priv,
+                              struct stmmac_dma_conf *dma_conf,
+                              u32 queue)
 {
+       struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
        int i;

-       for (i = 0; i < priv->dma_conf.dma_rx_size; i++)
-               stmmac_free_rx_buffer(priv, queue, i);
+       for (i = 0; i < dma_conf->dma_rx_size; i++)
+               stmmac_free_rx_buffer(priv, rx_q, i);
 }

-static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
-                                  gfp_t flags)
+static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
+                                  struct stmmac_dma_conf *dma_conf,
+                                  u32 queue, gfp_t flags)
 {
-       struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+       struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
        int i;

-       for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
+       for (i = 0; i < dma_conf->dma_rx_size; i++) {
                struct dma_desc *p;
                int ret;

@@ -1595,7 +1618,7 @@ static int stmmac_alloc_rx_buffers(struc
                else
                        p = rx_q->dma_rx + i;

-               ret = stmmac_init_rx_buffers(priv, p, i, flags,
+               ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
                                             queue);
                if (ret)
                        return ret;
@@ -1609,14 +1632,17 @@ static int stmmac_alloc_rx_buffers(struc
 /**
  * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * @queue: RX queue index
  */
-static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
+                               struct stmmac_dma_conf *dma_conf,
+                               u32 queue)
 {
-       struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+       struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
        int i;

-       for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
+       for (i = 0; i < dma_conf->dma_rx_size; i++) {
                struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

                if (!buf->xdp)
@@ -1627,12 +1653,14 @@ static void dma_free_rx_xskbufs(struct s
        }
 }

-static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
+static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
+                                     struct stmmac_dma_conf *dma_conf,
+                                     u32 queue)
 {
-       struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+       struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
        int i;

-       for (i = 0; i < priv->dma_conf.dma_rx_size; i++) {
+       for (i = 0; i < dma_conf->dma_rx_size; i++) {
                struct stmmac_rx_buffer *buf;
                dma_addr_t dma_addr;
                struct dma_desc *p;
@@ -1667,22 +1695,25 @@ static struct xsk_buff_pool *stmmac_get_
 /**
  * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
  * @priv: driver private structure
+ * @dma_conf: structure to take the dma data
  * @queue: RX queue index
  * @flags: gfp flag.
  * Description: this function initializes the DMA RX descriptors
  * and allocates the socket buffers. It supports the chained and ring
  * modes.
  */
-static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
+static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
+                                   struct stmmac_dma_conf *dma_conf,
+                                   u32 queue, gfp_t flags)
 {
-       struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+       struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
        int ret;

        netif_dbg(priv, probe, priv->dev,
                  "(%s) dma_rx_phy=0x%08x\n", __func__,
                  (u32)rx_q->dma_rx_phy);

-       stmmac_clear_rx_descriptors(priv, queue);
+       stmmac_clear_rx_descriptors(priv, dma_conf, queue);

        xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

@@ -1709,9 +1740,9 @@ static int __init_dma_rx_desc_rings(stru
                /* RX XDP ZC buffer pool may not be populated, e.g.
                 * xdpsock TX-only.
                 */
-               stmmac_alloc_rx_buffers_zc(priv, queue);
+               stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
        } else {
-               ret = stmmac_alloc_rx_buffers(priv, queue, flags);
+               ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
                if (ret < 0)
                        return -ENOMEM;
        }
@@ -1721,17 +1752,19 @@ static int __init_dma_rx_desc_rings(stru
                if (priv->extend_desc)
                        stmmac_mode_init(priv, rx_q->dma_erx,
                                         rx_q->dma_rx_phy,
-                                        priv->dma_conf.dma_rx_size, 1);
+                                        dma_conf->dma_rx_size, 1);
                else
                        stmmac_mode_init(priv, rx_q->dma_rx,
                                         rx_q->dma_rx_phy,
-                                        priv->dma_conf.dma_rx_size, 0);
+                                        dma_conf->dma_rx_size, 0);
        }

        return 0;
 }

-static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_rx_desc_rings(struct net_device *dev,
+                                 struct stmmac_dma_conf *dma_conf,
+                                 gfp_t flags)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 rx_count = priv->plat->rx_queues_to_use;
@@ -1743,7 +1776,7 @@ static int init_dma_rx_desc_rings(struct
                  "SKB addresses:\nskb\t\tskb data\tdma data\n");

        for (queue = 0; queue < rx_count; queue++) {
-               ret = __init_dma_rx_desc_rings(priv, queue, flags);
+               ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
                if (ret)
                        goto err_init_rx_buffers;
        }
@@ -1752,12 +1785,12 @@ static int init_dma_rx_desc_rings(struct

 err_init_rx_buffers:
        while (queue >= 0) {
-               struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+               struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

                if (rx_q->xsk_pool)
-                       dma_free_rx_xskbufs(priv, queue);
+                       dma_free_rx_xskbufs(priv, dma_conf, queue);
                else
-                       dma_free_rx_skbufs(priv, queue);
+                       dma_free_rx_skbufs(priv, dma_conf, queue);

                rx_q->buf_alloc_num = 0;
                rx_q->xsk_pool = NULL;
@@ -1774,14 +1807,17 @@ err_init_rx_buffers:
 /**
  * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
  * @priv: driver private structure
- * @queue : TX queue index
+ * @dma_conf: structure to take the dma data
+ * @queue: TX queue index
  * Description: this function initializes the DMA TX descriptors
  * and allocates the socket buffers. It supports the chained and ring
  * modes.
  */
-static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
+static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
+                                   struct stmmac_dma_conf *dma_conf,
+                                   u32 queue)
 {
-       struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+       struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
        int i;

        netif_dbg(priv, probe, priv->dev,
@@ -1793,16 +1829,16 @@ static int __init_dma_tx_desc_rings(stru
                if (priv->extend_desc)
                        stmmac_mode_init(priv, tx_q->dma_etx,
                                         tx_q->dma_tx_phy,
-                                        priv->dma_conf.dma_tx_size, 1);
+                                        dma_conf->dma_tx_size, 1);
                else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
                        stmmac_mode_init(priv, tx_q->dma_tx,
                                         tx_q->dma_tx_phy,
-                                        priv->dma_conf.dma_tx_size, 0);
+                                        dma_conf->dma_tx_size, 0);
        }

        tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

-       for (i = 0; i < priv->dma_conf.dma_tx_size; i++) {
+       for (i = 0; i < dma_conf->dma_tx_size; i++) {
                struct dma_desc *p;

                if (priv->extend_desc)
@@ -1824,7 +1860,8 @@ static int __init_dma_tx_desc_rings(stru
        return 0;
 }

-static int init_dma_tx_desc_rings(struct net_device *dev)
+static int init_dma_tx_desc_rings(struct net_device *dev,
+                                 struct stmmac_dma_conf *dma_conf)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        u32 tx_queue_cnt;
@@ -1833,7 +1870,7 @@ static int init_dma_tx_desc_rings(struct
        tx_queue_cnt = priv->plat->tx_queues_to_use;

        for (queue = 0; queue < tx_queue_cnt; queue++)
-               __init_dma_tx_desc_rings(priv, queue);
+               __init_dma_tx_desc_rings(priv, dma_conf, queue);

        return 0;
 }
@@ -1841,26 +1878,29 @@ static int init_dma_tx_desc_rings(struct
 /**
  * init_dma_desc_rings - init the RX/TX descriptor rings
  * @dev: net device structure
+ * @dma_conf: structure to take the dma data
  * @flags: gfp flag.
  * Description: this function initializes the DMA RX/TX descriptors
  * and allocates the socket buffers. It supports the chained and ring
  * modes.
  */
-static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_desc_rings(struct net_device *dev,
+                              struct stmmac_dma_conf *dma_conf,
+                              gfp_t flags)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        int ret;

-       ret = init_dma_rx_desc_rings(dev, flags);
+       ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
        if (ret)
                return ret;

-       ret = init_dma_tx_desc_rings(dev);
+       ret = init_dma_tx_desc_rings(dev, dma_conf);

-       stmmac_clear_descriptors(priv);
+       stmmac_clear_descriptors(priv, dma_conf);

        if (netif_msg_hw(priv))
-               stmmac_display_rings(priv);
+               stmmac_display_rings(priv, dma_conf);

        return ret;
 }
@@ -1868,17 +1908,20 @@ static int init_dma_desc_rings(struct ne
 /**
  * dma_free_tx_skbufs - free TX dma buffers
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * @queue: TX queue index
  */
-static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_tx_skbufs(struct stmmac_priv *priv,
+                              struct stmmac_dma_conf *dma_conf,
+                              u32 queue)
 {
-       struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+       struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
        int i;

        tx_q->xsk_frames_done = 0;

-       for (i = 0; i < priv->dma_conf.dma_tx_size; i++)
-               stmmac_free_tx_buffer(priv, queue, i);
+       for (i = 0; i < dma_conf->dma_tx_size; i++)
+               stmmac_free_tx_buffer(priv, dma_conf, queue, i);

        if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
                xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
@@ -1897,34 +1940,37 @@ static void stmmac_free_tx_skbufs(struct
        u32 queue;

        for (queue = 0; queue < tx_queue_cnt; queue++)
-               dma_free_tx_skbufs(priv, queue);
+               dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
 }

 /**
  * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * @queue: RX queue index
  */
-static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
+                                        struct stmmac_dma_conf *dma_conf,
+                                        u32 queue)
 {
-       struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+       struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

        /* Release the DMA RX socket buffers */
        if (rx_q->xsk_pool)
-               dma_free_rx_xskbufs(priv, queue);
+               dma_free_rx_xskbufs(priv, dma_conf, queue);
        else
-               dma_free_rx_skbufs(priv, queue);
+               dma_free_rx_skbufs(priv, dma_conf, queue);

        rx_q->buf_alloc_num = 0;
        rx_q->xsk_pool = NULL;

        /* Free DMA regions of consistent memory previously allocated */
        if (!priv->extend_desc)
-               dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size *
+               dma_free_coherent(priv->device, dma_conf->dma_rx_size *
                                  sizeof(struct dma_desc),
                                  rx_q->dma_rx, rx_q->dma_rx_phy);
        else
-               dma_free_coherent(priv->device, priv->dma_conf.dma_rx_size *
+               dma_free_coherent(priv->device, dma_conf->dma_rx_size *
                                  sizeof(struct dma_extended_desc),
                                  rx_q->dma_erx, rx_q->dma_rx_phy);

@@ -1936,29 +1982,33 @@ static void __free_dma_rx_desc_resources
                page_pool_destroy(rx_q->page_pool);
 }

-static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
+                                      struct stmmac_dma_conf *dma_conf)
 {
        u32 rx_count = priv->plat->rx_queues_to_use;
        u32 queue;

        /* Free RX queue resources */
        for (queue = 0; queue < rx_count; queue++)
-               __free_dma_rx_desc_resources(priv, queue);
+               __free_dma_rx_desc_resources(priv, dma_conf, queue);
 }

 /**
  * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * @queue: TX queue index
  */
-static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
+                                        struct stmmac_dma_conf *dma_conf,
+                                        u32 queue)
 {
-       struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+       struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
        size_t size;
        void *addr;

        /* Release the DMA TX socket buffers */
-       dma_free_tx_skbufs(priv, queue);
+       dma_free_tx_skbufs(priv, dma_conf, queue);

        if (priv->extend_desc) {
                size = sizeof(struct dma_extended_desc);
@@ -1971,7 +2021,7 @@ static void __free_dma_tx_desc_resources
                addr = tx_q->dma_tx;
        }

-       size *= priv->dma_conf.dma_tx_size;
+       size *= dma_conf->dma_tx_size;

        dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);

@@ -1979,28 +2029,32 @@ static void __free_dma_tx_desc_resources
        kfree(tx_q->tx_skbuff);
 }

-static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
+                                      struct stmmac_dma_conf *dma_conf)
 {
        u32 tx_count = priv->plat->tx_queues_to_use;
        u32 queue;

        /* Free TX queue resources */
        for (queue = 0; queue < tx_count; queue++)
-               __free_dma_tx_desc_resources(priv, queue);
+               __free_dma_tx_desc_resources(priv, dma_conf, queue);
 }

 /**
  * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * @queue: RX queue index
  * Description: according to which descriptor can be used (extend or basic)
  * this function allocates the resources for TX and RX paths. In case of
  * reception, for example, it pre-allocated the RX socket buffer in order to
  * allow zero-copy mechanism.
  */
-static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+                                        struct stmmac_dma_conf *dma_conf,
+                                        u32 queue)
 {
-       struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+       struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
        struct stmmac_channel *ch = &priv->channel[queue];
        bool xdp_prog = stmmac_xdp_is_enabled(priv);
        struct page_pool_params pp_params = { 0 };
@@ -2012,8 +2066,8 @@ static int __alloc_dma_rx_desc_resources
        rx_q->priv_data = priv;

        pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
-       pp_params.pool_size = priv->dma_conf.dma_rx_size;
-       num_pages = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE);
+       pp_params.pool_size = dma_conf->dma_rx_size;
+       num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
        pp_params.order = ilog2(num_pages);
        pp_params.nid = dev_to_node(priv->device);
        pp_params.dev = priv->device;
@@ -2028,7 +2082,7 @@ static int __alloc_dma_rx_desc_resources
                return ret;
        }

-       rx_q->buf_pool = kcalloc(priv->dma_conf.dma_rx_size,
+       rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
                                 sizeof(*rx_q->buf_pool),
                                 GFP_KERNEL);
        if (!rx_q->buf_pool)
@@ -2036,7 +2090,7 @@ static int __alloc_dma_rx_desc_resources

        if (priv->extend_desc) {
                rx_q->dma_erx = dma_alloc_coherent(priv->device,
-                                                  priv->dma_conf.dma_rx_size *
+                                                  dma_conf->dma_rx_size *
                                                   sizeof(struct dma_extended_desc),
                                                   &rx_q->dma_rx_phy,
                                                   GFP_KERNEL);
@@ -2045,7 +2099,7 @@ static int __alloc_dma_rx_desc_resources

        } else {
                rx_q->dma_rx = dma_alloc_coherent(priv->device,
-                                                 priv->dma_conf.dma_rx_size *
+                                                 dma_conf->dma_rx_size *
                                                  sizeof(struct dma_desc),
                                                  &rx_q->dma_rx_phy,
                                                  GFP_KERNEL);
@@ -2070,7 +2124,8 @@ static int __alloc_dma_rx_desc_resources
        return 0;
 }

-static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+                                      struct stmmac_dma_conf *dma_conf)
 {
        u32 rx_count = priv->plat->rx_queues_to_use;
        u32 queue;
@@ -2078,7 +2133,7 @@ static int alloc_dma_rx_desc_resources(s

        /* RX queues buffers and DMA */
        for (queue = 0; queue < rx_count; queue++) {
-               ret = __alloc_dma_rx_desc_resources(priv, queue);
+               ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
                if (ret)
                        goto err_dma;
        }
@@ -2086,7 +2141,7 @@ static int alloc_dma_rx_desc_resources(s
        return 0;

 err_dma:
-       free_dma_rx_desc_resources(priv);
+       free_dma_rx_desc_resources(priv, dma_conf);

        return ret;
 }
@@ -2094,28 +2149,31 @@ err_dma:
 /**
  * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * @queue: TX queue index
  * Description: according to which descriptor can be used (extend or basic)
  * this function allocates the resources for TX and RX paths. In case of
  * reception, for example, it pre-allocated the RX socket buffer in order to
  * allow zero-copy mechanism.
  */
-static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
+                                        struct stmmac_dma_conf *dma_conf,
+                                        u32 queue)
 {
-       struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+       struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
        size_t size;
        void *addr;

        tx_q->queue_index = queue;
        tx_q->priv_data = priv;

-       tx_q->tx_skbuff_dma = kcalloc(priv->dma_conf.dma_tx_size,
+       tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
                                      sizeof(*tx_q->tx_skbuff_dma),
                                      GFP_KERNEL);
        if (!tx_q->tx_skbuff_dma)
                return -ENOMEM;

-       tx_q->tx_skbuff = kcalloc(priv->dma_conf.dma_tx_size,
+       tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
                                  sizeof(struct sk_buff *),
                                  GFP_KERNEL);
        if (!tx_q->tx_skbuff)
@@ -2128,7 +2186,7 @@ static int __alloc_dma_tx_desc_resources
        else
                size = sizeof(struct dma_desc);

-       size *= priv->dma_conf.dma_tx_size;
+       size *= dma_conf->dma_tx_size;

        addr = dma_alloc_coherent(priv->device, size,
                                  &tx_q->dma_tx_phy, GFP_KERNEL);
@@ -2145,7 +2203,8 @@ static int __alloc_dma_tx_desc_resources
        return 0;
 }

-static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
+                                      struct stmmac_dma_conf *dma_conf)
 {
        u32 tx_count = priv->plat->tx_queues_to_use;
        u32 queue;
@@ -2153,7 +2212,7 @@ static int alloc_dma_tx_desc_resources(s

        /* TX queues buffers and DMA */
        for (queue = 0; queue < tx_count; queue++) {
-               ret = __alloc_dma_tx_desc_resources(priv, queue);
+               ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
                if (ret)
                        goto err_dma;
        }
@@ -2161,27 +2220,29 @@ static int alloc_dma_tx_desc_resources(s
        return 0;

 err_dma:
-       free_dma_tx_desc_resources(priv);
+       free_dma_tx_desc_resources(priv, dma_conf);
        return ret;
 }

 /**
  * alloc_dma_desc_resources - alloc TX/RX resources.
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  * Description: according to which descriptor can be used (extend or basic)
  * this function allocates the resources for TX and RX paths. In case of
  * reception, for example, it pre-allocated the RX socket buffer in order to
  * allow zero-copy mechanism.
  */
-static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_desc_resources(struct stmmac_priv *priv,
+                                   struct stmmac_dma_conf *dma_conf)
 {
        /* RX Allocation */
-       int ret = alloc_dma_rx_desc_resources(priv);
+       int ret = alloc_dma_rx_desc_resources(priv, dma_conf);

        if (ret)
                return ret;

-       ret = alloc_dma_tx_desc_resources(priv);
+       ret = alloc_dma_tx_desc_resources(priv, dma_conf);

        return ret;
 }
@@ -2189,16 +2250,18 @@ static int alloc_dma_desc_resources(stru
 /**
  * free_dma_desc_resources - free dma desc resources
  * @priv: private structure
+ * @dma_conf: structure to take the dma data
  */
-static void free_dma_desc_resources(struct stmmac_priv *priv)
+static void free_dma_desc_resources(struct stmmac_priv *priv,
+                                   struct stmmac_dma_conf *dma_conf)
 {
        /* Release the DMA TX socket buffers */
-       free_dma_tx_desc_resources(priv);
+       free_dma_tx_desc_resources(priv, dma_conf);

        /* Release the DMA RX socket buffers later
         * to ensure all pending XDP_TX buffers are returned.
         */
-       free_dma_rx_desc_resources(priv);
+       free_dma_rx_desc_resources(priv, dma_conf);
 }

 /**
@@ -2687,8 +2750,8 @@ static void stmmac_tx_err(struct stmmac_
        netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

        stmmac_stop_tx_dma(priv, chan);
-       dma_free_tx_skbufs(priv, chan);
-       stmmac_clear_tx_descriptors(priv, chan);
+       dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
+       stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
        stmmac_reset_tx_queue(priv, chan);
        stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
                            tx_q->dma_tx_phy, chan);
@@ -3686,19 +3749,93 @@ static int stmmac_request_irq(struct net
 }

 /**
- *  stmmac_open - open entry point of the driver
+ *  stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
+ *  @priv: driver private structure
+ *  @mtu: MTU to setup the dma queue and buf with
+ *  Description: Allocate and generate a dma_conf based on the provided MTU.
+ *  Allocate the Tx/Rx DMA queue and init them.
+ *  Return value:
+ *  the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
+ */
+static struct stmmac_dma_conf *
+stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
+{
+       struct stmmac_dma_conf *dma_conf;
+       int chan, bfsize, ret;
+
+       dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
+       if (!dma_conf) {
+               netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
+                          __func__);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       bfsize = stmmac_set_16kib_bfsize(priv, mtu);
+       if (bfsize < 0)
+               bfsize = 0;
+
+       if (bfsize < BUF_SIZE_16KiB)
+               bfsize = stmmac_set_bfsize(mtu, 0);
+
+       dma_conf->dma_buf_sz = bfsize;
+       /* Chose the tx/rx size from the already defined one in the
+        * priv struct. (if defined)
+        */
+       dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
+       dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
+
+       if (!dma_conf->dma_tx_size)
+               dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
+       if (!dma_conf->dma_rx_size)
+               dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+
+       /* Earlier check for TBS */
+       for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
+               struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
+               int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+
+               /* Setup per-TXQ tbs flag before TX descriptor alloc */
+               tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
+       }
+
+       ret = alloc_dma_desc_resources(priv, dma_conf);
+       if (ret < 0) {
+               netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
+                          __func__);
+               goto alloc_error;
+       }
+
+       ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
+       if (ret < 0) {
+               netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
+                          __func__);
+               goto init_error;
+       }
+
+       return dma_conf;
+
+init_error:
+       free_dma_desc_resources(priv, dma_conf);
+alloc_error:
+       kfree(dma_conf);
+       return ERR_PTR(ret);
+}
+
+/**
+ *  __stmmac_open - open entry point of the driver
  *  @dev : pointer to the device structure.
+ *  @dma_conf :  structure to take the dma data
  *  Description:
  *  This function is the open entry point of the driver.
  *  Return value:
  *  0 on success and an appropriate (-)ve integer as defined in errno.h
  *  file on failure.
  */
-static int stmmac_open(struct net_device *dev)
+static int __stmmac_open(struct net_device *dev,
+                        struct stmmac_dma_conf *dma_conf)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
        int mode = priv->plat->phy_interface;
-       int bfsize = 0;
        u32 chan;
        int ret;

@@ -3725,45 +3862,10 @@ static int stmmac_open(struct net_device
        memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
        priv->xstats.threshold = tc;

-       bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
-       if (bfsize < 0)
-               bfsize = 0;
-
-       if (bfsize < BUF_SIZE_16KiB)
-               bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_conf.dma_buf_sz);
-
-       priv->dma_conf.dma_buf_sz = bfsize;
-       buf_sz = bfsize;
-
        priv->rx_copybreak = STMMAC_RX_COPYBREAK;

-       if (!priv->dma_conf.dma_tx_size)
-               priv->dma_conf.dma_tx_size = DMA_DEFAULT_TX_SIZE;
-       if (!priv->dma_conf.dma_rx_size)
-               priv->dma_conf.dma_rx_size = DMA_DEFAULT_RX_SIZE;
-
-       /* Earlier check for TBS */
-       for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
-               struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
-               int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
-
-               /* Setup per-TXQ tbs flag before TX descriptor alloc */
-               tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
-       }
-
-       ret = alloc_dma_desc_resources(priv);
-       if (ret < 0) {
-               netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
-                          __func__);
-               goto dma_desc_error;
-       }
-
-       ret = init_dma_desc_rings(dev, GFP_KERNEL);
-       if (ret < 0) {
-               netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
-                          __func__);
-               goto init_error;
-       }
+       buf_sz = dma_conf->dma_buf_sz;
+       memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));

        if (priv->plat->serdes_powerup) {
                ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
@@ -3806,14 +3908,28 @@ irq_error:

        stmmac_hw_teardown(dev);
 init_error:
-       free_dma_desc_resources(priv);
-dma_desc_error:
+       free_dma_desc_resources(priv, &priv->dma_conf);
        phylink_disconnect_phy(priv->phylink);
 init_phy_error:
        pm_runtime_put(priv->device);
        return ret;
 }

+static int stmmac_open(struct net_device *dev)
+{
+       struct stmmac_priv *priv = netdev_priv(dev);
+       struct stmmac_dma_conf *dma_conf;
+       int ret;
+
+       dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
+       if (IS_ERR(dma_conf))
+               return PTR_ERR(dma_conf);
+
+       ret = __stmmac_open(dev, dma_conf);
+       kfree(dma_conf);
+       return ret;
+}
+
 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
 {
        set_bit(__FPE_REMOVING, &priv->fpe_task_state);
@@ -3862,7 +3978,7 @@ static int stmmac_release(struct net_dev
        stmmac_stop_all_dma(priv);

        /* Release and free the Rx/Tx resources */
-       free_dma_desc_resources(priv);
+       free_dma_desc_resources(priv, &priv->dma_conf);

        /* Disable the MAC Rx/Tx */
        stmmac_mac_set(priv, priv->ioaddr, false);
@@ -6385,7 +6501,7 @@ void stmmac_disable_rx_queue(struct stmm
        spin_unlock_irqrestore(&ch->lock, flags);

        stmmac_stop_rx_dma(priv, queue);
-       __free_dma_rx_desc_resources(priv, queue);
+       __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
 }

 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
@@ -6396,21 +6512,21 @@ void stmmac_enable_rx_queue(struct stmma
        u32 buf_size;
        int ret;

-       ret = __alloc_dma_rx_desc_resources(priv, queue);
+       ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
        if (ret) {
                netdev_err(priv->dev, "Failed to alloc RX desc.\n");
                return;
        }

-       ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
+       ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
        if (ret) {
-               __free_dma_rx_desc_resources(priv, queue);
+               __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
                netdev_err(priv->dev, "Failed to init RX desc.\n");
                return;
        }

        stmmac_reset_rx_queue(priv, queue);
-       stmmac_clear_rx_descriptors(priv, queue);
+       stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);

        stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
                            rx_q->dma_rx_phy, rx_q->queue_index);
@@ -6448,7 +6564,7 @@ void stmmac_disable_tx_queue(struct stmm
        spin_unlock_irqrestore(&ch->lock, flags);

        stmmac_stop_tx_dma(priv, queue);
-       __free_dma_tx_desc_resources(priv, queue);
+       __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
 }

 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
@@ -6458,21 +6574,21 @@ void stmmac_enable_tx_queue(struct stmma
        unsigned long flags;
        int ret;

-       ret = __alloc_dma_tx_desc_resources(priv, queue);
+       ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
        if (ret) {
                netdev_err(priv->dev, "Failed to alloc TX desc.\n");
                return;
        }

-       ret = __init_dma_tx_desc_rings(priv, queue);
+       ret = __init_dma_tx_desc_rings(priv,  &priv->dma_conf, queue);
        if (ret) {
-               __free_dma_tx_desc_resources(priv, queue);
+               __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
                netdev_err(priv->dev, "Failed to init TX desc.\n");
                return;
        }

        stmmac_reset_tx_queue(priv, queue);
-       stmmac_clear_tx_descriptors(priv, queue);
+       stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);

        stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
                            tx_q->dma_tx_phy, tx_q->queue_index);
@@ -6512,7 +6628,7 @@ void stmmac_xdp_release(struct net_devic
        stmmac_stop_all_dma(priv);

        /* Release and free the Rx/Tx resources */
-       free_dma_desc_resources(priv);
+       free_dma_desc_resources(priv, &priv->dma_conf);

        /* Disable the MAC Rx/Tx */
        stmmac_mac_set(priv, priv->ioaddr, false);
@@ -6537,14 +6653,14 @@ int stmmac_xdp_open(struct net_device *d
        u32 chan;
        int ret;

-       ret = alloc_dma_desc_resources(priv);
+       ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
        if (ret < 0) {
                netdev_err(dev, "%s: DMA descriptors allocation failed\n",
                           __func__);
                goto dma_desc_error;
        }

-       ret = init_dma_desc_rings(dev, GFP_KERNEL);
+       ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
        if (ret < 0) {
                netdev_err(dev, "%s: DMA descriptors initialization failed\n",
                           __func__);
@@ -6626,7 +6742,7 @@ irq_error:

        stmmac_hw_teardown(dev);
 init_error:
-       free_dma_desc_resources(priv);
+       free_dma_desc_resources(priv, &priv->dma_conf);
 dma_desc_error:
        return ret;
 }
@@ -7492,7 +7608,7 @@ int stmmac_resume(struct device *dev)
        stmmac_reset_queues_param(priv);

        stmmac_free_tx_skbufs(priv);
-       stmmac_clear_descriptors(priv);
+       stmmac_clear_descriptors(priv, &priv->dma_conf);

        stmmac_hw_setup(ndev, false);
        stmmac_init_coalesce(priv);