Linux-libre 5.4.47-gnu
[librecmc/linux-libre.git] / drivers / net / ethernet / stmicro / stmmac / ring_mode.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3   Specialised functions for managing Ring mode
4
5   Copyright(C) 2011  STMicroelectronics Ltd
6
  It defines all the functions used to handle the normal/enhanced
  descriptors when the DMA is configured to work in chained or
  in ring mode.
10
11
12   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
13 *******************************************************************************/
14
15 #include "stmmac.h"
16
/* Map the linear part of a jumbo skb onto one or two TX descriptors.
 * In ring mode both buffer pointers of a descriptor (DES2 and DES3)
 * are used, with DES3 addressing the same DMA mapping 4KiB past DES2.
 * Returns the last ring index used, or -1 on a DMA mapping failure.
 */
static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
	unsigned int nopaged_len = skb_headlen(skb);	/* linear head only */
	struct stmmac_priv *priv = tx_q->priv_data;
	unsigned int entry = tx_q->cur_tx;
	unsigned int bmax, len, des2;
	struct dma_desc *desc;

	/* Extended descriptors embed the basic descriptor at offset 0,
	 * so the cast below is valid for both layouts. */
	if (priv->extend_desc)
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else
		desc = tx_q->dma_tx + entry;

	/* Bytes carried by the first descriptor: 8KiB with enhanced
	 * descriptors, 2KiB with normal ones. */
	if (priv->plat->enh_desc)
		bmax = BUF_SIZE_8KiB;
	else
		bmax = BUF_SIZE_2KiB;

	/* Remainder that would go on a second descriptor; only
	 * meaningful (and only used) in the two-descriptor branch. */
	len = nopaged_len - bmax;

	if (nopaged_len > BUF_SIZE_8KiB) {
		/* Head too large for one descriptor: split it over two. */
		des2 = dma_map_single(priv->device, skb->data, bmax,
				      DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;

		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = bmax;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;

		/* DES3 points 4KiB into the same mapping */
		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		/* First segment, not last (flag meanings per
		 * stmmac_prepare_tx_desc — defined elsewhere). */
		stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
				STMMAC_RING_MODE, 0, false, skb->len);
		/* skb is owned/freed via the LAST descriptor's slot */
		tx_q->tx_skbuff[entry] = NULL;
		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);

		if (priv->extend_desc)
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else
			desc = tx_q->dma_tx + entry;

		/* Second descriptor carries the remaining len bytes */
		des2 = dma_map_single(priv->device, skb->data + bmax, len,
				      DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;
		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;

		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		/* Last segment only when there are no paged fragments */
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
				STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
				skb->len);
	} else {
		/* Whole head fits in a single descriptor */
		des2 = dma_map_single(priv->device, skb->data,
				      nopaged_len, DMA_TO_DEVICE);
		desc->des2 = cpu_to_le32(des2);
		if (dma_mapping_error(priv->device, des2))
			return -1;
		tx_q->tx_skbuff_dma[entry].buf = des2;
		tx_q->tx_skbuff_dma[entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[entry].is_jumbo = true;
		desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
		stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
				STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
				skb->len);
	}

	tx_q->cur_tx = entry;

	return entry;
}
93
94 static unsigned int is_jumbo_frm(int len, int enh_desc)
95 {
96         unsigned int ret = 0;
97
98         if (len >= BUF_SIZE_4KiB)
99                 ret = 1;
100
101         return ret;
102 }
103
104 static void refill_desc3(void *priv_ptr, struct dma_desc *p)
105 {
106         struct stmmac_rx_queue *rx_q = priv_ptr;
107         struct stmmac_priv *priv = rx_q->priv_data;
108
109         /* Fill DES3 in case of RING mode */
110         if (priv->dma_buf_sz == BUF_SIZE_16KiB)
111                 p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
112 }
113
114 /* In ring mode we need to fill the desc3 because it is used as buffer */
115 static void init_desc3(struct dma_desc *p)
116 {
117         p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
118 }
119
120 static void clean_desc3(void *priv_ptr, struct dma_desc *p)
121 {
122         struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
123         struct stmmac_priv *priv = tx_q->priv_data;
124         unsigned int entry = tx_q->dirty_tx;
125
126         /* des3 is only used for jumbo frames tx or time stamping */
127         if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
128                      (tx_q->tx_skbuff_dma[entry].last_segment &&
129                       !priv->extend_desc && priv->hwts_tx_en)))
130                 p->des3 = 0;
131 }
132
133 static int set_16kib_bfsize(int mtu)
134 {
135         int ret = 0;
136         if (unlikely(mtu > BUF_SIZE_8KiB))
137                 ret = BUF_SIZE_16KiB;
138         return ret;
139 }
140
/* Ring-mode implementation of the stmmac mode operations; selected by
 * the core driver when the DMA is configured for ring (not chain) mode.
 */
const struct stmmac_mode_ops ring_mode_ops = {
	.is_jumbo_frm = is_jumbo_frm,
	.jumbo_frm = jumbo_frm,
	.refill_desc3 = refill_desc3,
	.init_desc3 = init_desc3,
	.clean_desc3 = clean_desc3,
	.set_16kib_bfsize = set_16kib_bfsize,
};