1 Patches for the bgmac driver, backported from net-next/master
3 --- a/drivers/net/ethernet/broadcom/Kconfig
4 +++ b/drivers/net/ethernet/broadcom/Kconfig
5 @@ -132,7 +132,8 @@ config BNX2X_SRIOV
8 tristate "BCMA bus GBit core support"
9 - depends on BCMA_HOST_SOC && HAS_DMA
10 + depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX
13 This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
14 They can be found on BCM47xx SoCs and provide gigabit ethernet.
15 --- a/drivers/net/ethernet/broadcom/bgmac.c
16 +++ b/drivers/net/ethernet/broadcom/bgmac.c
17 @@ -149,6 +149,8 @@ static netdev_tx_t bgmac_dma_tx_add(stru
18 dma_desc->ctl0 = cpu_to_le32(ctl0);
19 dma_desc->ctl1 = cpu_to_le32(ctl1);
21 + netdev_sent_queue(net_dev, skb->len);
25 /* Increase ring->end to point empty slot. We tell hardware the first
26 @@ -157,6 +159,7 @@ static netdev_tx_t bgmac_dma_tx_add(stru
27 if (++ring->end >= BGMAC_TX_RING_SLOTS)
29 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
31 ring->end * sizeof(struct bgmac_dma_desc));
33 /* Always keep one slot free to allow detecting bugged calls. */
34 @@ -177,10 +180,13 @@ static void bgmac_dma_tx_free(struct bgm
35 struct device *dma_dev = bgmac->core->dma_dev;
38 + unsigned bytes_compl = 0, pkts_compl = 0;
40 /* The last slot that hardware didn't consume yet */
41 empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
42 empty_slot &= BGMAC_DMA_TX_STATDPTR;
43 + empty_slot -= ring->index_base;
44 + empty_slot &= BGMAC_DMA_TX_STATDPTR;
45 empty_slot /= sizeof(struct bgmac_dma_desc);
47 while (ring->start != empty_slot) {
48 @@ -192,6 +198,9 @@ static void bgmac_dma_tx_free(struct bgm
49 slot->skb->len, DMA_TO_DEVICE);
52 + bytes_compl += slot->skb->len;
56 dev_kfree_skb(slot->skb);
58 @@ -205,6 +214,8 @@ static void bgmac_dma_tx_free(struct bgm
62 + netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
64 if (freed && netif_queue_stopped(bgmac->net_dev))
65 netif_wake_queue(bgmac->net_dev);
67 @@ -241,31 +252,59 @@ static int bgmac_dma_rx_skb_for_slot(str
68 struct bgmac_slot_info *slot)
70 struct device *dma_dev = bgmac->core->dma_dev;
71 + struct sk_buff *skb;
72 + dma_addr_t dma_addr;
73 struct bgmac_rx_header *rx;
76 - slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
78 + skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
82 /* Poison - if everything goes fine, hardware will overwrite it */
83 - rx = (struct bgmac_rx_header *)slot->skb->data;
84 + rx = (struct bgmac_rx_header *)skb->data;
85 rx->len = cpu_to_le16(0xdead);
86 rx->flags = cpu_to_le16(0xbeef);
88 /* Map skb for the DMA */
89 - slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
90 - BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
91 - if (dma_mapping_error(dma_dev, slot->dma_addr)) {
92 + dma_addr = dma_map_single(dma_dev, skb->data,
93 + BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
94 + if (dma_mapping_error(dma_dev, dma_addr)) {
95 bgmac_err(bgmac, "DMA mapping error\n");
100 + /* Update the slot */
102 + slot->dma_addr = dma_addr;
104 if (slot->dma_addr & 0xC0000000)
105 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
110 +static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
111 + struct bgmac_dma_ring *ring, int desc_idx)
113 + struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
114 + u32 ctl0 = 0, ctl1 = 0;
116 + if (desc_idx == ring->num_slots - 1)
117 + ctl0 |= BGMAC_DESC_CTL0_EOT;
118 + ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
119 + /* Is there any BGMAC device that requires extension? */
120 + /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
121 + * B43_DMA64_DCTL1_ADDREXT_MASK;
124 + dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
125 + dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
126 + dma_desc->ctl0 = cpu_to_le32(ctl0);
127 + dma_desc->ctl1 = cpu_to_le32(ctl1);
130 static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
133 @@ -274,6 +313,8 @@ static int bgmac_dma_rx_read(struct bgma
135 end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
136 end_slot &= BGMAC_DMA_RX_STATDPTR;
137 + end_slot -= ring->index_base;
138 + end_slot &= BGMAC_DMA_RX_STATDPTR;
139 end_slot /= sizeof(struct bgmac_dma_desc);
141 ring->end = end_slot;
142 @@ -282,7 +323,6 @@ static int bgmac_dma_rx_read(struct bgma
143 struct device *dma_dev = bgmac->core->dma_dev;
144 struct bgmac_slot_info *slot = &ring->slots[ring->start];
145 struct sk_buff *skb = slot->skb;
146 - struct sk_buff *new_skb;
147 struct bgmac_rx_header *rx;
150 @@ -295,38 +335,51 @@ static int bgmac_dma_rx_read(struct bgma
151 len = le16_to_cpu(rx->len);
152 flags = le16_to_cpu(rx->flags);
154 - /* Check for poison and drop or pass the packet */
155 - if (len == 0xdead && flags == 0xbeef) {
156 - bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
160 + dma_addr_t old_dma_addr = slot->dma_addr;
163 + /* Check for poison and drop or pass the packet */
164 + if (len == 0xdead && flags == 0xbeef) {
165 + bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
167 + dma_sync_single_for_device(dma_dev,
177 - new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
179 - skb_put(new_skb, len);
180 - skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
183 - skb_checksum_none_assert(skb);
184 - new_skb->protocol =
185 - eth_type_trans(new_skb, bgmac->net_dev);
186 - netif_receive_skb(new_skb);
189 - bgmac->net_dev->stats.rx_dropped++;
190 - bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
191 + /* Prepare new skb as replacement */
192 + err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
194 + /* Poison the old skb */
195 + rx->len = cpu_to_le16(0xdead);
196 + rx->flags = cpu_to_le16(0xbeef);
198 + dma_sync_single_for_device(dma_dev,
204 + bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
206 - /* Poison the old skb */
207 - rx->len = cpu_to_le16(0xdead);
208 - rx->flags = cpu_to_le16(0xbeef);
211 - /* Make it back accessible to the hardware */
212 - dma_sync_single_for_device(dma_dev, slot->dma_addr,
213 - BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
214 + /* Unmap old skb, we'll pass it to the netfif */
215 + dma_unmap_single(dma_dev, old_dma_addr,
216 + BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
218 + skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
219 + skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
221 + skb_checksum_none_assert(skb);
222 + skb->protocol = eth_type_trans(skb, bgmac->net_dev);
223 + netif_receive_skb(skb);
227 if (++ring->start >= BGMAC_RX_RING_SLOTS)
229 @@ -418,9 +471,6 @@ static int bgmac_dma_alloc(struct bgmac
230 ring = &bgmac->tx_ring[i];
231 ring->num_slots = BGMAC_TX_RING_SLOTS;
232 ring->mmio_base = ring_base[i];
233 - if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
234 - bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
237 /* Alloc ring of descriptors */
238 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
239 @@ -435,6 +485,13 @@ static int bgmac_dma_alloc(struct bgmac
240 if (ring->dma_base & 0xC0000000)
241 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
243 + ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
244 + BGMAC_DMA_RING_TX);
245 + if (ring->unaligned)
246 + ring->index_base = lower_32_bits(ring->dma_base);
248 + ring->index_base = 0;
250 /* No need to alloc TX slots yet */
253 @@ -444,9 +501,6 @@ static int bgmac_dma_alloc(struct bgmac
254 ring = &bgmac->rx_ring[i];
255 ring->num_slots = BGMAC_RX_RING_SLOTS;
256 ring->mmio_base = ring_base[i];
257 - if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
258 - bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
261 /* Alloc ring of descriptors */
262 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
263 @@ -462,6 +516,13 @@ static int bgmac_dma_alloc(struct bgmac
264 if (ring->dma_base & 0xC0000000)
265 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
267 + ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
268 + BGMAC_DMA_RING_RX);
269 + if (ring->unaligned)
270 + ring->index_base = lower_32_bits(ring->dma_base);
272 + ring->index_base = 0;
275 for (j = 0; j < ring->num_slots; j++) {
276 err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
277 @@ -482,19 +543,19 @@ err_dma_free:
278 static void bgmac_dma_init(struct bgmac *bgmac)
280 struct bgmac_dma_ring *ring;
281 - struct bgmac_dma_desc *dma_desc;
285 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
286 ring = &bgmac->tx_ring[i];
288 - /* We don't implement unaligned addressing, so enable first */
289 - bgmac_dma_tx_enable(bgmac, ring);
290 + if (!ring->unaligned)
291 + bgmac_dma_tx_enable(bgmac, ring);
292 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
293 lower_32_bits(ring->dma_base));
294 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
295 upper_32_bits(ring->dma_base));
296 + if (ring->unaligned)
297 + bgmac_dma_tx_enable(bgmac, ring);
300 ring->end = 0; /* Points the slot that should *not* be read */
301 @@ -505,32 +566,20 @@ static void bgmac_dma_init(struct bgmac
303 ring = &bgmac->rx_ring[i];
305 - /* We don't implement unaligned addressing, so enable first */
306 - bgmac_dma_rx_enable(bgmac, ring);
307 + if (!ring->unaligned)
308 + bgmac_dma_rx_enable(bgmac, ring);
309 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
310 lower_32_bits(ring->dma_base));
311 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
312 upper_32_bits(ring->dma_base));
313 + if (ring->unaligned)
314 + bgmac_dma_rx_enable(bgmac, ring);
316 - for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
320 - if (j == ring->num_slots - 1)
321 - ctl0 |= BGMAC_DESC_CTL0_EOT;
322 - ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
323 - /* Is there any BGMAC device that requires extension? */
324 - /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
325 - * B43_DMA64_DCTL1_ADDREXT_MASK;
328 - dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
329 - dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
330 - dma_desc->ctl0 = cpu_to_le32(ctl0);
331 - dma_desc->ctl1 = cpu_to_le32(ctl1);
333 + for (j = 0; j < ring->num_slots; j++)
334 + bgmac_dma_rx_setup_desc(bgmac, ring, j);
336 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
338 ring->num_slots * sizeof(struct bgmac_dma_desc));
341 @@ -909,9 +958,9 @@ static void bgmac_chip_reset(struct bgma
343 u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
344 BGMAC_CHIPCTL_1_IF_TYPE_MII;
348 - if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
349 + if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
350 if (kstrtou8(buf, 0, &et_swtype))
351 bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
353 @@ -970,6 +1019,8 @@ static void bgmac_chip_reset(struct bgma
354 bgmac_miiconfig(bgmac);
355 bgmac_phy_init(bgmac);
357 + netdev_reset_queue(bgmac->net_dev);
359 bgmac->int_status = 0;
362 --- a/drivers/net/ethernet/broadcom/bgmac.h
363 +++ b/drivers/net/ethernet/broadcom/bgmac.h
364 @@ -384,6 +384,8 @@ struct bgmac_dma_ring {
366 struct bgmac_dma_desc *cpu_base;
368 + u32 index_base; /* Used for unaligned rings only, otherwise 0 */
371 struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];