// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File aq_ring.c: Definition of functions for Rx/Tx rings. */

#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

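/* Unmap a receive page from the device and drop the ring's own page
 * reference; references still held by in-flight SKBs keep the page alive
 * until the network stack consumes them.
 */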
static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
{
	unsigned int len = PAGE_SIZE << rxpage->order;

	dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);

	/* Drop the ref for being in the ring. */
	__free_pages(rxpage->page, rxpage->order);
	rxpage->page = NULL;
}

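/* Allocate a page of the given order and DMA-map it for reception; on
 * success the page, its DMA address and its order are recorded in @rxpage
 * and the payload offset is reset to zero.
 */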
static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
			 struct device *dev)
{
	struct page *page;
	int ret = -ENOMEM;
	dma_addr_t daddr;

	page = dev_alloc_pages(order);
	if (unlikely(!page))
		goto err_exit;

	daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
			     DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, daddr)))
		goto free_page;

	rxpage->page = page;
	rxpage->daddr = daddr;
	rxpage->order = order;
	rxpage->pg_off = 0;

	return 0;

free_page:
	__free_pages(page, order);
err_exit:
	return ret;
}

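/* Ensure @rxbuf has a page ready for the next frame. When the ring is the
 * only remaining user (page refcount of one) the page is reused from offset
 * zero; otherwise the offset is advanced by one frame ("page flip"), and the
 * page is released and reallocated only once it is exhausted ("page lost").
 */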
static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
			  int order)
{
	int ret;

	if (rxbuf->rxdata.page) {
		/* One means ring is the only user and can reuse */
		if (page_ref_count(rxbuf->rxdata.page) > 1) {
			/* Try reuse buffer */
			rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
			if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
			    (PAGE_SIZE << order)) {
				self->stats.rx.pg_flips++;
			} else {
				/* Buffer exhausted. We have other users and
				 * should release this page and realloc
				 */
				aq_free_rxpage(&rxbuf->rxdata,
					       aq_nic_get_dev(self->aq_nic));
				self->stats.rx.pg_losts++;
			}
		} else {
			rxbuf->rxdata.pg_off = 0;
			self->stats.rx.pg_reuses++;
		}
	}

	if (!rxbuf->rxdata.page) {
		ret = aq_get_rxpage(&rxbuf->rxdata, order,
				    aq_nic_get_dev(self->aq_nic));
		return ret;
	}

	return 0;
}

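/* Common allocation path shared by Tx and Rx: the software buffer ring comes
 * from kcalloc(), the hardware descriptor ring from coherent DMA memory.
 * On failure the ring is torn down with aq_ring_free() and NULL is returned.
 */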
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
				       struct aq_nic_s *aq_nic)
{
	int err = 0;

	self->buff_ring =
		kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);
	if (!self->buff_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

	self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
					   self->size * self->dx_size,
					   &self->dx_ring_pa, GFP_KERNEL);
	if (!self->dx_ring) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

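/* Per-queue constructors: size the ring from the NIC configuration and the
 * hardware descriptor size, then hand off to aq_ring_alloc(). A typical
 * caller sequence (a sketch of the expected order, not a verbatim excerpt
 * from the vector code) is:
 *
 *	ring = aq_ring_tx_alloc(ring, aq_nic, idx, aq_nic_cfg);
 *	if (!ring)
 *		return -ENOMEM;
 *	err = aq_ring_init(ring);
 */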
struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->txds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
				   struct aq_nic_s *aq_nic,
				   unsigned int idx,
				   struct aq_nic_cfg_s *aq_nic_cfg)
{
	int err = 0;

	self->aq_nic = aq_nic;
	self->idx = idx;
	self->size = aq_nic_cfg->rxds;
	self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
	self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
			       (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;

	if (aq_nic_cfg->rxpageorder > self->page_order)
		self->page_order = aq_nic_cfg->rxpageorder;

	self = aq_ring_alloc(self, aq_nic);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

err_exit:
	if (err < 0) {
		aq_ring_free(self);
		self = NULL;
	}

	return self;
}

int aq_ring_init(struct aq_ring_s *self)
{
	self->hw_head = 0;
	self->sw_head = 0;
	self->sw_tail = 0;

	return 0;
}

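/* True when index @i lies strictly between head @h and tail @t on the
 * circular ring, taking wrap-around into account.
 */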
static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
				       unsigned int t)
{
	return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
}

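/* Tx queue flow control: stop the subqueue once there is no longer room for
 * a maximally fragmented SKB, wake it again when enough descriptors have
 * been reclaimed.
 */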
void aq_ring_update_queue_state(struct aq_ring_s *ring)
{
	if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
		aq_ring_queue_stop(ring);
	else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
		aq_ring_queue_wake(ring);
}

void aq_ring_queue_wake(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (__netif_subqueue_stopped(ndev, ring->idx)) {
		netif_wake_subqueue(ndev, ring->idx);
		ring->stats.tx.queue_restarts++;
	}
}

void aq_ring_queue_stop(struct aq_ring_s *ring)
{
	struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);

	if (!__netif_subqueue_stopped(ndev, ring->idx))
		netif_stop_subqueue(ndev, ring->idx);
}

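/* Reclaim completed Tx descriptors, at most AQ_CFG_TX_CLEAN_BUDGET per call:
 * unmap each mapped fragment and free the SKB on the final (EOP) descriptor.
 * Returns true if the budget was not exhausted.
 */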
bool aq_ring_tx_clean(struct aq_ring_s *self)
{
	struct device *dev = aq_nic_get_dev(self->aq_nic);
	unsigned int budget;

	for (budget = AQ_CFG_TX_CLEAN_BUDGET;
	     budget && self->sw_head != self->hw_head; budget--) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		if (likely(buff->is_mapped)) {
			if (unlikely(buff->is_sop)) {
				/* Stop cleaning if the packet's EOP descriptor
				 * has not been completed by hardware yet.
				 */
				if (!buff->is_eop &&
				    buff->eop_index != 0xffffU &&
				    !aq_ring_dx_in_range(self->sw_head,
							 buff->eop_index,
							 self->hw_head))
					break;

				dma_unmap_single(dev, buff->pa, buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(dev, buff->pa, buff->len,
					       DMA_TO_DEVICE);
			}
		}

		if (unlikely(buff->is_eop))
			dev_kfree_skb_any(buff->skb);

		buff->pa = 0U;
		buff->eop_index = 0xffffU;
		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}

	return !!budget;
}

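/* Translate the hardware checksum-offload flags carried in the Rx descriptor
 * into skb->ip_summed / checksum-unnecessary levels, honouring NETIF_F_RXCSUM.
 */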
static void aq_rx_checksum(struct aq_ring_s *self,
			   struct aq_ring_buff_s *buff,
			   struct sk_buff *skb)
{
	if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
		return;

	if (unlikely(buff->is_cso_err)) {
		++self->stats.rx.errors;
		skb->ip_summed = CHECKSUM_NONE;
		return;
	}

	if (buff->is_ip_cso)
		__skb_incr_checksum_unnecessary(skb);
	else
		skb->ip_summed = CHECKSUM_NONE;

	if (buff->is_udp_cso || buff->is_tcp_cso)
		__skb_incr_checksum_unnecessary(skb);
}

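/* Rx completion path, called from NAPI poll: walk descriptors from sw_head
 * to hw_head, collapse multi-descriptor (RSC) packets, build an SKB either
 * with build_skb() for small single-fragment frames or with a header copy
 * plus page fragments, then pass it to napi_gro_receive().
 */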
#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
int aq_ring_rx_clean(struct aq_ring_s *self,
		     struct napi_struct *napi,
		     int *work_done,
		     int budget)
{
	struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
	bool is_rsc_completed = true;
	int err = 0;

	for (; (self->sw_head != self->hw_head) && budget;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head),
	     --budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		struct aq_ring_buff_s *buff_ = NULL;
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;
		unsigned int hdr_len;
		unsigned int i = 0U;

		if (buff->is_cleaned)
			continue;

		if (!buff->is_eop) {
			/* Make sure hardware has completed the whole RSC chain
			 * before touching any of its descriptors.
			 */
			buff_ = buff;
			do {
				next_ = buff_->next;
				buff_ = &self->buff_ring[next_];
				is_rsc_completed =
					aq_ring_dx_in_range(self->sw_head,
							    next_,
							    self->hw_head);
				if (unlikely(!is_rsc_completed))
					break;

				buff->is_error |= buff_->is_error;
				buff->is_cso_err |= buff_->is_cso_err;
			} while (!buff_->is_eop);

			if (!is_rsc_completed) {
				err = 0;
				goto err_exit;
			}
			if (buff->is_error || buff->is_cso_err) {
				buff_ = buff;
				do {
					next_ = buff_->next;
					buff_ = &self->buff_ring[next_];

					buff_->is_cleaned = true;
				} while (!buff_->is_eop);

				++self->stats.rx.errors;
				continue;
			}
		}

		if (buff->is_error) {
			++self->stats.rx.errors;
			continue;
		}

		dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
					      buff->rxdata.daddr,
					      buff->rxdata.pg_off,
					      buff->len, DMA_FROM_DEVICE);

		/* for single fragment packets use build_skb() */
		if (buff->is_eop &&
		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
			skb = build_skb(aq_buf_vaddr(&buff->rxdata),
					AQ_CFG_RX_FRAME_MAX);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}
			skb_put(skb, buff->len);
			page_ref_inc(buff->rxdata.page);
		} else {
			skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
			if (unlikely(!skb)) {
				err = -ENOMEM;
				goto err_exit;
			}

			hdr_len = buff->len;
			if (hdr_len > AQ_CFG_RX_HDR_SIZE)
				hdr_len = eth_get_headlen(skb->dev,
							  aq_buf_vaddr(&buff->rxdata),
							  AQ_CFG_RX_HDR_SIZE);

			memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
			       ALIGN(hdr_len, sizeof(long)));

			if (buff->len - hdr_len > 0) {
				skb_add_rx_frag(skb, 0, buff->rxdata.page,
						buff->rxdata.pg_off + hdr_len,
						buff->len - hdr_len,
						AQ_CFG_RX_FRAME_MAX);
				page_ref_inc(buff->rxdata.page);
			}

			if (!buff->is_eop) {
				buff_ = buff;
				i = 1U;
				do {
					next_ = buff_->next;
					buff_ = &self->buff_ring[next_];

					dma_sync_single_range_for_cpu(
						aq_nic_get_dev(self->aq_nic),
						buff_->rxdata.daddr,
						buff_->rxdata.pg_off,
						buff_->len,
						DMA_FROM_DEVICE);
					skb_add_rx_frag(skb, i++,
							buff_->rxdata.page,
							buff_->rxdata.pg_off,
							buff_->len,
							AQ_CFG_RX_FRAME_MAX);
					page_ref_inc(buff_->rxdata.page);
					buff_->is_cleaned = 1;

					buff->is_ip_cso &= buff_->is_ip_cso;
					buff->is_udp_cso &= buff_->is_udp_cso;
					buff->is_tcp_cso &= buff_->is_tcp_cso;
					buff->is_cso_err |= buff_->is_cso_err;
				} while (!buff_->is_eop);
			}
		}

		if (buff->is_vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       buff->vlan_rx_tag);

		skb->protocol = eth_type_trans(skb, ndev);

		aq_rx_checksum(self, buff, skb);

		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);

		skb_record_rx_queue(skb, self->idx);

		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;

		napi_gro_receive(napi, skb);
	}

err_exit:
	return err;
}

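/* Refill consumed Rx descriptors with freshly (re)mapped pages. Refill is
 * skipped while fewer than the refill threshold of descriptors are free.
 */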
int aq_ring_rx_fill(struct aq_ring_s *self)
{
	unsigned int page_order = self->page_order;
	struct aq_ring_buff_s *buff = NULL;
	int err = 0;
	int i = 0;

	if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
					   self->size / 2))
		return err;

	for (i = aq_ring_avail_dx(self); i--;
	     self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
		buff = &self->buff_ring[self->sw_tail];

		buff->flags = 0U;
		buff->len = AQ_CFG_RX_FRAME_MAX;

		err = aq_get_rxpages(self, buff, page_order);
		if (err)
			goto err_exit;

		buff->pa = aq_buf_daddr(&buff->rxdata);
		buff = NULL;
	}

err_exit:
	return err;
}

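/* Release the pages still attached to unprocessed Rx descriptors when the
 * ring is torn down.
 */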
void aq_ring_rx_deinit(struct aq_ring_s *self)
{
	if (!self)
		return;

	for (; self->sw_head != self->sw_tail;
	     self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];

		aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
	}
}

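/* Free both the software buffer ring and the coherent descriptor memory;
 * safe to call on a partially constructed ring.
 */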
void aq_ring_free(struct aq_ring_s *self)
{
	if (!self)
		return;

	kfree(self->buff_ring);

	if (self->dx_ring)
		dma_free_coherent(aq_nic_get_dev(self->aq_nic),
				  self->size * self->dx_size, self->dx_ring,
				  self->dx_ring_pa);
}