Linux-libre 4.14.68-gnu
librecmc/linux-libre.git: drivers/net/ethernet/broadcom/bnxt/bnxt.c
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2017 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/if.h>
35 #include <linux/if_vlan.h>
36 #include <linux/if_bridge.h>
37 #include <linux/rtc.h>
38 #include <linux/bpf.h>
39 #include <net/ip.h>
40 #include <net/tcp.h>
41 #include <net/udp.h>
42 #include <net/checksum.h>
43 #include <net/ip6_checksum.h>
44 #include <net/udp_tunnel.h>
45 #include <linux/workqueue.h>
46 #include <linux/prefetch.h>
47 #include <linux/cache.h>
48 #include <linux/log2.h>
49 #include <linux/aer.h>
50 #include <linux/bitmap.h>
51 #include <linux/cpu_rmap.h>
52 #include <linux/cpumask.h>
53 #include <net/pkt_cls.h>
54
55 #include "bnxt_hsi.h"
56 #include "bnxt.h"
57 #include "bnxt_ulp.h"
58 #include "bnxt_sriov.h"
59 #include "bnxt_ethtool.h"
60 #include "bnxt_dcb.h"
61 #include "bnxt_xdp.h"
62 #include "bnxt_vfr.h"
63 #include "bnxt_tc.h"
64
65 #define BNXT_TX_TIMEOUT         (5 * HZ)
66
67 static const char version[] =
68         "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
69
70 MODULE_LICENSE("GPL");
71 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
72 MODULE_VERSION(DRV_MODULE_VERSION);
73
74 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
75 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
76 #define BNXT_RX_COPY_THRESH 256
77
78 #define BNXT_TX_PUSH_THRESH 164
79
80 enum board_idx {
81         BCM57301,
82         BCM57302,
83         BCM57304,
84         BCM57417_NPAR,
85         BCM58700,
86         BCM57311,
87         BCM57312,
88         BCM57402,
89         BCM57404,
90         BCM57406,
91         BCM57402_NPAR,
92         BCM57407,
93         BCM57412,
94         BCM57414,
95         BCM57416,
96         BCM57417,
97         BCM57412_NPAR,
98         BCM57314,
99         BCM57417_SFP,
100         BCM57416_SFP,
101         BCM57404_NPAR,
102         BCM57406_NPAR,
103         BCM57407_SFP,
104         BCM57407_NPAR,
105         BCM57414_NPAR,
106         BCM57416_NPAR,
107         BCM57452,
108         BCM57454,
109         BCM58802,
110         BCM58808,
111         NETXTREME_E_VF,
112         NETXTREME_C_VF,
113 };
114
115 /* indexed by enum above */
116 static const struct {
117         char *name;
118 } board_info[] = {
119         [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
120         [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
121         [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
122         [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
123         [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
124         [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
125         [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
126         [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
127         [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
128         [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
129         [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
130         [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
131         [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
132         [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
133         [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
134         [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
135         [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
136         [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
137         [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
138         [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
139         [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
140         [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
141         [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
142         [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
143         [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
144         [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
145         [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
146         [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
147         [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
148         [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
149         [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
150         [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
151 };
152
153 static const struct pci_device_id bnxt_pci_tbl[] = {
154         { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
155         { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
156         { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
157         { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
158         { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
159         { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
160         { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
161         { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
162         { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
163         { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
164         { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
165         { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
166         { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
167         { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
168         { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
169         { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
170         { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
171         { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
172         { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
173         { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
174         { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
175         { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
176         { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
177         { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
178         { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
179         { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
180         { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
181         { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
182         { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
183         { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
184         { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
185         { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
186         { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
187         { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
188 #ifdef CONFIG_BNXT_SRIOV
189         { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
190         { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
191         { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
192         { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
193         { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
194         { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
195         { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
196         { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
197 #endif
198         { 0 }
199 };
200
201 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
202
203 static const u16 bnxt_vf_req_snif[] = {
204         HWRM_FUNC_CFG,
205         HWRM_PORT_PHY_QCFG,
206         HWRM_CFA_L2_FILTER_ALLOC,
207 };
208
209 static const u16 bnxt_async_events_arr[] = {
210         ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
211         ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
212         ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
213         ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
214         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
215 };
216
217 static struct workqueue_struct *bnxt_pf_wq;
218
219 static bool bnxt_vf_pciid(enum board_idx idx)
220 {
221         return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
222 }
223
224 #define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
225 #define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
226 #define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)
227
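/* Completion ring doorbell helpers: the REARM variant writes a valid
 * consumer index without DB_IRQ_DIS (presumably re-arming the interrupt),
 * BNXT_CP_DB writes the index with DB_IRQ_DIS set, and BNXT_CP_DB_IRQ_DIS
 * only sets DB_IRQ_DIS without marking the index valid.
 */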
228 #define BNXT_CP_DB_REARM(db, raw_cons)                                  \
229                 writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
230
231 #define BNXT_CP_DB(db, raw_cons)                                        \
232                 writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
233
234 #define BNXT_CP_DB_IRQ_DIS(db)                                          \
235                 writel(DB_CP_IRQ_DIS_FLAGS, db)
236
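/* TX length hint table: bnxt_start_xmit() indexes it with the packet
 * length >> 9 and ORs the result into the TX BD flags, so packets up to
 * 512 bytes, 512-1023, 1024-2047 and anything 2048 bytes or larger get
 * the corresponding TX_BD_FLAGS_LHINT_* value.
 */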
237 const u16 bnxt_lhint_arr[] = {
238         TX_BD_FLAGS_LHINT_512_AND_SMALLER,
239         TX_BD_FLAGS_LHINT_512_TO_1023,
240         TX_BD_FLAGS_LHINT_1024_TO_2047,
241         TX_BD_FLAGS_LHINT_1024_TO_2047,
242         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
243         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
244         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
245         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
246         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
247         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
248         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
249         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
250         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
251         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
252         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
253         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
254         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
255         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
256         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
257 };
258
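/* Look up the CFA action for a TX packet: if the skb carries a
 * METADATA_HW_PORT_MUX metadata dst, return its port_id; otherwise 0.
 */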
259 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
260 {
261         struct metadata_dst *md_dst = skb_metadata_dst(skb);
262
263         if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
264                 return 0;
265
266         return md_dst->u.port_info.port_id;
267 }
268
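/* Main transmit routine.  Small packets (length <= tx_push_thresh) on an
 * otherwise empty ring are copied into the push buffer and written through
 * the doorbell with __iowrite64_copy()/__iowrite32_copy(); everything else
 * takes the normal path: the head and each fragment are DMA-mapped, long
 * TX BDs are filled in, and the doorbell is rung unless xmit_more defers it.
 */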
269 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
270 {
271         struct bnxt *bp = netdev_priv(dev);
272         struct tx_bd *txbd;
273         struct tx_bd_ext *txbd1;
274         struct netdev_queue *txq;
275         int i;
276         dma_addr_t mapping;
277         unsigned int length, pad = 0;
278         u32 len, free_size, vlan_tag_flags, cfa_action, flags;
279         u16 prod, last_frag;
280         struct pci_dev *pdev = bp->pdev;
281         struct bnxt_tx_ring_info *txr;
282         struct bnxt_sw_tx_bd *tx_buf;
283
284         i = skb_get_queue_mapping(skb);
285         if (unlikely(i >= bp->tx_nr_rings)) {
286                 dev_kfree_skb_any(skb);
287                 return NETDEV_TX_OK;
288         }
289
290         txq = netdev_get_tx_queue(dev, i);
291         txr = &bp->tx_ring[bp->tx_ring_map[i]];
292         prod = txr->tx_prod;
293
294         free_size = bnxt_tx_avail(bp, txr);
295         if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
296                 netif_tx_stop_queue(txq);
297                 return NETDEV_TX_BUSY;
298         }
299
300         length = skb->len;
301         len = skb_headlen(skb);
302         last_frag = skb_shinfo(skb)->nr_frags;
303
304         txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
305
306         txbd->tx_bd_opaque = prod;
307
308         tx_buf = &txr->tx_buf_ring[prod];
309         tx_buf->skb = skb;
310         tx_buf->nr_frags = last_frag;
311
312         vlan_tag_flags = 0;
313         cfa_action = bnxt_xmit_get_cfa_action(skb);
314         if (skb_vlan_tag_present(skb)) {
315                 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
316                                  skb_vlan_tag_get(skb);
317                 /* Currently supports 802.1Q and 802.1ad VLAN offloads;
318                  * the QINQ1, QINQ2 and QINQ3 VLAN headers are deprecated.
319                  */
320                 if (skb->vlan_proto == htons(ETH_P_8021Q))
321                         vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
322         }
323
324         if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
325                 struct tx_push_buffer *tx_push_buf = txr->tx_push;
326                 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
327                 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
328                 void *pdata = tx_push_buf->data;
329                 u64 *end;
330                 int j, push_len;
331
332                 /* Set COAL_NOW to be ready quickly for the next push */
333                 tx_push->tx_bd_len_flags_type =
334                         cpu_to_le32((length << TX_BD_LEN_SHIFT) |
335                                         TX_BD_TYPE_LONG_TX_BD |
336                                         TX_BD_FLAGS_LHINT_512_AND_SMALLER |
337                                         TX_BD_FLAGS_COAL_NOW |
338                                         TX_BD_FLAGS_PACKET_END |
339                                         (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
340
341                 if (skb->ip_summed == CHECKSUM_PARTIAL)
342                         tx_push1->tx_bd_hsize_lflags =
343                                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
344                 else
345                         tx_push1->tx_bd_hsize_lflags = 0;
346
347                 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
348                 tx_push1->tx_bd_cfa_action =
349                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
350
351                 end = pdata + length;
352                 end = PTR_ALIGN(end, 8) - 1;
353                 *end = 0;
354
355                 skb_copy_from_linear_data(skb, pdata, len);
356                 pdata += len;
357                 for (j = 0; j < last_frag; j++) {
358                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
359                         void *fptr;
360
361                         fptr = skb_frag_address_safe(frag);
362                         if (!fptr)
363                                 goto normal_tx;
364
365                         memcpy(pdata, fptr, skb_frag_size(frag));
366                         pdata += skb_frag_size(frag);
367                 }
368
369                 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
370                 txbd->tx_bd_haddr = txr->data_mapping;
371                 prod = NEXT_TX(prod);
372                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
373                 memcpy(txbd, tx_push1, sizeof(*txbd));
374                 prod = NEXT_TX(prod);
375                 tx_push->doorbell =
376                         cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
377                 txr->tx_prod = prod;
378
379                 tx_buf->is_push = 1;
380                 netdev_tx_sent_queue(txq, skb->len);
381                 wmb();  /* Sync is_push and byte queue before pushing data */
382
383                 push_len = (length + sizeof(*tx_push) + 7) / 8;
384                 if (push_len > 16) {
385                         __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
386                         __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
387                                          (push_len - 16) << 1);
388                 } else {
389                         __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
390                                          push_len);
391                 }
392
393                 goto tx_done;
394         }
395
396 normal_tx:
397         if (length < BNXT_MIN_PKT_SIZE) {
398                 pad = BNXT_MIN_PKT_SIZE - length;
399                 if (skb_pad(skb, pad)) {
400                         /* SKB already freed. */
401                         tx_buf->skb = NULL;
402                         return NETDEV_TX_OK;
403                 }
404                 length = BNXT_MIN_PKT_SIZE;
405         }
406
407         mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
408
409         if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
410                 dev_kfree_skb_any(skb);
411                 tx_buf->skb = NULL;
412                 return NETDEV_TX_OK;
413         }
414
415         dma_unmap_addr_set(tx_buf, mapping, mapping);
416         flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
417                 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
418
419         txbd->tx_bd_haddr = cpu_to_le64(mapping);
420
421         prod = NEXT_TX(prod);
422         txbd1 = (struct tx_bd_ext *)
423                 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
424
425         txbd1->tx_bd_hsize_lflags = 0;
426         if (skb_is_gso(skb)) {
427                 u32 hdr_len;
428
429                 if (skb->encapsulation)
430                         hdr_len = skb_inner_network_offset(skb) +
431                                 skb_inner_network_header_len(skb) +
432                                 inner_tcp_hdrlen(skb);
433                 else
434                         hdr_len = skb_transport_offset(skb) +
435                                 tcp_hdrlen(skb);
436
437                 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
438                                         TX_BD_FLAGS_T_IPID |
439                                         (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
440                 length = skb_shinfo(skb)->gso_size;
441                 txbd1->tx_bd_mss = cpu_to_le32(length);
442                 length += hdr_len;
443         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
444                 txbd1->tx_bd_hsize_lflags =
445                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
446                 txbd1->tx_bd_mss = 0;
447         }
448
449         length >>= 9;
450         flags |= bnxt_lhint_arr[length];
451         txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
452
453         txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
454         txbd1->tx_bd_cfa_action =
455                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
456         for (i = 0; i < last_frag; i++) {
457                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
458
459                 prod = NEXT_TX(prod);
460                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
461
462                 len = skb_frag_size(frag);
463                 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
464                                            DMA_TO_DEVICE);
465
466                 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
467                         goto tx_dma_error;
468
469                 tx_buf = &txr->tx_buf_ring[prod];
470                 dma_unmap_addr_set(tx_buf, mapping, mapping);
471
472                 txbd->tx_bd_haddr = cpu_to_le64(mapping);
473
474                 flags = len << TX_BD_LEN_SHIFT;
475                 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
476         }
477
478         flags &= ~TX_BD_LEN;
479         txbd->tx_bd_len_flags_type =
480                 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
481                             TX_BD_FLAGS_PACKET_END);
482
483         netdev_tx_sent_queue(txq, skb->len);
484
485         /* Sync BD data before updating doorbell */
486         wmb();
487
488         prod = NEXT_TX(prod);
489         txr->tx_prod = prod;
490
491         if (!skb->xmit_more || netif_xmit_stopped(txq))
492                 bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
493
494 tx_done:
495
496         mmiowb();
497
498         if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
499                 if (skb->xmit_more && !tx_buf->is_push)
500                         bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
501
502                 netif_tx_stop_queue(txq);
503
504                 /* netif_tx_stop_queue() must be done before checking
505                  * tx index in bnxt_tx_avail() below, because in
506                  * bnxt_tx_int(), we update tx index before checking for
507                  * netif_tx_queue_stopped().
508                  */
509                 smp_mb();
510                 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
511                         netif_tx_wake_queue(txq);
512         }
513         return NETDEV_TX_OK;
514
515 tx_dma_error:
516         last_frag = i;
517
518         /* start back at beginning and unmap skb */
519         prod = txr->tx_prod;
520         tx_buf = &txr->tx_buf_ring[prod];
521         tx_buf->skb = NULL;
522         dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
523                          skb_headlen(skb), PCI_DMA_TODEVICE);
524         prod = NEXT_TX(prod);
525
526         /* unmap remaining mapped pages */
527         for (i = 0; i < last_frag; i++) {
528                 prod = NEXT_TX(prod);
529                 tx_buf = &txr->tx_buf_ring[prod];
530                 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
531                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
532                                PCI_DMA_TODEVICE);
533         }
534
535         dev_kfree_skb_any(skb);
536         return NETDEV_TX_OK;
537 }
538
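/* TX completion: walk nr_pkts completed packets starting at tx_cons, unmap
 * the head and fragment DMA buffers (push packets have nothing to unmap),
 * free the skbs, update BQL accounting, and wake the queue if it was
 * stopped and enough descriptors are now free.
 */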
539 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
540 {
541         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
542         struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
543         u16 cons = txr->tx_cons;
544         struct pci_dev *pdev = bp->pdev;
545         int i;
546         unsigned int tx_bytes = 0;
547
548         for (i = 0; i < nr_pkts; i++) {
549                 struct bnxt_sw_tx_bd *tx_buf;
550                 struct sk_buff *skb;
551                 int j, last;
552
553                 tx_buf = &txr->tx_buf_ring[cons];
554                 cons = NEXT_TX(cons);
555                 skb = tx_buf->skb;
556                 tx_buf->skb = NULL;
557
558                 if (tx_buf->is_push) {
559                         tx_buf->is_push = 0;
560                         goto next_tx_int;
561                 }
562
563                 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
564                                  skb_headlen(skb), PCI_DMA_TODEVICE);
565                 last = tx_buf->nr_frags;
566
567                 for (j = 0; j < last; j++) {
568                         cons = NEXT_TX(cons);
569                         tx_buf = &txr->tx_buf_ring[cons];
570                         dma_unmap_page(
571                                 &pdev->dev,
572                                 dma_unmap_addr(tx_buf, mapping),
573                                 skb_frag_size(&skb_shinfo(skb)->frags[j]),
574                                 PCI_DMA_TODEVICE);
575                 }
576
577 next_tx_int:
578                 cons = NEXT_TX(cons);
579
580                 tx_bytes += skb->len;
581                 dev_kfree_skb_any(skb);
582         }
583
584         netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
585         txr->tx_cons = cons;
586
587         /* Need to make the tx_cons update visible to bnxt_start_xmit()
588          * before checking for netif_tx_queue_stopped().  Without the
589          * memory barrier, there is a small possibility that bnxt_start_xmit()
590          * will miss it and cause the queue to be stopped forever.
591          */
592         smp_mb();
593
594         if (unlikely(netif_tx_queue_stopped(txq)) &&
595             (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
596                 __netif_tx_lock(txq, smp_processor_id());
597                 if (netif_tx_queue_stopped(txq) &&
598                     bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
599                     txr->dev_state != BNXT_DEV_STATE_CLOSING)
600                         netif_tx_wake_queue(txq);
601                 __netif_tx_unlock(txq);
602         }
603 }
604
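/* Allocate and DMA-map a full page for page-mode RX buffers; the returned
 * mapping is advanced by rx_dma_offset so it points at the data area.
 */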
605 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
606                                          gfp_t gfp)
607 {
608         struct device *dev = &bp->pdev->dev;
609         struct page *page;
610
611         page = alloc_page(gfp);
612         if (!page)
613                 return NULL;
614
615         *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
616                                       DMA_ATTR_WEAK_ORDERING);
617         if (dma_mapping_error(dev, *mapping)) {
618                 __free_page(page);
619                 return NULL;
620         }
621         *mapping += bp->rx_dma_offset;
622         return page;
623 }
624
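/* Allocate and DMA-map a kmalloc'ed RX buffer for the normal (non
 * page-mode) receive path; only the area past rx_dma_offset is mapped
 * for the device.
 */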
625 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
626                                        gfp_t gfp)
627 {
628         u8 *data;
629         struct pci_dev *pdev = bp->pdev;
630
631         data = kmalloc(bp->rx_buf_size, gfp);
632         if (!data)
633                 return NULL;
634
635         *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
636                                         bp->rx_buf_use_size, bp->rx_dir,
637                                         DMA_ATTR_WEAK_ORDERING);
638
639         if (dma_mapping_error(&pdev->dev, *mapping)) {
640                 kfree(data);
641                 data = NULL;
642         }
643         return data;
644 }
645
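/* Fill one RX ring slot at @prod with a freshly allocated buffer (a page
 * in page mode, a data buffer otherwise) and publish its DMA address in
 * the RX BD.
 */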
646 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
647                        u16 prod, gfp_t gfp)
648 {
649         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
650         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
651         dma_addr_t mapping;
652
653         if (BNXT_RX_PAGE_MODE(bp)) {
654                 struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
655
656                 if (!page)
657                         return -ENOMEM;
658
659                 rx_buf->data = page;
660                 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
661         } else {
662                 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
663
664                 if (!data)
665                         return -ENOMEM;
666
667                 rx_buf->data = data;
668                 rx_buf->data_ptr = data + bp->rx_offset;
669         }
670         rx_buf->mapping = mapping;
671
672         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
673         return 0;
674 }
675
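/* Recycle an RX buffer: move the buffer at @cons (together with its DMA
 * mapping) to the current producer slot so it can be handed back to the
 * hardware unchanged.
 */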
676 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
677 {
678         u16 prod = rxr->rx_prod;
679         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
680         struct rx_bd *cons_bd, *prod_bd;
681
682         prod_rx_buf = &rxr->rx_buf_ring[prod];
683         cons_rx_buf = &rxr->rx_buf_ring[cons];
684
685         prod_rx_buf->data = data;
686         prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
687
688         prod_rx_buf->mapping = cons_rx_buf->mapping;
689
690         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
691         cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
692
693         prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
694 }
695
696 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
697 {
698         u16 next, max = rxr->rx_agg_bmap_size;
699
700         next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
701         if (next >= max)
702                 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
703         return next;
704 }
705
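/* Allocate one aggregation ring buffer at @prod.  When PAGE_SIZE is larger
 * than BNXT_RX_PAGE_SIZE, a page is carved into BNXT_RX_PAGE_SIZE chunks
 * shared via get_page(); otherwise a full page is used per buffer.  The
 * software producer index is recorded in the BD opaque field.
 */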
706 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
707                                      struct bnxt_rx_ring_info *rxr,
708                                      u16 prod, gfp_t gfp)
709 {
710         struct rx_bd *rxbd =
711                 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
712         struct bnxt_sw_rx_agg_bd *rx_agg_buf;
713         struct pci_dev *pdev = bp->pdev;
714         struct page *page;
715         dma_addr_t mapping;
716         u16 sw_prod = rxr->rx_sw_agg_prod;
717         unsigned int offset = 0;
718
719         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
720                 page = rxr->rx_page;
721                 if (!page) {
722                         page = alloc_page(gfp);
723                         if (!page)
724                                 return -ENOMEM;
725                         rxr->rx_page = page;
726                         rxr->rx_page_offset = 0;
727                 }
728                 offset = rxr->rx_page_offset;
729                 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
730                 if (rxr->rx_page_offset == PAGE_SIZE)
731                         rxr->rx_page = NULL;
732                 else
733                         get_page(page);
734         } else {
735                 page = alloc_page(gfp);
736                 if (!page)
737                         return -ENOMEM;
738         }
739
740         mapping = dma_map_page_attrs(&pdev->dev, page, offset,
741                                      BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
742                                      DMA_ATTR_WEAK_ORDERING);
743         if (dma_mapping_error(&pdev->dev, mapping)) {
744                 __free_page(page);
745                 return -EIO;
746         }
747
748         if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
749                 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
750
751         __set_bit(sw_prod, rxr->rx_agg_bmap);
752         rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
753         rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
754
755         rx_agg_buf->page = page;
756         rx_agg_buf->offset = offset;
757         rx_agg_buf->mapping = mapping;
758         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
759         rxbd->rx_bd_opaque = sw_prod;
760         return 0;
761 }
762
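/* Recycle @agg_bufs aggregation buffers named in the completion ring back
 * onto the aggregation ring (e.g. after an allocation failure or an
 * aborted TPA aggregation), updating the bitmap and both producer indices.
 */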
763 static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
764                                    u32 agg_bufs)
765 {
766         struct bnxt *bp = bnapi->bp;
767         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
768         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
769         u16 prod = rxr->rx_agg_prod;
770         u16 sw_prod = rxr->rx_sw_agg_prod;
771         u32 i;
772
773         for (i = 0; i < agg_bufs; i++) {
774                 u16 cons;
775                 struct rx_agg_cmp *agg;
776                 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
777                 struct rx_bd *prod_bd;
778                 struct page *page;
779
780                 agg = (struct rx_agg_cmp *)
781                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
782                 cons = agg->rx_agg_cmp_opaque;
783                 __clear_bit(cons, rxr->rx_agg_bmap);
784
785                 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
786                         sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
787
788                 __set_bit(sw_prod, rxr->rx_agg_bmap);
789                 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
790                 cons_rx_buf = &rxr->rx_agg_ring[cons];
791
792                 /* It is possible for sw_prod to be equal to cons, so
793                  * set cons_rx_buf->page to NULL first.
794                  */
795                 page = cons_rx_buf->page;
796                 cons_rx_buf->page = NULL;
797                 prod_rx_buf->page = page;
798                 prod_rx_buf->offset = cons_rx_buf->offset;
799
800                 prod_rx_buf->mapping = cons_rx_buf->mapping;
801
802                 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
803
804                 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
805                 prod_bd->rx_bd_opaque = sw_prod;
806
807                 prod = NEXT_RX_AGG(prod);
808                 sw_prod = NEXT_RX_AGG(sw_prod);
809                 cp_cons = NEXT_CMP(cp_cons);
810         }
811         rxr->rx_agg_prod = prod;
812         rxr->rx_sw_agg_prod = sw_prod;
813 }
814
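/* Build an skb for a page-mode RX buffer: replace the ring buffer first,
 * unmap the page, copy the packet headers into a small linear area and
 * leave the payload attached as a page fragment.
 */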
815 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
816                                         struct bnxt_rx_ring_info *rxr,
817                                         u16 cons, void *data, u8 *data_ptr,
818                                         dma_addr_t dma_addr,
819                                         unsigned int offset_and_len)
820 {
821         unsigned int payload = offset_and_len >> 16;
822         unsigned int len = offset_and_len & 0xffff;
823         struct skb_frag_struct *frag;
824         struct page *page = data;
825         u16 prod = rxr->rx_prod;
826         struct sk_buff *skb;
827         int off, err;
828
829         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
830         if (unlikely(err)) {
831                 bnxt_reuse_rx_data(rxr, cons, data);
832                 return NULL;
833         }
834         dma_addr -= bp->rx_dma_offset;
835         dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
836                              DMA_ATTR_WEAK_ORDERING);
837
838         if (unlikely(!payload))
839                 payload = eth_get_headlen(data_ptr, len);
840
841         skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
842         if (!skb) {
843                 __free_page(page);
844                 return NULL;
845         }
846
847         off = (void *)data_ptr - page_address(page);
848         skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
849         memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
850                payload + NET_IP_ALIGN);
851
852         frag = &skb_shinfo(skb)->frags[0];
853         skb_frag_size_sub(frag, payload);
854         frag->page_offset += payload;
855         skb->data_len -= payload;
856         skb->tail += payload;
857
858         return skb;
859 }
860
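/* Build an skb around a normal (kmalloc'ed) RX buffer using build_skb()
 * after replacing the ring entry; on failure the original buffer is
 * recycled back to the ring.
 */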
861 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
862                                    struct bnxt_rx_ring_info *rxr, u16 cons,
863                                    void *data, u8 *data_ptr,
864                                    dma_addr_t dma_addr,
865                                    unsigned int offset_and_len)
866 {
867         u16 prod = rxr->rx_prod;
868         struct sk_buff *skb;
869         int err;
870
871         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
872         if (unlikely(err)) {
873                 bnxt_reuse_rx_data(rxr, cons, data);
874                 return NULL;
875         }
876
877         skb = build_skb(data, 0);
878         dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
879                                bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
880         if (!skb) {
881                 kfree(data);
882                 return NULL;
883         }
884
885         skb_reserve(skb, bp->rx_offset);
886         skb_put(skb, offset_and_len & 0xffff);
887         return skb;
888 }
889
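/* Attach @agg_bufs aggregation pages from the completion ring to @skb as
 * page fragments.  If a replacement page cannot be allocated, the skb is
 * dropped and the remaining aggregation buffers are recycled.
 */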
890 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
891                                      struct sk_buff *skb, u16 cp_cons,
892                                      u32 agg_bufs)
893 {
894         struct pci_dev *pdev = bp->pdev;
895         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
896         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
897         u16 prod = rxr->rx_agg_prod;
898         u32 i;
899
900         for (i = 0; i < agg_bufs; i++) {
901                 u16 cons, frag_len;
902                 struct rx_agg_cmp *agg;
903                 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
904                 struct page *page;
905                 dma_addr_t mapping;
906
907                 agg = (struct rx_agg_cmp *)
908                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
909                 cons = agg->rx_agg_cmp_opaque;
910                 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
911                             RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
912
913                 cons_rx_buf = &rxr->rx_agg_ring[cons];
914                 skb_fill_page_desc(skb, i, cons_rx_buf->page,
915                                    cons_rx_buf->offset, frag_len);
916                 __clear_bit(cons, rxr->rx_agg_bmap);
917
918                 /* It is possible for bnxt_alloc_rx_page() to allocate
919                  * a sw_prod index that equals the cons index, so we
920                  * need to clear the cons entry now.
921                  */
922                 mapping = cons_rx_buf->mapping;
923                 page = cons_rx_buf->page;
924                 cons_rx_buf->page = NULL;
925
926                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
927                         struct skb_shared_info *shinfo;
928                         unsigned int nr_frags;
929
930                         shinfo = skb_shinfo(skb);
931                         nr_frags = --shinfo->nr_frags;
932                         __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
933
934                         dev_kfree_skb(skb);
935
936                         cons_rx_buf->page = page;
937
938                         /* Update prod since possibly some pages have been
939                          * allocated already.
940                          */
941                         rxr->rx_agg_prod = prod;
942                         bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
943                         return NULL;
944                 }
945
946                 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
947                                      PCI_DMA_FROMDEVICE,
948                                      DMA_ATTR_WEAK_ORDERING);
949
950                 skb->data_len += frag_len;
951                 skb->len += frag_len;
952                 skb->truesize += PAGE_SIZE;
953
954                 prod = NEXT_RX_AGG(prod);
955                 cp_cons = NEXT_CMP(cp_cons);
956         }
957         rxr->rx_agg_prod = prod;
958         return skb;
959 }
960
961 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
962                                u8 agg_bufs, u32 *raw_cons)
963 {
964         u16 last;
965         struct rx_agg_cmp *agg;
966
967         *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
968         last = RING_CMP(*raw_cons);
969         agg = (struct rx_agg_cmp *)
970                 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
971         return RX_AGG_CMP_VALID(agg, *raw_cons);
972 }
973
974 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
975                                             unsigned int len,
976                                             dma_addr_t mapping)
977 {
978         struct bnxt *bp = bnapi->bp;
979         struct pci_dev *pdev = bp->pdev;
980         struct sk_buff *skb;
981
982         skb = napi_alloc_skb(&bnapi->napi, len);
983         if (!skb)
984                 return NULL;
985
986         dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
987                                 bp->rx_dir);
988
989         memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
990                len + NET_IP_ALIGN);
991
992         dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
993                                    bp->rx_dir);
994
995         skb_put(skb, len);
996         return skb;
997 }
998
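/* Consume (and discard) one RX or TPA-end completion, including any
 * aggregation completions that follow it; returns -EBUSY if those
 * aggregation entries are not valid yet.
 */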
999 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
1000                            u32 *raw_cons, void *cmp)
1001 {
1002         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1003         struct rx_cmp *rxcmp = cmp;
1004         u32 tmp_raw_cons = *raw_cons;
1005         u8 cmp_type, agg_bufs = 0;
1006
1007         cmp_type = RX_CMP_TYPE(rxcmp);
1008
1009         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1010                 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1011                             RX_CMP_AGG_BUFS) >>
1012                            RX_CMP_AGG_BUFS_SHIFT;
1013         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1014                 struct rx_tpa_end_cmp *tpa_end = cmp;
1015
1016                 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1017                             RX_TPA_END_CMP_AGG_BUFS) >>
1018                            RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1019         }
1020
1021         if (agg_bufs) {
1022                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1023                         return -EBUSY;
1024         }
1025         *raw_cons = tmp_raw_cons;
1026         return 0;
1027 }
1028
1029 static void bnxt_queue_sp_work(struct bnxt *bp)
1030 {
1031         if (BNXT_PF(bp))
1032                 queue_work(bnxt_pf_wq, &bp->sp_task);
1033         else
1034                 schedule_work(&bp->sp_task);
1035 }
1036
1037 static void bnxt_cancel_sp_work(struct bnxt *bp)
1038 {
1039         if (BNXT_PF(bp))
1040                 flush_workqueue(bnxt_pf_wq);
1041         else
1042                 cancel_work_sync(&bp->sp_task);
1043 }
1044
1045 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1046 {
1047         if (!rxr->bnapi->in_reset) {
1048                 rxr->bnapi->in_reset = true;
1049                 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1050                 bnxt_queue_sp_work(bp);
1051         }
1052         rxr->rx_next_cons = 0xffff;
1053 }
1054
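/* Handle a TPA_START completion: stash the RX buffer at the completion's
 * cons index in the per-agg_id tpa_info (along with length, hash, cfa_code
 * and metadata for TPA_END processing) and place tpa_info's previous
 * buffer back on the ring at the producer index.
 */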
1055 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1056                            struct rx_tpa_start_cmp *tpa_start,
1057                            struct rx_tpa_start_cmp_ext *tpa_start1)
1058 {
1059         u8 agg_id = TPA_START_AGG_ID(tpa_start);
1060         u16 cons, prod;
1061         struct bnxt_tpa_info *tpa_info;
1062         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1063         struct rx_bd *prod_bd;
1064         dma_addr_t mapping;
1065
1066         cons = tpa_start->rx_tpa_start_cmp_opaque;
1067         prod = rxr->rx_prod;
1068         cons_rx_buf = &rxr->rx_buf_ring[cons];
1069         prod_rx_buf = &rxr->rx_buf_ring[prod];
1070         tpa_info = &rxr->rx_tpa[agg_id];
1071
1072         if (unlikely(cons != rxr->rx_next_cons)) {
1073                 bnxt_sched_reset(bp, rxr);
1074                 return;
1075         }
1076         /* Store cfa_code in tpa_info to use in tpa_end
1077          * completion processing.
1078          */
1079         tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1080         prod_rx_buf->data = tpa_info->data;
1081         prod_rx_buf->data_ptr = tpa_info->data_ptr;
1082
1083         mapping = tpa_info->mapping;
1084         prod_rx_buf->mapping = mapping;
1085
1086         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1087
1088         prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1089
1090         tpa_info->data = cons_rx_buf->data;
1091         tpa_info->data_ptr = cons_rx_buf->data_ptr;
1092         cons_rx_buf->data = NULL;
1093         tpa_info->mapping = cons_rx_buf->mapping;
1094
1095         tpa_info->len =
1096                 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1097                                 RX_TPA_START_CMP_LEN_SHIFT;
1098         if (likely(TPA_START_HASH_VALID(tpa_start))) {
1099                 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1100
1101                 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1102                 tpa_info->gso_type = SKB_GSO_TCPV4;
1103                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1104                 if (hash_type == 3)
1105                         tpa_info->gso_type = SKB_GSO_TCPV6;
1106                 tpa_info->rss_hash =
1107                         le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1108         } else {
1109                 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1110                 tpa_info->gso_type = 0;
1111                 if (netif_msg_rx_err(bp))
1112                         netdev_warn(bp->dev, "TPA packet without valid hash\n");
1113         }
1114         tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1115         tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1116         tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1117
1118         rxr->rx_prod = NEXT_RX(prod);
1119         cons = NEXT_RX(cons);
1120         rxr->rx_next_cons = NEXT_RX(cons);
1121         cons_rx_buf = &rxr->rx_buf_ring[cons];
1122
1123         bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1124         rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1125         cons_rx_buf->data = NULL;
1126 }
1127
1128 static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
1129                            u16 cp_cons, u32 agg_bufs)
1130 {
1131         if (agg_bufs)
1132                 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1133 }
1134
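/* GRO header fixup for 5731x-class chips: recover the inner (and, for
 * tunnels, outer) header offsets from the TPA hdr_info, correct them for
 * internal loopback packets, and recompute the TCP pseudo-header checksum.
 */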
1135 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1136                                            int payload_off, int tcp_ts,
1137                                            struct sk_buff *skb)
1138 {
1139 #ifdef CONFIG_INET
1140         struct tcphdr *th;
1141         int len, nw_off;
1142         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1143         u32 hdr_info = tpa_info->hdr_info;
1144         bool loopback = false;
1145
1146         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1147         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1148         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1149
1150         /* If the packet is an internal loopback packet, the offsets will
1151          * have an extra 4 bytes.
1152          */
1153         if (inner_mac_off == 4) {
1154                 loopback = true;
1155         } else if (inner_mac_off > 4) {
1156                 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1157                                             ETH_HLEN - 2));
1158
1159                 /* We only support inner IPv4/IPv6.  If we don't see the
1160                  * correct protocol ID, it must be a loopback packet where
1161                  * the offsets are off by 4.
1162                  */
1163                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1164                         loopback = true;
1165         }
1166         if (loopback) {
1167                 /* internal loopback packet, subtract 4 from all offsets */
1168                 inner_ip_off -= 4;
1169                 inner_mac_off -= 4;
1170                 outer_ip_off -= 4;
1171         }
1172
1173         nw_off = inner_ip_off - ETH_HLEN;
1174         skb_set_network_header(skb, nw_off);
1175         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1176                 struct ipv6hdr *iph = ipv6_hdr(skb);
1177
1178                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1179                 len = skb->len - skb_transport_offset(skb);
1180                 th = tcp_hdr(skb);
1181                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1182         } else {
1183                 struct iphdr *iph = ip_hdr(skb);
1184
1185                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1186                 len = skb->len - skb_transport_offset(skb);
1187                 th = tcp_hdr(skb);
1188                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1189         }
1190
1191         if (inner_mac_off) { /* tunnel */
1192                 struct udphdr *uh = NULL;
1193                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1194                                             ETH_HLEN - 2));
1195
1196                 if (proto == htons(ETH_P_IP)) {
1197                         struct iphdr *iph = (struct iphdr *)skb->data;
1198
1199                         if (iph->protocol == IPPROTO_UDP)
1200                                 uh = (struct udphdr *)(iph + 1);
1201                 } else {
1202                         struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1203
1204                         if (iph->nexthdr == IPPROTO_UDP)
1205                                 uh = (struct udphdr *)(iph + 1);
1206                 }
1207                 if (uh) {
1208                         if (uh->check)
1209                                 skb_shinfo(skb)->gso_type |=
1210                                         SKB_GSO_UDP_TUNNEL_CSUM;
1211                         else
1212                                 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1213                 }
1214         }
1215 #endif
1216         return skb;
1217 }
1218
1219 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1220 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1221
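/* GRO header fixup for 5730x-class chips: derive the network header offset
 * from the payload offset and fixed IPv4/IPv6 + TCP header sizes, then
 * recompute the TCP pseudo-header checksum; a nonzero offset implies a
 * tunneled packet.
 */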
1222 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1223                                            int payload_off, int tcp_ts,
1224                                            struct sk_buff *skb)
1225 {
1226 #ifdef CONFIG_INET
1227         struct tcphdr *th;
1228         int len, nw_off, tcp_opt_len = 0;
1229
1230         if (tcp_ts)
1231                 tcp_opt_len = 12;
1232
1233         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1234                 struct iphdr *iph;
1235
1236                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1237                          ETH_HLEN;
1238                 skb_set_network_header(skb, nw_off);
1239                 iph = ip_hdr(skb);
1240                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1241                 len = skb->len - skb_transport_offset(skb);
1242                 th = tcp_hdr(skb);
1243                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1244         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1245                 struct ipv6hdr *iph;
1246
1247                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1248                          ETH_HLEN;
1249                 skb_set_network_header(skb, nw_off);
1250                 iph = ipv6_hdr(skb);
1251                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1252                 len = skb->len - skb_transport_offset(skb);
1253                 th = tcp_hdr(skb);
1254                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1255         } else {
1256                 dev_kfree_skb_any(skb);
1257                 return NULL;
1258         }
1259
1260         if (nw_off) { /* tunnel */
1261                 struct udphdr *uh = NULL;
1262
1263                 if (skb->protocol == htons(ETH_P_IP)) {
1264                         struct iphdr *iph = (struct iphdr *)skb->data;
1265
1266                         if (iph->protocol == IPPROTO_UDP)
1267                                 uh = (struct udphdr *)(iph + 1);
1268                 } else {
1269                         struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1270
1271                         if (iph->nexthdr == IPPROTO_UDP)
1272                                 uh = (struct udphdr *)(iph + 1);
1273                 }
1274                 if (uh) {
1275                         if (uh->check)
1276                                 skb_shinfo(skb)->gso_type |=
1277                                         SKB_GSO_UDP_TUNNEL_CSUM;
1278                         else
1279                                 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1280                 }
1281         }
1282 #endif
1283         return skb;
1284 }
1285
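/* Finish GRO for an aggregated TPA packet: set gso_size/gso_type and the
 * segment count, let the chip-specific gro_func fix up the network and
 * transport headers, then call tcp_gro_complete().
 */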
1286 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1287                                            struct bnxt_tpa_info *tpa_info,
1288                                            struct rx_tpa_end_cmp *tpa_end,
1289                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1290                                            struct sk_buff *skb)
1291 {
1292 #ifdef CONFIG_INET
1293         int payload_off;
1294         u16 segs;
1295
1296         segs = TPA_END_TPA_SEGS(tpa_end);
1297         if (segs == 1)
1298                 return skb;
1299
1300         NAPI_GRO_CB(skb)->count = segs;
1301         skb_shinfo(skb)->gso_size =
1302                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1303         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1304         payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1305                        RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
1306                       RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
1307         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1308         if (likely(skb))
1309                 tcp_gro_complete(skb);
1310 #endif
1311         return skb;
1312 }
1313
1314 /* Given the cfa_code of a received packet, determine which
1315  * netdev (vf-rep or PF) the packet is destined for.
1316  */
1317 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1318 {
1319         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1320
1321         /* if vf-rep dev is NULL, the packet must belong to the PF */
1322         return dev ? dev : bp->dev;
1323 }
1324
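/* Handle a TPA_END completion: build an skb from the buffer saved at
 * TPA_START (copying small packets, otherwise reusing the buffer with
 * build_skb()), attach any aggregation pages, apply the RSS hash, VLAN and
 * checksum results, and hand off to GRO completion if requested.
 */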
1325 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1326                                            struct bnxt_napi *bnapi,
1327                                            u32 *raw_cons,
1328                                            struct rx_tpa_end_cmp *tpa_end,
1329                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1330                                            u8 *event)
1331 {
1332         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1333         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1334         u8 agg_id = TPA_END_AGG_ID(tpa_end);
1335         u8 *data_ptr, agg_bufs;
1336         u16 cp_cons = RING_CMP(*raw_cons);
1337         unsigned int len;
1338         struct bnxt_tpa_info *tpa_info;
1339         dma_addr_t mapping;
1340         struct sk_buff *skb;
1341         void *data;
1342
1343         if (unlikely(bnapi->in_reset)) {
1344                 int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
1345
1346                 if (rc < 0)
1347                         return ERR_PTR(-EBUSY);
1348                 return NULL;
1349         }
1350
1351         tpa_info = &rxr->rx_tpa[agg_id];
1352         data = tpa_info->data;
1353         data_ptr = tpa_info->data_ptr;
1354         prefetch(data_ptr);
1355         len = tpa_info->len;
1356         mapping = tpa_info->mapping;
1357
1358         agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1359                     RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1360
1361         if (agg_bufs) {
1362                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1363                         return ERR_PTR(-EBUSY);
1364
1365                 *event |= BNXT_AGG_EVENT;
1366                 cp_cons = NEXT_CMP(cp_cons);
1367         }
1368
1369         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1370                 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1371                 if (agg_bufs > MAX_SKB_FRAGS)
1372                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1373                                     agg_bufs, (int)MAX_SKB_FRAGS);
1374                 return NULL;
1375         }
1376
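        /* Small packets are copied into a new skb and the TPA buffer stays
         * in place; larger packets take over the TPA buffer, so a
         * replacement buffer must be allocated for the ring first.
         */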
1377         if (len <= bp->rx_copy_thresh) {
1378                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1379                 if (!skb) {
1380                         bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1381                         return NULL;
1382                 }
1383         } else {
1384                 u8 *new_data;
1385                 dma_addr_t new_mapping;
1386
1387                 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1388                 if (!new_data) {
1389                         bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1390                         return NULL;
1391                 }
1392
1393                 tpa_info->data = new_data;
1394                 tpa_info->data_ptr = new_data + bp->rx_offset;
1395                 tpa_info->mapping = new_mapping;
1396
1397                 skb = build_skb(data, 0);
1398                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1399                                        bp->rx_buf_use_size, bp->rx_dir,
1400                                        DMA_ATTR_WEAK_ORDERING);
1401
1402                 if (!skb) {
1403                         kfree(data);
1404                         bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1405                         return NULL;
1406                 }
1407                 skb_reserve(skb, bp->rx_offset);
1408                 skb_put(skb, len);
1409         }
1410
1411         if (agg_bufs) {
1412                 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1413                 if (!skb) {
1414                         /* Page reuse already handled by bnxt_rx_pages(). */
1415                         return NULL;
1416                 }
1417         }
1418
1419         skb->protocol =
1420                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1421
1422         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1423                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1424
1425         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1426             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1427                 u16 vlan_proto = tpa_info->metadata >>
1428                         RX_CMP_FLAGS2_METADATA_TPID_SFT;
1429                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
1430
1431                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1432         }
1433
1434         skb_checksum_none_assert(skb);
1435         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1436                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1437                 skb->csum_level =
1438                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1439         }
1440
1441         if (TPA_END_GRO(tpa_end))
1442                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1443
1444         return skb;
1445 }
1446
1447 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1448                              struct sk_buff *skb)
1449 {
1450         if (skb->dev != bp->dev) {
1451                 /* this packet belongs to a vf-rep */
1452                 bnxt_vf_rep_rx(bp, skb);
1453                 return;
1454         }
1455         skb_record_rx_queue(skb, bnapi->index);
1456         napi_gro_receive(&bnapi->napi, skb);
1457 }
1458
1459 /* returns the following:
1460  * 1       - 1 packet successfully received
1461  * 0       - successful TPA_START, packet not completed yet
1462  * -EBUSY  - completion ring does not have all the agg buffers yet
1463  * -ENOMEM - packet aborted due to lack of memory
1464  * -EIO    - packet aborted due to hw error indicated in BD
1465  */
1466 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1467                        u8 *event)
1468 {
1469         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1470         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1471         struct net_device *dev = bp->dev;
1472         struct rx_cmp *rxcmp;
1473         struct rx_cmp_ext *rxcmp1;
1474         u32 tmp_raw_cons = *raw_cons;
1475         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1476         struct bnxt_sw_rx_bd *rx_buf;
1477         unsigned int len;
1478         u8 *data_ptr, agg_bufs, cmp_type;
1479         dma_addr_t dma_addr;
1480         struct sk_buff *skb;
1481         void *data;
1482         int rc = 0;
1483         u32 misc;
1484
1485         rxcmp = (struct rx_cmp *)
1486                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1487
1488         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1489         cp_cons = RING_CMP(tmp_raw_cons);
1490         rxcmp1 = (struct rx_cmp_ext *)
1491                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1492
1493         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1494                 return -EBUSY;
1495
1496         cmp_type = RX_CMP_TYPE(rxcmp);
1497
1498         prod = rxr->rx_prod;
1499
1500         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1501                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1502                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1503
1504                 *event |= BNXT_RX_EVENT;
1505                 goto next_rx_no_prod;
1506
1507         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1508                 skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
1509                                    (struct rx_tpa_end_cmp *)rxcmp,
1510                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1511
1512                 if (unlikely(IS_ERR(skb)))
1513                         return -EBUSY;
1514
1515                 rc = -ENOMEM;
1516                 if (likely(skb)) {
1517                         bnxt_deliver_skb(bp, bnapi, skb);
1518                         rc = 1;
1519                 }
1520                 *event |= BNXT_RX_EVENT;
1521                 goto next_rx_no_prod;
1522         }
1523
1524         cons = rxcmp->rx_cmp_opaque;
1525         rx_buf = &rxr->rx_buf_ring[cons];
1526         data = rx_buf->data;
1527         data_ptr = rx_buf->data_ptr;
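        /* A consumer index that does not match rx_next_cons means the
         * completion and RX rings are out of sync; discard the packet and
         * schedule a ring reset.
         */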
1528         if (unlikely(cons != rxr->rx_next_cons)) {
1529                 int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
1530
1531                 bnxt_sched_reset(bp, rxr);
1532                 return rc1;
1533         }
1534         prefetch(data_ptr);
1535
1536         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1537         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1538
1539         if (agg_bufs) {
1540                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1541                         return -EBUSY;
1542
1543                 cp_cons = NEXT_CMP(cp_cons);
1544                 *event |= BNXT_AGG_EVENT;
1545         }
1546         *event |= BNXT_RX_EVENT;
1547
1548         rx_buf->data = NULL;
1549         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1550                 bnxt_reuse_rx_data(rxr, cons, data);
1551                 if (agg_bufs)
1552                         bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1553
1554                 rc = -EIO;
1555                 goto next_rx;
1556         }
1557
1558         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1559         dma_addr = rx_buf->mapping;
1560
1561         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1562                 rc = 1;
1563                 goto next_rx;
1564         }
1565
1566         if (len <= bp->rx_copy_thresh) {
1567                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1568                 bnxt_reuse_rx_data(rxr, cons, data);
1569                 if (!skb) {
1570                         rc = -ENOMEM;
1571                         goto next_rx;
1572                 }
1573         } else {
1574                 u32 payload;
1575
1576                 if (rx_buf->data_ptr == data_ptr)
1577                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1578                 else
1579                         payload = 0;
1580                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1581                                       payload | len);
1582                 if (!skb) {
1583                         rc = -ENOMEM;
1584                         goto next_rx;
1585                 }
1586         }
1587
1588         if (agg_bufs) {
1589                 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1590                 if (!skb) {
1591                         rc = -ENOMEM;
1592                         goto next_rx;
1593                 }
1594         }
1595
1596         if (RX_CMP_HASH_VALID(rxcmp)) {
1597                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1598                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1599
1600                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1601                 if (hash_type != 1 && hash_type != 3)
1602                         type = PKT_HASH_TYPE_L3;
1603                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1604         }
1605
1606         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1607         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1608
1609         if ((rxcmp1->rx_cmp_flags2 &
1610              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1611             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1612                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1613                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
1614                 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1615
1616                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1617         }
1618
1619         skb_checksum_none_assert(skb);
1620         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1621                 if (dev->features & NETIF_F_RXCSUM) {
1622                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1623                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1624                 }
1625         } else {
1626                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1627                         if (dev->features & NETIF_F_RXCSUM)
1628                                 cpr->rx_l4_csum_errors++;
1629                 }
1630         }
1631
1632         bnxt_deliver_skb(bp, bnapi, skb);
1633         rc = 1;
1634
1635 next_rx:
1636         rxr->rx_prod = NEXT_RX(prod);
1637         rxr->rx_next_cons = NEXT_RX(cons);
1638
1639 next_rx_no_prod:
1640         *raw_cons = tmp_raw_cons;
1641
1642         return rc;
1643 }
1644
1645 /* In netpoll mode, if we are using a combined completion ring, we need to
1646  * discard the rx packets and recycle the buffers.
1647  */
1648 static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
1649                                  u32 *raw_cons, u8 *event)
1650 {
1651         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1652         u32 tmp_raw_cons = *raw_cons;
1653         struct rx_cmp_ext *rxcmp1;
1654         struct rx_cmp *rxcmp;
1655         u16 cp_cons;
1656         u8 cmp_type;
1657
1658         cp_cons = RING_CMP(tmp_raw_cons);
1659         rxcmp = (struct rx_cmp *)
1660                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1661
1662         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1663         cp_cons = RING_CMP(tmp_raw_cons);
1664         rxcmp1 = (struct rx_cmp_ext *)
1665                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1666
1667         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1668                 return -EBUSY;
1669
1670         cmp_type = RX_CMP_TYPE(rxcmp);
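        /* Mark the completion as errored so that bnxt_rx_pkt() below drops
         * the packet and recycles its buffers.
         */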
1671         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1672                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1673                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1674         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1675                 struct rx_tpa_end_cmp_ext *tpa_end1;
1676
1677                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1678                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1679                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1680         }
1681         return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
1682 }
1683
1684 #define BNXT_GET_EVENT_PORT(data)       \
1685         ((data) &                       \
1686          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1687
1688 static int bnxt_async_event_process(struct bnxt *bp,
1689                                     struct hwrm_async_event_cmpl *cmpl)
1690 {
1691         u16 event_id = le16_to_cpu(cmpl->event_id);
1692
1693         /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1694         switch (event_id) {
1695         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1696                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1697                 struct bnxt_link_info *link_info = &bp->link_info;
1698
1699                 if (BNXT_VF(bp))
1700                         goto async_event_process_exit;
1701
1702                 /* print unsupported speed warning in forced speed mode only */
1703                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1704                     (data1 & 0x20000)) {
1705                         u16 fw_speed = link_info->force_link_speed;
1706                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1707
1708                         if (speed != SPEED_UNKNOWN)
1709                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1710                                             speed);
1711                 }
1712                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1713                 /* fall thru */
1714         }
1715         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1716                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1717                 break;
1718         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1719                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1720                 break;
1721         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1722                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1723                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1724
1725                 if (BNXT_VF(bp))
1726                         break;
1727
1728                 if (bp->pf.port_id != port_id)
1729                         break;
1730
1731                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1732                 break;
1733         }
1734         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1735                 if (BNXT_PF(bp))
1736                         goto async_event_process_exit;
1737                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1738                 break;
1739         default:
1740                 goto async_event_process_exit;
1741         }
1742         bnxt_queue_sp_work(bp);
1743 async_event_process_exit:
1744         bnxt_ulp_async_events(bp, cmpl);
1745         return 0;
1746 }
1747
1748 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1749 {
1750         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1751         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1752         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1753                                 (struct hwrm_fwd_req_cmpl *)txcmp;
1754
1755         switch (cmpl_type) {
1756         case CMPL_BASE_TYPE_HWRM_DONE:
1757                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1758                 if (seq_id == bp->hwrm_intr_seq_id)
1759                         bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
1760                 else
1761                         netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1762                 break;
1763
1764         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1765                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1766
1767                 if ((vf_id < bp->pf.first_vf_id) ||
1768                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1769                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1770                                    vf_id);
1771                         return -EINVAL;
1772                 }
1773
1774                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1775                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1776                 bnxt_queue_sp_work(bp);
1777                 break;
1778
1779         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1780                 bnxt_async_event_process(bp,
1781                                          (struct hwrm_async_event_cmpl *)txcmp);
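                /* fall thru */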
1782
1783         default:
1784                 break;
1785         }
1786
1787         return 0;
1788 }
1789
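/* MSI-X interrupt handler: prefetch the next completion descriptor and
 * hand the rest of the work to NAPI.
 */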
1790 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1791 {
1792         struct bnxt_napi *bnapi = dev_instance;
1793         struct bnxt *bp = bnapi->bp;
1794         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1795         u32 cons = RING_CMP(cpr->cp_raw_cons);
1796
1797         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1798         napi_schedule(&bnapi->napi);
1799         return IRQ_HANDLED;
1800 }
1801
1802 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1803 {
1804         u32 raw_cons = cpr->cp_raw_cons;
1805         u16 cons = RING_CMP(raw_cons);
1806         struct tx_cmp *txcmp;
1807
1808         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1809
1810         return TX_CMP_VALID(txcmp, raw_cons);
1811 }
1812
1813 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1814 {
1815         struct bnxt_napi *bnapi = dev_instance;
1816         struct bnxt *bp = bnapi->bp;
1817         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1818         u32 cons = RING_CMP(cpr->cp_raw_cons);
1819         u32 int_status;
1820
1821         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1822
1823         if (!bnxt_has_work(bp, cpr)) {
1824                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
1825                 /* return if erroneous interrupt */
1826                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1827                         return IRQ_NONE;
1828         }
1829
1830         /* disable ring IRQ */
1831         BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
1832
1833         /* Return here if interrupt is shared and is disabled. */
1834         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1835                 return IRQ_HANDLED;
1836
1837         napi_schedule(&bnapi->napi);
1838         return IRQ_HANDLED;
1839 }
1840
1841 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1842 {
1843         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1844         u32 raw_cons = cpr->cp_raw_cons;
1845         u32 cons;
1846         int tx_pkts = 0;
1847         int rx_pkts = 0;
1848         u8 event = 0;
1849         struct tx_cmp *txcmp;
1850
1851         while (1) {
1852                 int rc;
1853
1854                 cons = RING_CMP(raw_cons);
1855                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1856
1857                 if (!TX_CMP_VALID(txcmp, raw_cons))
1858                         break;
1859
1860                 /* The validity of the entry must be checked before
1861                  * reading any further.
1862                  */
1863                 dma_rmb();
1864                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1865                         tx_pkts++;
1866                         /* return full budget so NAPI will complete. */
1867                         if (unlikely(tx_pkts > bp->tx_wake_thresh))
1868                                 rx_pkts = budget;
1869                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1870                         if (likely(budget))
1871                                 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
1872                         else
1873                                 rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
1874                                                            &event);
1875                         if (likely(rc >= 0))
1876                                 rx_pkts += rc;
1877                         /* Increment rx_pkts when rc is -ENOMEM to count towards
1878                          * the NAPI budget.  Otherwise, we may potentially loop
1879                          * here forever if we consistently cannot allocate
1880                          * buffers.
1881                          */
1882                         else if (rc == -ENOMEM && budget)
1883                                 rx_pkts++;
1884                         else if (rc == -EBUSY)  /* partial completion */
1885                                 break;
1886                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1887                                      CMPL_BASE_TYPE_HWRM_DONE) ||
1888                                     (TX_CMP_TYPE(txcmp) ==
1889                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1890                                     (TX_CMP_TYPE(txcmp) ==
1891                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1892                         bnxt_hwrm_handler(bp, txcmp);
1893                 }
1894                 raw_cons = NEXT_RAW_CMP(raw_cons);
1895
1896                 if (rx_pkts == budget)
1897                         break;
1898         }
1899
1900         if (event & BNXT_TX_EVENT) {
1901                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
1902                 void __iomem *db = txr->tx_doorbell;
1903                 u16 prod = txr->tx_prod;
1904
1905                 /* Sync BD data before updating doorbell */
1906                 wmb();
1907
1908                 bnxt_db_write(bp, db, DB_KEY_TX | prod);
1909         }
1910
1911         cpr->cp_raw_cons = raw_cons;
1912         /* ACK completion ring before freeing tx ring and producing new
1913          * buffers in rx/agg rings to prevent overflowing the completion
1914          * ring.
1915          */
1916         BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1917
1918         if (tx_pkts)
1919                 bnapi->tx_int(bp, bnapi, tx_pkts);
1920
1921         if (event & BNXT_RX_EVENT) {
1922                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1923
1924                 bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
1925                 if (event & BNXT_AGG_EVENT)
1926                         bnxt_db_write(bp, rxr->rx_agg_doorbell,
1927                                       DB_KEY_RX | rxr->rx_agg_prod);
1928         }
1929         return rx_pkts;
1930 }
1931
1932 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
1933 {
1934         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1935         struct bnxt *bp = bnapi->bp;
1936         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1937         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1938         struct tx_cmp *txcmp;
1939         struct rx_cmp_ext *rxcmp1;
1940         u32 cp_cons, tmp_raw_cons;
1941         u32 raw_cons = cpr->cp_raw_cons;
1942         u32 rx_pkts = 0;
1943         u8 event = 0;
1944
1945         while (1) {
1946                 int rc;
1947
1948                 cp_cons = RING_CMP(raw_cons);
1949                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1950
1951                 if (!TX_CMP_VALID(txcmp, raw_cons))
1952                         break;
1953
1954                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1955                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
1956                         cp_cons = RING_CMP(tmp_raw_cons);
1957                         rxcmp1 = (struct rx_cmp_ext *)
1958                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1959
1960                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1961                                 break;
1962
1963                         /* force an error to recycle the buffer */
1964                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1965                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1966
1967                         rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
1968                         if (likely(rc == -EIO) && budget)
1969                                 rx_pkts++;
1970                         else if (rc == -EBUSY)  /* partial completion */
1971                                 break;
1972                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
1973                                     CMPL_BASE_TYPE_HWRM_DONE)) {
1974                         bnxt_hwrm_handler(bp, txcmp);
1975                 } else {
1976                         netdev_err(bp->dev,
1977                                    "Invalid completion received on special ring\n");
1978                 }
1979                 raw_cons = NEXT_RAW_CMP(raw_cons);
1980
1981                 if (rx_pkts == budget)
1982                         break;
1983         }
1984
1985         cpr->cp_raw_cons = raw_cons;
1986         BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1987         bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
1988
1989         if (event & BNXT_AGG_EVENT)
1990                 bnxt_db_write(bp, rxr->rx_agg_doorbell,
1991                               DB_KEY_RX | rxr->rx_agg_prod);
1992
1993         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
1994                 napi_complete_done(napi, rx_pkts);
1995                 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1996         }
1997         return rx_pkts;
1998 }
1999
2000 static int bnxt_poll(struct napi_struct *napi, int budget)
2001 {
2002         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2003         struct bnxt *bp = bnapi->bp;
2004         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2005         int work_done = 0;
2006
2007         while (1) {
2008                 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
2009
2010                 if (work_done >= budget)
2011                         break;
2012
2013                 if (!bnxt_has_work(bp, cpr)) {
2014                         if (napi_complete_done(napi, work_done))
2015                                 BNXT_CP_DB_REARM(cpr->cp_doorbell,
2016                                                  cpr->cp_raw_cons);
2017                         break;
2018                 }
2019         }
2020         mmiowb();
2021         return work_done;
2022 }
2023
2024 static void bnxt_free_tx_skbs(struct bnxt *bp)
2025 {
2026         int i, max_idx;
2027         struct pci_dev *pdev = bp->pdev;
2028
2029         if (!bp->tx_ring)
2030                 return;
2031
2032         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2033         for (i = 0; i < bp->tx_nr_rings; i++) {
2034                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2035                 int j;
2036
2037                 for (j = 0; j < max_idx;) {
2038                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2039                         struct sk_buff *skb = tx_buf->skb;
2040                         int k, last;
2041
2042                         if (!skb) {
2043                                 j++;
2044                                 continue;
2045                         }
2046
2047                         tx_buf->skb = NULL;
2048
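                        /* Push packets were copied into the push buffer and
                         * carry no DMA mapping; they still consume two BDs.
                         */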
2049                         if (tx_buf->is_push) {
2050                                 dev_kfree_skb(skb);
2051                                 j += 2;
2052                                 continue;
2053                         }
2054
2055                         dma_unmap_single(&pdev->dev,
2056                                          dma_unmap_addr(tx_buf, mapping),
2057                                          skb_headlen(skb),
2058                                          PCI_DMA_TODEVICE);
2059
2060                         last = tx_buf->nr_frags;
2061                         j += 2;
2062                         for (k = 0; k < last; k++, j++) {
2063                                 int ring_idx = j & bp->tx_ring_mask;
2064                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2065
2066                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2067                                 dma_unmap_page(
2068                                         &pdev->dev,
2069                                         dma_unmap_addr(tx_buf, mapping),
2070                                         skb_frag_size(frag), PCI_DMA_TODEVICE);
2071                         }
2072                         dev_kfree_skb(skb);
2073                 }
2074                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2075         }
2076 }
2077
2078 static void bnxt_free_rx_skbs(struct bnxt *bp)
2079 {
2080         int i, max_idx, max_agg_idx;
2081         struct pci_dev *pdev = bp->pdev;
2082
2083         if (!bp->rx_ring)
2084                 return;
2085
2086         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2087         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2088         for (i = 0; i < bp->rx_nr_rings; i++) {
2089                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2090                 int j;
2091
2092                 if (rxr->rx_tpa) {
2093                         for (j = 0; j < MAX_TPA; j++) {
2094                                 struct bnxt_tpa_info *tpa_info =
2095                                                         &rxr->rx_tpa[j];
2096                                 u8 *data = tpa_info->data;
2097
2098                                 if (!data)
2099                                         continue;
2100
2101                                 dma_unmap_single_attrs(&pdev->dev,
2102                                                        tpa_info->mapping,
2103                                                        bp->rx_buf_use_size,
2104                                                        bp->rx_dir,
2105                                                        DMA_ATTR_WEAK_ORDERING);
2106
2107                                 tpa_info->data = NULL;
2108
2109                                 kfree(data);
2110                         }
2111                 }
2112
2113                 for (j = 0; j < max_idx; j++) {
2114                         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2115                         dma_addr_t mapping = rx_buf->mapping;
2116                         void *data = rx_buf->data;
2117
2118                         if (!data)
2119                                 continue;
2120
2121                         rx_buf->data = NULL;
2122
2123                         if (BNXT_RX_PAGE_MODE(bp)) {
2124                                 mapping -= bp->rx_dma_offset;
2125                                 dma_unmap_page_attrs(&pdev->dev, mapping,
2126                                                      PAGE_SIZE, bp->rx_dir,
2127                                                      DMA_ATTR_WEAK_ORDERING);
2128                                 __free_page(data);
2129                         } else {
2130                                 dma_unmap_single_attrs(&pdev->dev, mapping,
2131                                                        bp->rx_buf_use_size,
2132                                                        bp->rx_dir,
2133                                                        DMA_ATTR_WEAK_ORDERING);
2134                                 kfree(data);
2135                         }
2136                 }
2137
2138                 for (j = 0; j < max_agg_idx; j++) {
2139                         struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2140                                 &rxr->rx_agg_ring[j];
2141                         struct page *page = rx_agg_buf->page;
2142
2143                         if (!page)
2144                                 continue;
2145
2146                         dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2147                                              BNXT_RX_PAGE_SIZE,
2148                                              PCI_DMA_FROMDEVICE,
2149                                              DMA_ATTR_WEAK_ORDERING);
2150
2151                         rx_agg_buf->page = NULL;
2152                         __clear_bit(j, rxr->rx_agg_bmap);
2153
2154                         __free_page(page);
2155                 }
2156                 if (rxr->rx_page) {
2157                         __free_page(rxr->rx_page);
2158                         rxr->rx_page = NULL;
2159                 }
2160         }
2161 }
2162
2163 static void bnxt_free_skbs(struct bnxt *bp)
2164 {
2165         bnxt_free_tx_skbs(bp);
2166         bnxt_free_rx_skbs(bp);
2167 }
2168
2169 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
2170 {
2171         struct pci_dev *pdev = bp->pdev;
2172         int i;
2173
2174         for (i = 0; i < ring->nr_pages; i++) {
2175                 if (!ring->pg_arr[i])
2176                         continue;
2177
2178                 dma_free_coherent(&pdev->dev, ring->page_size,
2179                                   ring->pg_arr[i], ring->dma_arr[i]);
2180
2181                 ring->pg_arr[i] = NULL;
2182         }
2183         if (ring->pg_tbl) {
2184                 dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
2185                                   ring->pg_tbl, ring->pg_tbl_map);
2186                 ring->pg_tbl = NULL;
2187         }
2188         if (ring->vmem_size && *ring->vmem) {
2189                 vfree(*ring->vmem);
2190                 *ring->vmem = NULL;
2191         }
2192 }
2193
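/* Allocate the DMA-coherent descriptor pages for a ring and, when more than
 * one page is needed, the page table that points to them.
 */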
2194 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
2195 {
2196         int i;
2197         struct pci_dev *pdev = bp->pdev;
2198
2199         if (ring->nr_pages > 1) {
2200                 ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
2201                                                   ring->nr_pages * 8,
2202                                                   &ring->pg_tbl_map,
2203                                                   GFP_KERNEL);
2204                 if (!ring->pg_tbl)
2205                         return -ENOMEM;
2206         }
2207
2208         for (i = 0; i < ring->nr_pages; i++) {
2209                 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2210                                                      ring->page_size,
2211                                                      &ring->dma_arr[i],
2212                                                      GFP_KERNEL);
2213                 if (!ring->pg_arr[i])
2214                         return -ENOMEM;
2215
2216                 if (ring->nr_pages > 1)
2217                         ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
2218         }
2219
2220         if (ring->vmem_size) {
2221                 *ring->vmem = vzalloc(ring->vmem_size);
2222                 if (!(*ring->vmem))
2223                         return -ENOMEM;
2224         }
2225         return 0;
2226 }
2227
2228 static void bnxt_free_rx_rings(struct bnxt *bp)
2229 {
2230         int i;
2231
2232         if (!bp->rx_ring)
2233                 return;
2234
2235         for (i = 0; i < bp->rx_nr_rings; i++) {
2236                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2237                 struct bnxt_ring_struct *ring;
2238
2239                 if (rxr->xdp_prog)
2240                         bpf_prog_put(rxr->xdp_prog);
2241
2242                 kfree(rxr->rx_tpa);
2243                 rxr->rx_tpa = NULL;
2244
2245                 kfree(rxr->rx_agg_bmap);
2246                 rxr->rx_agg_bmap = NULL;
2247
2248                 ring = &rxr->rx_ring_struct;
2249                 bnxt_free_ring(bp, ring);
2250
2251                 ring = &rxr->rx_agg_ring_struct;
2252                 bnxt_free_ring(bp, ring);
2253         }
2254 }
2255
2256 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2257 {
2258         int i, rc, agg_rings = 0, tpa_rings = 0;
2259
2260         if (!bp->rx_ring)
2261                 return -ENOMEM;
2262
2263         if (bp->flags & BNXT_FLAG_AGG_RINGS)
2264                 agg_rings = 1;
2265
2266         if (bp->flags & BNXT_FLAG_TPA)
2267                 tpa_rings = 1;
2268
2269         for (i = 0; i < bp->rx_nr_rings; i++) {
2270                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2271                 struct bnxt_ring_struct *ring;
2272
2273                 ring = &rxr->rx_ring_struct;
2274
2275                 rc = bnxt_alloc_ring(bp, ring);
2276                 if (rc)
2277                         return rc;
2278
2279                 if (agg_rings) {
2280                         u16 mem_size;
2281
2282                         ring = &rxr->rx_agg_ring_struct;
2283                         rc = bnxt_alloc_ring(bp, ring);
2284                         if (rc)
2285                                 return rc;
2286
2287                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2288                         mem_size = rxr->rx_agg_bmap_size / 8;
2289                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2290                         if (!rxr->rx_agg_bmap)
2291                                 return -ENOMEM;
2292
2293                         if (tpa_rings) {
2294                                 rxr->rx_tpa = kcalloc(MAX_TPA,
2295                                                 sizeof(struct bnxt_tpa_info),
2296                                                 GFP_KERNEL);
2297                                 if (!rxr->rx_tpa)
2298                                         return -ENOMEM;
2299                         }
2300                 }
2301         }
2302         return 0;
2303 }
2304
2305 static void bnxt_free_tx_rings(struct bnxt *bp)
2306 {
2307         int i;
2308         struct pci_dev *pdev = bp->pdev;
2309
2310         if (!bp->tx_ring)
2311                 return;
2312
2313         for (i = 0; i < bp->tx_nr_rings; i++) {
2314                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2315                 struct bnxt_ring_struct *ring;
2316
2317                 if (txr->tx_push) {
2318                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
2319                                           txr->tx_push, txr->tx_push_mapping);
2320                         txr->tx_push = NULL;
2321                 }
2322
2323                 ring = &txr->tx_ring_struct;
2324
2325                 bnxt_free_ring(bp, ring);
2326         }
2327 }
2328
2329 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2330 {
2331         int i, j, rc;
2332         struct pci_dev *pdev = bp->pdev;
2333
2334         bp->tx_push_size = 0;
2335         if (bp->tx_push_thresh) {
2336                 int push_size;
2337
2338                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2339                                         bp->tx_push_thresh);
2340
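                /* TX push is only usable if the push BD plus the inline data
                 * threshold fits within 256 bytes; otherwise disable it.
                 */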
2341                 if (push_size > 256) {
2342                         push_size = 0;
2343                         bp->tx_push_thresh = 0;
2344                 }
2345
2346                 bp->tx_push_size = push_size;
2347         }
2348
2349         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2350                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2351                 struct bnxt_ring_struct *ring;
2352
2353                 ring = &txr->tx_ring_struct;
2354
2355                 rc = bnxt_alloc_ring(bp, ring);
2356                 if (rc)
2357                         return rc;
2358
2359                 if (bp->tx_push_size) {
2360                         dma_addr_t mapping;
2361
2362                         /* One pre-allocated DMA buffer to back up
2363                          * the TX push operation
2364                          */
2365                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
2366                                                 bp->tx_push_size,
2367                                                 &txr->tx_push_mapping,
2368                                                 GFP_KERNEL);
2369
2370                         if (!txr->tx_push)
2371                                 return -ENOMEM;
2372
2373                         mapping = txr->tx_push_mapping +
2374                                 sizeof(struct tx_push_bd);
2375                         txr->data_mapping = cpu_to_le64(mapping);
2376
2377                         memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
2378                 }
2379                 ring->queue_id = bp->q_info[j].queue_id;
2380                 if (i < bp->tx_nr_rings_xdp)
2381                         continue;
2382                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2383                         j++;
2384         }
2385         return 0;
2386 }
2387
2388 static void bnxt_free_cp_rings(struct bnxt *bp)
2389 {
2390         int i;
2391
2392         if (!bp->bnapi)
2393                 return;
2394
2395         for (i = 0; i < bp->cp_nr_rings; i++) {
2396                 struct bnxt_napi *bnapi = bp->bnapi[i];
2397                 struct bnxt_cp_ring_info *cpr;
2398                 struct bnxt_ring_struct *ring;
2399
2400                 if (!bnapi)
2401                         continue;
2402
2403                 cpr = &bnapi->cp_ring;
2404                 ring = &cpr->cp_ring_struct;
2405
2406                 bnxt_free_ring(bp, ring);
2407         }
2408 }
2409
2410 static int bnxt_alloc_cp_rings(struct bnxt *bp)
2411 {
2412         int i, rc;
2413
2414         for (i = 0; i < bp->cp_nr_rings; i++) {
2415                 struct bnxt_napi *bnapi = bp->bnapi[i];
2416                 struct bnxt_cp_ring_info *cpr;
2417                 struct bnxt_ring_struct *ring;
2418
2419                 if (!bnapi)
2420                         continue;
2421
2422                 cpr = &bnapi->cp_ring;
2423                 ring = &cpr->cp_ring_struct;
2424
2425                 rc = bnxt_alloc_ring(bp, ring);
2426                 if (rc)
2427                         return rc;
2428         }
2429         return 0;
2430 }
2431
2432 static void bnxt_init_ring_struct(struct bnxt *bp)
2433 {
2434         int i;
2435
2436         for (i = 0; i < bp->cp_nr_rings; i++) {
2437                 struct bnxt_napi *bnapi = bp->bnapi[i];
2438                 struct bnxt_cp_ring_info *cpr;
2439                 struct bnxt_rx_ring_info *rxr;
2440                 struct bnxt_tx_ring_info *txr;
2441                 struct bnxt_ring_struct *ring;
2442
2443                 if (!bnapi)
2444                         continue;
2445
2446                 cpr = &bnapi->cp_ring;
2447                 ring = &cpr->cp_ring_struct;
2448                 ring->nr_pages = bp->cp_nr_pages;
2449                 ring->page_size = HW_CMPD_RING_SIZE;
2450                 ring->pg_arr = (void **)cpr->cp_desc_ring;
2451                 ring->dma_arr = cpr->cp_desc_mapping;
2452                 ring->vmem_size = 0;
2453
2454                 rxr = bnapi->rx_ring;
2455                 if (!rxr)
2456                         goto skip_rx;
2457
2458                 ring = &rxr->rx_ring_struct;
2459                 ring->nr_pages = bp->rx_nr_pages;
2460                 ring->page_size = HW_RXBD_RING_SIZE;
2461                 ring->pg_arr = (void **)rxr->rx_desc_ring;
2462                 ring->dma_arr = rxr->rx_desc_mapping;
2463                 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2464                 ring->vmem = (void **)&rxr->rx_buf_ring;
2465
2466                 ring = &rxr->rx_agg_ring_struct;
2467                 ring->nr_pages = bp->rx_agg_nr_pages;
2468                 ring->page_size = HW_RXBD_RING_SIZE;
2469                 ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
2470                 ring->dma_arr = rxr->rx_agg_desc_mapping;
2471                 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2472                 ring->vmem = (void **)&rxr->rx_agg_ring;
2473
2474 skip_rx:
2475                 txr = bnapi->tx_ring;
2476                 if (!txr)
2477                         continue;
2478
2479                 ring = &txr->tx_ring_struct;
2480                 ring->nr_pages = bp->tx_nr_pages;
2481                 ring->page_size = HW_RXBD_RING_SIZE;
2482                 ring->pg_arr = (void **)txr->tx_desc_ring;
2483                 ring->dma_arr = txr->tx_desc_mapping;
2484                 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2485                 ring->vmem = (void **)&txr->tx_buf_ring;
2486         }
2487 }
2488
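/* Stamp every RX BD in the ring with the given type/flags and record its
 * producer index in the opaque field.
 */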
2489 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2490 {
2491         int i;
2492         u32 prod;
2493         struct rx_bd **rx_buf_ring;
2494
2495         rx_buf_ring = (struct rx_bd **)ring->pg_arr;
2496         for (i = 0, prod = 0; i < ring->nr_pages; i++) {
2497                 int j;
2498                 struct rx_bd *rxbd;
2499
2500                 rxbd = rx_buf_ring[i];
2501                 if (!rxbd)
2502                         continue;
2503
2504                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2505                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2506                         rxbd->rx_bd_opaque = prod;
2507                 }
2508         }
2509 }
2510
2511 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2512 {
2513         struct net_device *dev = bp->dev;
2514         struct bnxt_rx_ring_info *rxr;
2515         struct bnxt_ring_struct *ring;
2516         u32 prod, type;
2517         int i;
2518
2519         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2520                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2521
2522         if (NET_IP_ALIGN == 2)
2523                 type |= RX_BD_FLAGS_SOP;
2524
2525         rxr = &bp->rx_ring[ring_nr];
2526         ring = &rxr->rx_ring_struct;
2527         bnxt_init_rxbd_pages(ring, type);
2528
2529         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
2530                 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
2531                 if (IS_ERR(rxr->xdp_prog)) {
2532                         int rc = PTR_ERR(rxr->xdp_prog);
2533
2534                         rxr->xdp_prog = NULL;
2535                         return rc;
2536                 }
2537         }
2538         prod = rxr->rx_prod;
2539         for (i = 0; i < bp->rx_ring_size; i++) {
2540                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2541                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2542                                     ring_nr, i, bp->rx_ring_size);
2543                         break;
2544                 }
2545                 prod = NEXT_RX(prod);
2546         }
2547         rxr->rx_prod = prod;
2548         ring->fw_ring_id = INVALID_HW_RING_ID;
2549
2550         ring = &rxr->rx_agg_ring_struct;
2551         ring->fw_ring_id = INVALID_HW_RING_ID;
2552
2553         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2554                 return 0;
2555
2556         type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
2557                 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2558
2559         bnxt_init_rxbd_pages(ring, type);
2560
2561         prod = rxr->rx_agg_prod;
2562         for (i = 0; i < bp->rx_agg_ring_size; i++) {
2563                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2564                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
2565                                     ring_nr, i, bp->rx_agg_ring_size);
2566                         break;
2567                 }
2568                 prod = NEXT_RX_AGG(prod);
2569         }
2570         rxr->rx_agg_prod = prod;
2571
2572         if (bp->flags & BNXT_FLAG_TPA) {
2573                 if (rxr->rx_tpa) {
2574                         u8 *data;
2575                         dma_addr_t mapping;
2576
2577                         for (i = 0; i < MAX_TPA; i++) {
2578                                 data = __bnxt_alloc_rx_data(bp, &mapping,
2579                                                             GFP_KERNEL);
2580                                 if (!data)
2581                                         return -ENOMEM;
2582
2583                                 rxr->rx_tpa[i].data = data;
2584                                 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
2585                                 rxr->rx_tpa[i].mapping = mapping;
2586                         }
2587                 } else {
2588                         netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2589                         return -ENOMEM;
2590                 }
2591         }
2592
2593         return 0;
2594 }
2595
2596 static void bnxt_init_cp_rings(struct bnxt *bp)
2597 {
2598         int i;
2599
2600         for (i = 0; i < bp->cp_nr_rings; i++) {
2601                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2602                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2603
2604                 ring->fw_ring_id = INVALID_HW_RING_ID;
2605         }
2606 }
2607
2608 static int bnxt_init_rx_rings(struct bnxt *bp)
2609 {
2610         int i, rc = 0;
2611
2612         if (BNXT_RX_PAGE_MODE(bp)) {
2613                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
2614                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
2615         } else {
2616                 bp->rx_offset = BNXT_RX_OFFSET;
2617                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
2618         }
2619
2620         for (i = 0; i < bp->rx_nr_rings; i++) {
2621                 rc = bnxt_init_one_rx_ring(bp, i);
2622                 if (rc)
2623                         break;
2624         }
2625
2626         return rc;
2627 }
2628
2629 static int bnxt_init_tx_rings(struct bnxt *bp)
2630 {
2631         u16 i;
2632
2633         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2634                                    MAX_SKB_FRAGS + 1);
2635
2636         for (i = 0; i < bp->tx_nr_rings; i++) {
2637                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2638                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2639
2640                 ring->fw_ring_id = INVALID_HW_RING_ID;
2641         }
2642
2643         return 0;
2644 }
2645
2646 static void bnxt_free_ring_grps(struct bnxt *bp)
2647 {
2648         kfree(bp->grp_info);
2649         bp->grp_info = NULL;
2650 }
2651
2652 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2653 {
2654         int i;
2655
2656         if (irq_re_init) {
2657                 bp->grp_info = kcalloc(bp->cp_nr_rings,
2658                                        sizeof(struct bnxt_ring_grp_info),
2659                                        GFP_KERNEL);
2660                 if (!bp->grp_info)
2661                         return -ENOMEM;
2662         }
2663         for (i = 0; i < bp->cp_nr_rings; i++) {
2664                 if (irq_re_init)
2665                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2666                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2667                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2668                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2669                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2670         }
2671         return 0;
2672 }
2673
2674 static void bnxt_free_vnics(struct bnxt *bp)
2675 {
2676         kfree(bp->vnic_info);
2677         bp->vnic_info = NULL;
2678         bp->nr_vnics = 0;
2679 }
2680
2681 static int bnxt_alloc_vnics(struct bnxt *bp)
2682 {
2683         int num_vnics = 1;
2684
2685 #ifdef CONFIG_RFS_ACCEL
2686         if (bp->flags & BNXT_FLAG_RFS)
2687                 num_vnics += bp->rx_nr_rings;
2688 #endif
2689
2690         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
2691                 num_vnics++;
2692
2693         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2694                                 GFP_KERNEL);
2695         if (!bp->vnic_info)
2696                 return -ENOMEM;
2697
2698         bp->nr_vnics = num_vnics;
2699         return 0;
2700 }
2701
2702 static void bnxt_init_vnics(struct bnxt *bp)
2703 {
2704         int i;
2705
2706         for (i = 0; i < bp->nr_vnics; i++) {
2707                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2708
2709                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
2710                 vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
2711                 vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
2712                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2713
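                /* VNIC 0 gets a random RSS hash key; all other VNICs reuse
                 * the same key.
                 */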
2714                 if (bp->vnic_info[i].rss_hash_key) {
2715                         if (i == 0)
2716                                 prandom_bytes(vnic->rss_hash_key,
2717                                               HW_HASH_KEY_SIZE);
2718                         else
2719                                 memcpy(vnic->rss_hash_key,
2720                                        bp->vnic_info[0].rss_hash_key,
2721                                        HW_HASH_KEY_SIZE);
2722                 }
2723         }
2724 }
2725
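/* Return the smallest power-of-two number of ring pages strictly greater
 * than ring_size / desc_per_pg (minimum of one page).
 */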
2726 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2727 {
2728         int pages;
2729
2730         pages = ring_size / desc_per_pg;
2731
2732         if (!pages)
2733                 return 1;
2734
2735         pages++;
2736
2737         while (pages & (pages - 1))
2738                 pages++;
2739
2740         return pages;
2741 }
2742
2743 void bnxt_set_tpa_flags(struct bnxt *bp)
2744 {
2745         bp->flags &= ~BNXT_FLAG_TPA;
2746         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
2747                 return;
2748         if (bp->dev->features & NETIF_F_LRO)
2749                 bp->flags |= BNXT_FLAG_LRO;
2750         if (bp->dev->features & NETIF_F_GRO)
2751                 bp->flags |= BNXT_FLAG_GRO;
2752 }
2753
2754 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2755  * be set on entry.
2756  */
2757 void bnxt_set_ring_params(struct bnxt *bp)
2758 {
2759         u32 ring_size, rx_size, rx_space;
2760         u32 agg_factor = 0, agg_ring_size = 0;
2761
2762         /* 8 for CRC and VLAN */
2763         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2764
2765         rx_space = rx_size + NET_SKB_PAD +
2766                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2767
2768         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2769         ring_size = bp->rx_ring_size;
2770         bp->rx_agg_ring_size = 0;
2771         bp->rx_agg_nr_pages = 0;
2772
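        /* With TPA enabled, each completion can reference multiple
         * aggregation buffers; cap the factor at 4 or at 64K worth of
         * RX pages, whichever is smaller.
         */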
2773         if (bp->flags & BNXT_FLAG_TPA)
2774                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
2775
2776         bp->flags &= ~BNXT_FLAG_JUMBO;
2777         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
2778                 u32 jumbo_factor;
2779
2780                 bp->flags |= BNXT_FLAG_JUMBO;
2781                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2782                 if (jumbo_factor > agg_factor)
2783                         agg_factor = jumbo_factor;
2784         }
2785         agg_ring_size = ring_size * agg_factor;
2786
2787         if (agg_ring_size) {
2788                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2789                                                         RX_DESC_CNT);
2790                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2791                         u32 tmp = agg_ring_size;
2792
2793                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2794                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2795                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2796                                     tmp, agg_ring_size);
2797                 }
2798                 bp->rx_agg_ring_size = agg_ring_size;
2799                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2800                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2801                 rx_space = rx_size + NET_SKB_PAD +
2802                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2803         }
2804
2805         bp->rx_buf_use_size = rx_size;
2806         bp->rx_buf_size = rx_space;
2807
2808         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2809         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2810
2811         ring_size = bp->tx_ring_size;
2812         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2813         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2814
2815         ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2816         bp->cp_ring_size = ring_size;
2817
2818         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2819         if (bp->cp_nr_pages > MAX_CP_PAGES) {
2820                 bp->cp_nr_pages = MAX_CP_PAGES;
2821                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2822                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2823                             ring_size, bp->cp_ring_size);
2824         }
2825         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2826         bp->cp_ring_mask = bp->cp_bit - 1;
2827 }
2828
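/* Switch the RX path between normal SKB buffers and full-page buffers.
 * Page mode caps the MTU, turns off aggregation rings and LRO, and maps
 * RX buffers bidirectionally, presumably so an attached XDP program can
 * modify and retransmit the buffer in place.
 */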
2829 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
2830 {
2831         if (page_mode) {
2832                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
2833                         return -EOPNOTSUPP;
2834                 bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
2835                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
2836                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
2837                 bp->dev->hw_features &= ~NETIF_F_LRO;
2838                 bp->dev->features &= ~NETIF_F_LRO;
2839                 bp->rx_dir = DMA_BIDIRECTIONAL;
2840                 bp->rx_skb_func = bnxt_rx_page_skb;
2841         } else {
2842                 bp->dev->max_mtu = BNXT_MAX_MTU;
2843                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
2844                 bp->rx_dir = DMA_FROM_DEVICE;
2845                 bp->rx_skb_func = bnxt_rx_skb;
2846         }
2847         return 0;
2848 }
2849
2850 static void bnxt_free_vnic_attributes(struct bnxt *bp)
2851 {
2852         int i;
2853         struct bnxt_vnic_info *vnic;
2854         struct pci_dev *pdev = bp->pdev;
2855
2856         if (!bp->vnic_info)
2857                 return;
2858
2859         for (i = 0; i < bp->nr_vnics; i++) {
2860                 vnic = &bp->vnic_info[i];
2861
2862                 kfree(vnic->fw_grp_ids);
2863                 vnic->fw_grp_ids = NULL;
2864
2865                 kfree(vnic->uc_list);
2866                 vnic->uc_list = NULL;
2867
2868                 if (vnic->mc_list) {
2869                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2870                                           vnic->mc_list, vnic->mc_list_mapping);
2871                         vnic->mc_list = NULL;
2872                 }
2873
2874                 if (vnic->rss_table) {
2875                         dma_free_coherent(&pdev->dev, PAGE_SIZE,
2876                                           vnic->rss_table,
2877                                           vnic->rss_table_dma_addr);
2878                         vnic->rss_table = NULL;
2879                 }
2880
2881                 vnic->rss_hash_key = NULL;
2882                 vnic->flags = 0;
2883         }
2884 }
2885
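/* Allocate the per-VNIC unicast list, the DMA-coherent multicast list,
 * and a single page that holds both the RSS indirection table and the
 * RSS hash key (the key sits right after the cache-aligned table).
 */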
2886 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2887 {
2888         int i, rc = 0, size;
2889         struct bnxt_vnic_info *vnic;
2890         struct pci_dev *pdev = bp->pdev;
2891         int max_rings;
2892
2893         for (i = 0; i < bp->nr_vnics; i++) {
2894                 vnic = &bp->vnic_info[i];
2895
2896                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2897                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2898
2899                         if (mem_size > 0) {
2900                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2901                                 if (!vnic->uc_list) {
2902                                         rc = -ENOMEM;
2903                                         goto out;
2904                                 }
2905                         }
2906                 }
2907
2908                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2909                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2910                         vnic->mc_list =
2911                                 dma_alloc_coherent(&pdev->dev,
2912                                                    vnic->mc_list_size,
2913                                                    &vnic->mc_list_mapping,
2914                                                    GFP_KERNEL);
2915                         if (!vnic->mc_list) {
2916                                 rc = -ENOMEM;
2917                                 goto out;
2918                         }
2919                 }
2920
2921                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2922                         max_rings = bp->rx_nr_rings;
2923                 else
2924                         max_rings = 1;
2925
2926                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2927                 if (!vnic->fw_grp_ids) {
2928                         rc = -ENOMEM;
2929                         goto out;
2930                 }
2931
2932                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
2933                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
2934                         continue;
2935
2936                 /* Allocate rss table and hash key */
2937                 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2938                                                      &vnic->rss_table_dma_addr,
2939                                                      GFP_KERNEL);
2940                 if (!vnic->rss_table) {
2941                         rc = -ENOMEM;
2942                         goto out;
2943                 }
2944
2945                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2946
2947                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2948                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2949         }
2950         return 0;
2951
2952 out:
2953         return rc;
2954 }
2955
2956 static void bnxt_free_hwrm_resources(struct bnxt *bp)
2957 {
2958         struct pci_dev *pdev = bp->pdev;
2959
2960         dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2961                           bp->hwrm_cmd_resp_dma_addr);
2962
2963         bp->hwrm_cmd_resp_addr = NULL;
2964         if (bp->hwrm_dbg_resp_addr) {
2965                 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2966                                   bp->hwrm_dbg_resp_addr,
2967                                   bp->hwrm_dbg_resp_dma_addr);
2968
2969                 bp->hwrm_dbg_resp_addr = NULL;
2970         }
2971 }
2972
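/* Allocate the DMA-coherent buffers that firmware writes HWRM responses
 * into.  The debug register buffer is optional: if it cannot be
 * allocated we only log a warning and carry on.
 */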
2973 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2974 {
2975         struct pci_dev *pdev = bp->pdev;
2976
2977         bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2978                                                    &bp->hwrm_cmd_resp_dma_addr,
2979                                                    GFP_KERNEL);
2980         if (!bp->hwrm_cmd_resp_addr)
2981                 return -ENOMEM;
2982         bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
2983                                                     HWRM_DBG_REG_BUF_SIZE,
2984                                                     &bp->hwrm_dbg_resp_dma_addr,
2985                                                     GFP_KERNEL);
2986         if (!bp->hwrm_dbg_resp_addr)
2987                 netdev_warn(bp->dev, "failed to allocate debug register DMA memory\n");
2988
2989         return 0;
2990 }
2991
2992 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
2993 {
2994         if (bp->hwrm_short_cmd_req_addr) {
2995                 struct pci_dev *pdev = bp->pdev;
2996
2997                 dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
2998                                   bp->hwrm_short_cmd_req_addr,
2999                                   bp->hwrm_short_cmd_req_dma_addr);
3000                 bp->hwrm_short_cmd_req_addr = NULL;
3001         }
3002 }
3003
3004 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3005 {
3006         struct pci_dev *pdev = bp->pdev;
3007
3008         bp->hwrm_short_cmd_req_addr =
3009                 dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
3010                                    &bp->hwrm_short_cmd_req_dma_addr,
3011                                    GFP_KERNEL);
3012         if (!bp->hwrm_short_cmd_req_addr)
3013                 return -ENOMEM;
3014
3015         return 0;
3016 }
3017
3018 static void bnxt_free_stats(struct bnxt *bp)
3019 {
3020         u32 size, i;
3021         struct pci_dev *pdev = bp->pdev;
3022
3023         if (bp->hw_rx_port_stats) {
3024                 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3025                                   bp->hw_rx_port_stats,
3026                                   bp->hw_rx_port_stats_map);
3027                 bp->hw_rx_port_stats = NULL;
3028                 bp->flags &= ~BNXT_FLAG_PORT_STATS;
3029         }
3030
3031         if (!bp->bnapi)
3032                 return;
3033
3034         size = sizeof(struct ctx_hw_stats);
3035
3036         for (i = 0; i < bp->cp_nr_rings; i++) {
3037                 struct bnxt_napi *bnapi = bp->bnapi[i];
3038                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3039
3040                 if (cpr->hw_stats) {
3041                         dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3042                                           cpr->hw_stats_map);
3043                         cpr->hw_stats = NULL;
3044                 }
3045         }
3046 }
3047
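/* Allocate one hardware stats context per completion ring and, for PFs
 * other than CHIP_NUM_58700, a shared buffer for the RX and TX port
 * statistics (the TX block starts 512 bytes after the end of the RX
 * block).
 */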
3048 static int bnxt_alloc_stats(struct bnxt *bp)
3049 {
3050         u32 size, i;
3051         struct pci_dev *pdev = bp->pdev;
3052
3053         size = sizeof(struct ctx_hw_stats);
3054
3055         for (i = 0; i < bp->cp_nr_rings; i++) {
3056                 struct bnxt_napi *bnapi = bp->bnapi[i];
3057                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3058
3059                 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3060                                                    &cpr->hw_stats_map,
3061                                                    GFP_KERNEL);
3062                 if (!cpr->hw_stats)
3063                         return -ENOMEM;
3064
3065                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3066         }
3067
3068         if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
3069                 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3070                                          sizeof(struct tx_port_stats) + 1024;
3071
3072                 bp->hw_rx_port_stats =
3073                         dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3074                                            &bp->hw_rx_port_stats_map,
3075                                            GFP_KERNEL);
3076                 if (!bp->hw_rx_port_stats)
3077                         return -ENOMEM;
3078
3079                 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
3080                                        512;
3081                 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3082                                            sizeof(struct rx_port_stats) + 512;
3083                 bp->flags |= BNXT_FLAG_PORT_STATS;
3084         }
3085         return 0;
3086 }
3087
3088 static void bnxt_clear_ring_indices(struct bnxt *bp)
3089 {
3090         int i;
3091
3092         if (!bp->bnapi)
3093                 return;
3094
3095         for (i = 0; i < bp->cp_nr_rings; i++) {
3096                 struct bnxt_napi *bnapi = bp->bnapi[i];
3097                 struct bnxt_cp_ring_info *cpr;
3098                 struct bnxt_rx_ring_info *rxr;
3099                 struct bnxt_tx_ring_info *txr;
3100
3101                 if (!bnapi)
3102                         continue;
3103
3104                 cpr = &bnapi->cp_ring;
3105                 cpr->cp_raw_cons = 0;
3106
3107                 txr = bnapi->tx_ring;
3108                 if (txr) {
3109                         txr->tx_prod = 0;
3110                         txr->tx_cons = 0;
3111                 }
3112
3113                 rxr = bnapi->rx_ring;
3114                 if (rxr) {
3115                         rxr->rx_prod = 0;
3116                         rxr->rx_agg_prod = 0;
3117                         rxr->rx_sw_agg_prod = 0;
3118                         rxr->rx_next_cons = 0;
3119                 }
3120         }
3121 }
3122
3123 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3124 {
3125 #ifdef CONFIG_RFS_ACCEL
3126         int i;
3127
3128         /* We are under rtnl_lock and all our NAPIs have been disabled,
3129          * so it is safe to delete the hash table.
3130          */
3131         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3132                 struct hlist_head *head;
3133                 struct hlist_node *tmp;
3134                 struct bnxt_ntuple_filter *fltr;
3135
3136                 head = &bp->ntp_fltr_hash_tbl[i];
3137                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3138                         hlist_del(&fltr->hash);
3139                         kfree(fltr);
3140                 }
3141         }
3142         if (irq_reinit) {
3143                 kfree(bp->ntp_fltr_bmap);
3144                 bp->ntp_fltr_bmap = NULL;
3145         }
3146         bp->ntp_fltr_count = 0;
3147 #endif
3148 }
3149
3150 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3151 {
3152 #ifdef CONFIG_RFS_ACCEL
3153         int i, rc = 0;
3154
3155         if (!(bp->flags & BNXT_FLAG_RFS))
3156                 return 0;
3157
3158         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3159                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3160
3161         bp->ntp_fltr_count = 0;
3162         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3163                                     sizeof(long),
3164                                     GFP_KERNEL);
3165
3166         if (!bp->ntp_fltr_bmap)
3167                 rc = -ENOMEM;
3168
3169         return rc;
3170 #else
3171         return 0;
3172 #endif
3173 }
3174
3175 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3176 {
3177         bnxt_free_vnic_attributes(bp);
3178         bnxt_free_tx_rings(bp);
3179         bnxt_free_rx_rings(bp);
3180         bnxt_free_cp_rings(bp);
3181         bnxt_free_ntp_fltrs(bp, irq_re_init);
3182         if (irq_re_init) {
3183                 bnxt_free_stats(bp);
3184                 bnxt_free_ring_grps(bp);
3185                 bnxt_free_vnics(bp);
3186                 kfree(bp->tx_ring_map);
3187                 bp->tx_ring_map = NULL;
3188                 kfree(bp->tx_ring);
3189                 bp->tx_ring = NULL;
3190                 kfree(bp->rx_ring);
3191                 bp->rx_ring = NULL;
3192                 kfree(bp->bnapi);
3193                 bp->bnapi = NULL;
3194         } else {
3195                 bnxt_clear_ring_indices(bp);
3196         }
3197 }
3198
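/* Allocate all software ring and VNIC state.  When irq_re_init is set,
 * the bnxt_napi array, the TX/RX ring arrays, the stats contexts, the
 * ntuple filter bitmap and the VNIC array are (re)built as well;
 * otherwise those survive and only the ring structures and VNIC
 * attributes are set up again.
 */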
3199 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3200 {
3201         int i, j, rc, size, arr_size;
3202         void *bnapi;
3203
3204         if (irq_re_init) {
3205                 /* Allocate the bnapi pointer array and the bnxt_napi
3206                  * structures for all completion rings in one block.
3207                  */
3208                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3209                                 bp->cp_nr_rings);
3210                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3211                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3212                 if (!bnapi)
3213                         return -ENOMEM;
3214
3215                 bp->bnapi = bnapi;
3216                 bnapi += arr_size;
3217                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3218                         bp->bnapi[i] = bnapi;
3219                         bp->bnapi[i]->index = i;
3220                         bp->bnapi[i]->bp = bp;
3221                 }
3222
3223                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3224                                       sizeof(struct bnxt_rx_ring_info),
3225                                       GFP_KERNEL);
3226                 if (!bp->rx_ring)
3227                         return -ENOMEM;
3228
3229                 for (i = 0; i < bp->rx_nr_rings; i++) {
3230                         bp->rx_ring[i].bnapi = bp->bnapi[i];
3231                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3232                 }
3233
3234                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
3235                                       sizeof(struct bnxt_tx_ring_info),
3236                                       GFP_KERNEL);
3237                 if (!bp->tx_ring)
3238                         return -ENOMEM;
3239
3240                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3241                                           GFP_KERNEL);
3242
3243                 if (!bp->tx_ring_map)
3244                         return -ENOMEM;
3245
3246                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3247                         j = 0;
3248                 else
3249                         j = bp->rx_nr_rings;
3250
3251                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
3252                         bp->tx_ring[i].bnapi = bp->bnapi[j];
3253                         bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
3254                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
3255                         if (i >= bp->tx_nr_rings_xdp) {
3256                                 bp->tx_ring[i].txq_index = i -
3257                                         bp->tx_nr_rings_xdp;
3258                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
3259                         } else {
3260                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
3261                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
3262                         }
3263                 }
3264
3265                 rc = bnxt_alloc_stats(bp);
3266                 if (rc)
3267                         goto alloc_mem_err;
3268
3269                 rc = bnxt_alloc_ntp_fltrs(bp);
3270                 if (rc)
3271                         goto alloc_mem_err;
3272
3273                 rc = bnxt_alloc_vnics(bp);
3274                 if (rc)
3275                         goto alloc_mem_err;
3276         }
3277
3278         bnxt_init_ring_struct(bp);
3279
3280         rc = bnxt_alloc_rx_rings(bp);
3281         if (rc)
3282                 goto alloc_mem_err;
3283
3284         rc = bnxt_alloc_tx_rings(bp);
3285         if (rc)
3286                 goto alloc_mem_err;
3287
3288         rc = bnxt_alloc_cp_rings(bp);
3289         if (rc)
3290                 goto alloc_mem_err;
3291
3292         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3293                                   BNXT_VNIC_UCAST_FLAG;
3294         rc = bnxt_alloc_vnic_attributes(bp);
3295         if (rc)
3296                 goto alloc_mem_err;
3297         return 0;
3298
3299 alloc_mem_err:
3300         bnxt_free_mem(bp, true);
3301         return rc;
3302 }
3303
3304 static void bnxt_disable_int(struct bnxt *bp)
3305 {
3306         int i;
3307
3308         if (!bp->bnapi)
3309                 return;
3310
3311         for (i = 0; i < bp->cp_nr_rings; i++) {
3312                 struct bnxt_napi *bnapi = bp->bnapi[i];
3313                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3314                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3315
3316                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
3317                         BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3318         }
3319 }
3320
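/* Disable completion ring interrupts and wait for any handler that is
 * already running to finish.  intr_sem is raised first so the
 * interrupt/NAPI code can tell that interrupts are being shut down.
 */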
3321 static void bnxt_disable_int_sync(struct bnxt *bp)
3322 {
3323         int i;
3324
3325         atomic_inc(&bp->intr_sem);
3326
3327         bnxt_disable_int(bp);
3328         for (i = 0; i < bp->cp_nr_rings; i++)
3329                 synchronize_irq(bp->irq_tbl[i].vector);
3330 }
3331
3332 static void bnxt_enable_int(struct bnxt *bp)
3333 {
3334         int i;
3335
3336         atomic_set(&bp->intr_sem, 0);
3337         for (i = 0; i < bp->cp_nr_rings; i++) {
3338                 struct bnxt_napi *bnapi = bp->bnapi[i];
3339                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3340
3341                 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
3342         }
3343 }
3344
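/* Fill in the header fields common to every HWRM request: request type,
 * completion ring, target function id and the DMA address of the shared
 * response buffer.
 */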
3345 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
3346                             u16 cmpl_ring, u16 target_id)
3347 {
3348         struct input *req = request;
3349
3350         req->req_type = cpu_to_le16(req_type);
3351         req->cmpl_ring = cpu_to_le16(cmpl_ring);
3352         req->target_id = cpu_to_le16(target_id);
3353         req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
3354 }
3355
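/* Send one HWRM request and wait for the response.  The request (or a
 * short-form descriptor pointing at it when BNXT_FLAG_SHORT_CMD is set)
 * is copied into BAR0 and the doorbell at offset 0x100 is rung; we then
 * either wait for the completion interrupt or poll the response buffer
 * for its length and valid bit.  Caller must hold bp->hwrm_cmd_lock.
 */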
3356 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3357                                  int timeout, bool silent)
3358 {
3359         int i, intr_process, rc, tmo_count;
3360         struct input *req = msg;
3361         u32 *data = msg;
3362         __le32 *resp_len, *valid;
3363         u16 cp_ring_id, len = 0;
3364         struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
3365         u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
3366
3367         req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
3368         memset(resp, 0, PAGE_SIZE);
3369         cp_ring_id = le16_to_cpu(req->cmpl_ring);
3370         intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
3371
3372         if (bp->flags & BNXT_FLAG_SHORT_CMD) {
3373                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
3374                 struct hwrm_short_input short_input = {0};
3375
3376                 memcpy(short_cmd_req, req, msg_len);
3377                 memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
3378                                                    msg_len);
3379
3380                 short_input.req_type = req->req_type;
3381                 short_input.signature =
3382                                 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
3383                 short_input.size = cpu_to_le16(msg_len);
3384                 short_input.req_addr =
3385                         cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
3386
3387                 data = (u32 *)&short_input;
3388                 msg_len = sizeof(short_input);
3389
3390                 /* Sync memory write before updating doorbell */
3391                 wmb();
3392
3393                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
3394         }
3395
3396         /* Write request msg to hwrm channel */
3397         __iowrite32_copy(bp->bar0, data, msg_len / 4);
3398
3399         for (i = msg_len; i < max_req_len; i += 4)
3400                 writel(0, bp->bar0 + i);
3401
3402         /* currently supports only one outstanding message */
3403         if (intr_process)
3404                 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
3405
3406         /* Ring channel doorbell */
3407         writel(1, bp->bar0 + 0x100);
3408
3409         if (!timeout)
3410                 timeout = DFLT_HWRM_CMD_TIMEOUT;
3411
3412         i = 0;
3413         tmo_count = timeout * 40;
3414         if (intr_process) {
3415                 /* Wait until hwrm response cmpl interrupt is processed */
3416                 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
3417                        i++ < tmo_count) {
3418                         usleep_range(25, 40);
3419                 }
3420
3421                 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
3422                         netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
3423                                    le16_to_cpu(req->req_type));
3424                         return -1;
3425                 }
3426         } else {
3427                 /* Check if response len is updated */
3428                 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
3429                 for (i = 0; i < tmo_count; i++) {
3430                         len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3431                               HWRM_RESP_LEN_SFT;
3432                         if (len)
3433                                 break;
3434                         usleep_range(25, 40);
3435                 }
3436
3437                 if (i >= tmo_count) {
3438                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
3439                                    timeout, le16_to_cpu(req->req_type),
3440                                    le16_to_cpu(req->seq_id), len);
3441                         return -1;
3442                 }
3443
3444                 /* Last word of resp contains valid bit */
3445                 valid = bp->hwrm_cmd_resp_addr + len - 4;
3446                 for (i = 0; i < 5; i++) {
3447                         if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
3448                                 break;
3449                         udelay(1);
3450                 }
3451
3452                 if (i >= 5) {
3453                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
3454                                    timeout, le16_to_cpu(req->req_type),
3455                                    le16_to_cpu(req->seq_id), len, *valid);
3456                         return -1;
3457                 }
3458         }
3459
3460         rc = le16_to_cpu(resp->error_code);
3461         if (rc && !silent)
3462                 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3463                            le16_to_cpu(resp->req_type),
3464                            le16_to_cpu(resp->seq_id), rc);
3465         return rc;
3466 }
3467
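/* The _hwrm_send_message*() variants expect bp->hwrm_cmd_lock to be
 * held by the caller (typically because the caller also reads the
 * response buffer); hwrm_send_message*() take and drop the lock around
 * a single request.
 */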
3468 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3469 {
3470         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
3471 }
3472
3473 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3474                               int timeout)
3475 {
3476         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3477 }
3478
3479 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3480 {
3481         int rc;
3482
3483         mutex_lock(&bp->hwrm_cmd_lock);
3484         rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3485         mutex_unlock(&bp->hwrm_cmd_lock);
3486         return rc;
3487 }
3488
3489 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3490                              int timeout)
3491 {
3492         int rc;
3493
3494         mutex_lock(&bp->hwrm_cmd_lock);
3495         rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3496         mutex_unlock(&bp->hwrm_cmd_lock);
3497         return rc;
3498 }
3499
3500 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
3501                                      int bmap_size)
3502 {
3503         struct hwrm_func_drv_rgtr_input req = {0};
3504         DECLARE_BITMAP(async_events_bmap, 256);
3505         u32 *events = (u32 *)async_events_bmap;
3506         int i;
3507
3508         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3509
3510         req.enables =
3511                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
3512
3513         memset(async_events_bmap, 0, sizeof(async_events_bmap));
3514         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
3515                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
3516
3517         if (bmap && bmap_size) {
3518                 for (i = 0; i < bmap_size; i++) {
3519                         if (test_bit(i, bmap))
3520                                 __set_bit(i, async_events_bmap);
3521                 }
3522         }
3523
3524         for (i = 0; i < 8; i++)
3525                 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
3526
3527         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3528 }
3529
3530 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
3531 {
3532         struct hwrm_func_drv_rgtr_input req = {0};
3533
3534         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3535
3536         req.enables =
3537                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
3538                             FUNC_DRV_RGTR_REQ_ENABLES_VER);
3539
3540         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
3541         req.ver_maj = DRV_VER_MAJ;
3542         req.ver_min = DRV_VER_MIN;
3543         req.ver_upd = DRV_VER_UPD;
3544
3545         if (BNXT_PF(bp)) {
3546                 u32 data[8];
3547                 int i;
3548
3549                 memset(data, 0, sizeof(data));
3550                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
3551                         u16 cmd = bnxt_vf_req_snif[i];
3552                         unsigned int bit, idx;
3553
3554                         idx = cmd / 32;
3555                         bit = cmd % 32;
3556                         data[idx] |= 1 << bit;
3557                 }
3558
3559                 for (i = 0; i < 8; i++)
3560                         req.vf_req_fwd[i] = cpu_to_le32(data[i]);
3561
3562                 req.enables |=
3563                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
3564         }
3565
3566         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3567 }
3568
3569 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
3570 {
3571         struct hwrm_func_drv_unrgtr_input req = {0};
3572
3573         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
3574         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3575 }
3576
3577 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
3578 {
3579         u32 rc = 0;
3580         struct hwrm_tunnel_dst_port_free_input req = {0};
3581
3582         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
3583         req.tunnel_type = tunnel_type;
3584
3585         switch (tunnel_type) {
3586         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
3587                 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
3588                 break;
3589         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
3590                 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
3591                 break;
3592         default:
3593                 break;
3594         }
3595
3596         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3597         if (rc)
3598                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
3599                            rc);
3600         return rc;
3601 }
3602
3603 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
3604                                            u8 tunnel_type)
3605 {
3606         u32 rc = 0;
3607         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3608         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3609
3610         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
3611
3612         req.tunnel_type = tunnel_type;
3613         req.tunnel_dst_port_val = port;
3614
3615         mutex_lock(&bp->hwrm_cmd_lock);
3616         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3617         if (rc) {
3618                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
3619                            rc);
3620                 goto err_out;
3621         }
3622
3623         switch (tunnel_type) {
3624         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
3625                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3626                 break;
3627         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
3628                 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
3629                 break;
3630         default:
3631                 break;
3632         }
3633
3634 err_out:
3635         mutex_unlock(&bp->hwrm_cmd_lock);
3636         return rc;
3637 }
3638
3639 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
3640 {
3641         struct hwrm_cfa_l2_set_rx_mask_input req = {0};
3642         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3643
3644         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
3645         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3646
3647         req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
3648         req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
3649         req.mask = cpu_to_le32(vnic->rx_mask);
3650         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3651 }
3652
3653 #ifdef CONFIG_RFS_ACCEL
3654 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
3655                                             struct bnxt_ntuple_filter *fltr)
3656 {
3657         struct hwrm_cfa_ntuple_filter_free_input req = {0};
3658
3659         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
3660         req.ntuple_filter_id = fltr->filter_id;
3661         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3662 }
3663
3664 #define BNXT_NTP_FLTR_FLAGS                                     \
3665         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
3666          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
3667          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
3668          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
3669          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
3670          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
3671          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
3672          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
3673          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
3674          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
3675          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
3676          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
3677          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
3678          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
3679
3680 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
3681                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
3682
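/* Program one aRFS ntuple filter: match the flow's ethertype, source
 * MAC, protocol, addresses and ports, and steer matching packets to the
 * VNIC in front of the requested RX queue (rxq + 1, since VNIC 0 is
 * used as the default VNIC).
 */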
3683 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
3684                                              struct bnxt_ntuple_filter *fltr)
3685 {
3686         int rc = 0;
3687         struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
3688         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3689                 bp->hwrm_cmd_resp_addr;
3690         struct flow_keys *keys = &fltr->fkeys;
3691         struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
3692
3693         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
3694         req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
3695
3696         req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
3697
3698         req.ethertype = htons(ETH_P_IP);
3699         memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
3700         req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
3701         req.ip_protocol = keys->basic.ip_proto;
3702
3703         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
3704                 int i;
3705
3706                 req.ethertype = htons(ETH_P_IPV6);
3707                 req.ip_addr_type =
3708                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
3709                 *(struct in6_addr *)&req.src_ipaddr[0] =
3710                         keys->addrs.v6addrs.src;
3711                 *(struct in6_addr *)&req.dst_ipaddr[0] =
3712                         keys->addrs.v6addrs.dst;
3713                 for (i = 0; i < 4; i++) {
3714                         req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
3715                         req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
3716                 }
3717         } else {
3718                 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
3719                 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3720                 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
3721                 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3722         }
3723         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
3724                 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
3725                 req.tunnel_type =
3726                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
3727         }
3728
3729         req.src_port = keys->ports.src;
3730         req.src_port_mask = cpu_to_be16(0xffff);
3731         req.dst_port = keys->ports.dst;
3732         req.dst_port_mask = cpu_to_be16(0xffff);
3733
3734         req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
3735         mutex_lock(&bp->hwrm_cmd_lock);
3736         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3737         if (!rc)
3738                 fltr->filter_id = resp->ntuple_filter_id;
3739         mutex_unlock(&bp->hwrm_cmd_lock);
3740         return rc;
3741 }
3742 #endif
3743
3744 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
3745                                      u8 *mac_addr)
3746 {
3747         u32 rc = 0;
3748         struct hwrm_cfa_l2_filter_alloc_input req = {0};
3749         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3750
3751         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
3752         req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
3753         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
3754                 req.flags |=
3755                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
3756         req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
3757         req.enables =
3758                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
3759                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
3760                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
3761         memcpy(req.l2_addr, mac_addr, ETH_ALEN);
3762         req.l2_addr_mask[0] = 0xff;
3763         req.l2_addr_mask[1] = 0xff;
3764         req.l2_addr_mask[2] = 0xff;
3765         req.l2_addr_mask[3] = 0xff;
3766         req.l2_addr_mask[4] = 0xff;
3767         req.l2_addr_mask[5] = 0xff;
3768
3769         mutex_lock(&bp->hwrm_cmd_lock);
3770         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3771         if (!rc)
3772                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
3773                                                         resp->l2_filter_id;
3774         mutex_unlock(&bp->hwrm_cmd_lock);
3775         return rc;
3776 }
3777
3778 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
3779 {
3780         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
3781         int rc = 0;
3782
3783         /* Any associated ntuple filters will also be cleared by firmware. */
3784         mutex_lock(&bp->hwrm_cmd_lock);
3785         for (i = 0; i < num_of_vnics; i++) {
3786                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3787
3788                 for (j = 0; j < vnic->uc_filter_count; j++) {
3789                         struct hwrm_cfa_l2_filter_free_input req = {0};
3790
3791                         bnxt_hwrm_cmd_hdr_init(bp, &req,
3792                                                HWRM_CFA_L2_FILTER_FREE, -1, -1);
3793
3794                         req.l2_filter_id = vnic->fw_l2_filter_id[j];
3795
3796                         rc = _hwrm_send_message(bp, &req, sizeof(req),
3797                                                 HWRM_CMD_TIMEOUT);
3798                 }
3799                 vnic->uc_filter_count = 0;
3800         }
3801         mutex_unlock(&bp->hwrm_cmd_lock);
3802
3803         return rc;
3804 }
3805
3806 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
3807 {
3808         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3809         struct hwrm_vnic_tpa_cfg_input req = {0};
3810
3811         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
3812                 return 0;
3813
3814         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
3815
3816         if (tpa_flags) {
3817                 u16 mss = bp->dev->mtu - 40;
3818                 u32 nsegs, n, segs = 0, flags;
3819
3820                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
3821                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
3822                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
3823                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
3824                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
3825                 if (tpa_flags & BNXT_FLAG_GRO)
3826                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
3827
3828                 req.flags = cpu_to_le32(flags);
3829
3830                 req.enables =
3831                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
3832                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
3833                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
3834
3835                 /* The number of aggregation segments is in log2 units,
3836                  * and the first packet is not included in these units.
3837                  */
3838                 if (mss <= BNXT_RX_PAGE_SIZE) {
3839                         n = BNXT_RX_PAGE_SIZE / mss;
3840                         nsegs = (MAX_SKB_FRAGS - 1) * n;
3841                 } else {
3842                         n = mss / BNXT_RX_PAGE_SIZE;
3843                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
3844                                 n++;
3845                         nsegs = (MAX_SKB_FRAGS - n) / n;
3846                 }
3847
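                /* Worked example (a sketch, assuming 4K RX pages and
                 * MAX_SKB_FRAGS == 17): a 1500-byte MTU gives mss = 1460,
                 * so n = 4096 / 1460 = 2, nsegs = (17 - 1) * 2 = 32, and
                 * max_agg_segs becomes ilog2(32) = 5.
                 */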
3848                 segs = ilog2(nsegs);
3849                 req.max_agg_segs = cpu_to_le16(segs);
3850                 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
3851
3852                 req.min_agg_len = cpu_to_le32(512);
3853         }
3854         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3855
3856         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3857 }
3858
3859 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
3860 {
3861         u32 i, j, max_rings;
3862         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3863         struct hwrm_vnic_rss_cfg_input req = {0};
3864
3865         if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
3866                 return 0;
3867
3868         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
3869         if (set_rss) {
3870                 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
3871                 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
3872                         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3873                                 max_rings = bp->rx_nr_rings - 1;
3874                         else
3875                                 max_rings = bp->rx_nr_rings;
3876                 } else {
3877                         max_rings = 1;
3878                 }
3879
3880                 /* Fill the RSS indirection table with ring group ids */
3881                 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
3882                         if (j == max_rings)
3883                                 j = 0;
3884                         vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
3885                 }
3886
3887                 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
3888                 req.hash_key_tbl_addr =
3889                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
3890         }
3891         req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
3892         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3893 }
3894
3895 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
3896 {
3897         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3898         struct hwrm_vnic_plcmodes_cfg_input req = {0};
3899
3900         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
3901         req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
3902                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
3903                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
3904         req.enables =
3905                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
3906                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
3907         /* thresholds not implemented in firmware yet */
3908         req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
3909         req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
3910         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3911         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3912 }
3913
3914 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
3915                                         u16 ctx_idx)
3916 {
3917         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
3918
3919         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
3920         req.rss_cos_lb_ctx_id =
3921                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
3922
3923         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3924         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
3925 }
3926
3927 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
3928 {
3929         int i, j;
3930
3931         for (i = 0; i < bp->nr_vnics; i++) {
3932                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3933
3934                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
3935                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
3936                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
3937                 }
3938         }
3939         bp->rsscos_nr_ctxs = 0;
3940 }
3941
3942 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
3943 {
3944         int rc;
3945         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
3946         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
3947                                                 bp->hwrm_cmd_resp_addr;
3948
3949         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
3950                                -1);
3951
3952         mutex_lock(&bp->hwrm_cmd_lock);
3953         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3954         if (!rc)
3955                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
3956                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
3957         mutex_unlock(&bp->hwrm_cmd_lock);
3958
3959         return rc;
3960 }
3961
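/* Configure a VNIC in firmware: attach its RSS (and, on Nitro A0, COS)
 * contexts, pick the default ring group, derive the MRU from the
 * current MTU, and enable VLAN stripping when requested.
 */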
3962 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
3963 {
3964         unsigned int ring = 0, grp_idx;
3965         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3966         struct hwrm_vnic_cfg_input req = {0};
3967         u16 def_vlan = 0;
3968
3969         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
3970
3971         req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
3972         /* Only RSS is supported for now; COS and LB are TBD */
3973         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
3974                 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
3975                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
3976                                            VNIC_CFG_REQ_ENABLES_MRU);
3977         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
3978                 req.rss_rule =
3979                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
3980                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
3981                                            VNIC_CFG_REQ_ENABLES_MRU);
3982                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
3983         } else {
3984                 req.rss_rule = cpu_to_le16(0xffff);
3985         }
3986
3987         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
3988             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
3989                 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
3990                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
3991         } else {
3992                 req.cos_rule = cpu_to_le16(0xffff);
3993         }
3994
3995         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3996                 ring = 0;
3997         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
3998                 ring = vnic_id - 1;
3999         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
4000                 ring = bp->rx_nr_rings - 1;
4001
4002         grp_idx = bp->rx_ring[ring].bnapi->index;
4003         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4004         req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
4005
4006         req.lb_rule = cpu_to_le16(0xffff);
4007         req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
4008                               VLAN_HLEN);
4009
4010 #ifdef CONFIG_BNXT_SRIOV
4011         if (BNXT_VF(bp))
4012                 def_vlan = bp->vf.vlan;
4013 #endif
4014         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
4015                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
4016         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
4017                 req.flags |=
4018                         cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
4019
4020         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4021 }
4022
4023 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
4024 {
4025         u32 rc = 0;
4026
4027         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
4028                 struct hwrm_vnic_free_input req = {0};
4029
4030                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
4031                 req.vnic_id =
4032                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
4033
4034                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4035                 if (rc)
4036                         return rc;
4037                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
4038         }
4039         return rc;
4040 }
4041
4042 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
4043 {
4044         u16 i;
4045
4046         for (i = 0; i < bp->nr_vnics; i++)
4047                 bnxt_hwrm_vnic_free_one(bp, i);
4048 }
4049
4050 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
4051                                 unsigned int start_rx_ring_idx,
4052                                 unsigned int nr_rings)
4053 {
4054         int rc = 0;
4055         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
4056         struct hwrm_vnic_alloc_input req = {0};
4057         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4058
4059         /* map ring groups to this vnic */
4060         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
4061                 grp_idx = bp->rx_ring[i].bnapi->index;
4062                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
4063                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
4064                                    j, nr_rings);
4065                         break;
4066                 }
4067                 bp->vnic_info[vnic_id].fw_grp_ids[j] =
4068                                         bp->grp_info[grp_idx].fw_grp_id;
4069         }
4070
4071         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
4072         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
4073         if (vnic_id == 0)
4074                 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
4075
4076         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
4077
4078         mutex_lock(&bp->hwrm_cmd_lock);
4079         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4080         if (!rc)
4081                 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
4082         mutex_unlock(&bp->hwrm_cmd_lock);
4083         return rc;
4084 }
4085
4086 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
4087 {
4088         struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4089         struct hwrm_vnic_qcaps_input req = {0};
4090         int rc;
4091
4092         if (bp->hwrm_spec_code < 0x10600)
4093                 return 0;
4094
4095         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
4096         mutex_lock(&bp->hwrm_cmd_lock);
4097         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4098         if (!rc) {
4099                 if (resp->flags &
4100                     cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
4101                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
4102         }
4103         mutex_unlock(&bp->hwrm_cmd_lock);
4104         return rc;
4105 }
4106
4107 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
4108 {
4109         u16 i;
4110         u32 rc = 0;
4111
4112         mutex_lock(&bp->hwrm_cmd_lock);
4113         for (i = 0; i < bp->rx_nr_rings; i++) {
4114                 struct hwrm_ring_grp_alloc_input req = {0};
4115                 struct hwrm_ring_grp_alloc_output *resp =
4116                                         bp->hwrm_cmd_resp_addr;
4117                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
4118
4119                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
4120
4121                 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
4122                 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
4123                 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
4124                 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
4125
4126                 rc = _hwrm_send_message(bp, &req, sizeof(req),
4127                                         HWRM_CMD_TIMEOUT);
4128                 if (rc)
4129                         break;
4130
4131                 bp->grp_info[grp_idx].fw_grp_id =
4132                         le32_to_cpu(resp->ring_group_id);
4133         }
4134         mutex_unlock(&bp->hwrm_cmd_lock);
4135         return rc;
4136 }
4137
4138 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
4139 {
4140         u16 i;
4141         u32 rc = 0;
4142         struct hwrm_ring_grp_free_input req = {0};
4143
4144         if (!bp->grp_info)
4145                 return 0;
4146
4147         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
4148
4149         mutex_lock(&bp->hwrm_cmd_lock);
4150         for (i = 0; i < bp->cp_nr_rings; i++) {
4151                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
4152                         continue;
4153                 req.ring_group_id =
4154                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
4155
4156                 rc = _hwrm_send_message(bp, &req, sizeof(req),
4157                                         HWRM_CMD_TIMEOUT);
4158                 if (rc)
4159                         break;
4160                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4161         }
4162         mutex_unlock(&bp->hwrm_cmd_lock);
4163         return rc;
4164 }
4165
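/* Allocate one hardware ring (TX, RX, RX aggregation or completion)
 * through HWRM_RING_ALLOC and store the returned firmware ring id in
 * the ring structure.
 */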
4166 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
4167                                     struct bnxt_ring_struct *ring,
4168                                     u32 ring_type, u32 map_index,
4169                                     u32 stats_ctx_id)
4170 {
4171         int rc = 0, err = 0;
4172         struct hwrm_ring_alloc_input req = {0};
4173         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4174         u16 ring_id;
4175
4176         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
4177
4178         req.enables = 0;
4179         if (ring->nr_pages > 1) {
4180                 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
4181                 /* Page size is in log2 units */
4182                 req.page_size = BNXT_PAGE_SHIFT;
4183                 req.page_tbl_depth = 1;
4184         } else {
4185                 req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
4186         }
4187         req.fbo = 0;
4188         /* Association of ring index with doorbell index and MSIX number */
4189         req.logical_id = cpu_to_le16(map_index);
4190
4191         switch (ring_type) {
4192         case HWRM_RING_ALLOC_TX:
4193                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
4194                 /* Association of transmit ring with completion ring */
4195                 req.cmpl_ring_id =
4196                         cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
4197                 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
4198                 req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
4199                 req.queue_id = cpu_to_le16(ring->queue_id);
4200                 break;
4201         case HWRM_RING_ALLOC_RX:
4202                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4203                 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
4204                 break;
4205         case HWRM_RING_ALLOC_AGG:
4206                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4207                 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
4208                 break;
4209         case HWRM_RING_ALLOC_CMPL:
4210                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
4211                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
4212                 if (bp->flags & BNXT_FLAG_USING_MSIX)
4213                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4214                 break;
4215         default:
4216                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
4217                            ring_type);
4218                 return -1;
4219         }
4220
4221         mutex_lock(&bp->hwrm_cmd_lock);
4222         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4223         err = le16_to_cpu(resp->error_code);
4224         ring_id = le16_to_cpu(resp->ring_id);
4225         mutex_unlock(&bp->hwrm_cmd_lock);
4226
4227         if (rc || err) {
4228                 switch (ring_type) {
4229                 case RING_FREE_REQ_RING_TYPE_L2_CMPL:
4230                         netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
4231                                    rc, err);
4232                         return -1;
4233
4234                 case RING_FREE_REQ_RING_TYPE_RX:
4235                         netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
4236                                    rc, err);
4237                         return -1;
4238
4239                 case RING_FREE_REQ_RING_TYPE_TX:
4240                         netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
4241                                    rc, err);
4242                         return -1;
4243
4244                 default:
4245                         netdev_err(bp->dev, "Invalid ring\n");
4246                         return -1;
4247                 }
4248         }
4249         ring->fw_ring_id = ring_id;
4250         return rc;
4251 }
4252
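/* Tell the firmware which completion ring should receive asynchronous
 * event notifications (HWRM_FUNC_CFG on the PF, HWRM_FUNC_VF_CFG on a VF).
 */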
4253 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
4254 {
4255         int rc;
4256
4257         if (BNXT_PF(bp)) {
4258                 struct hwrm_func_cfg_input req = {0};
4259
4260                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4261                 req.fid = cpu_to_le16(0xffff);
4262                 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4263                 req.async_event_cr = cpu_to_le16(idx);
4264                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4265         } else {
4266                 struct hwrm_func_vf_cfg_input req = {0};
4267
4268                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
4269                 req.enables =
4270                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4271                 req.async_event_cr = cpu_to_le16(idx);
4272                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4273         }
4274         return rc;
4275 }
4276
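/* Allocate all completion, TX, RX and (if enabled) aggregation rings in
 * firmware and program their doorbells.  Completion ring 0 is also
 * registered as the async event completion ring.
 */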
4277 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
4278 {
4279         int i, rc = 0;
4280
4281         for (i = 0; i < bp->cp_nr_rings; i++) {
4282                 struct bnxt_napi *bnapi = bp->bnapi[i];
4283                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4284                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4285
4286                 cpr->cp_doorbell = bp->bar1 + i * 0x80;
4287                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
4288                                               INVALID_STATS_CTX_ID);
4289                 if (rc)
4290                         goto err_out;
4291                 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
4292                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
4293
4294                 if (!i) {
4295                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
4296                         if (rc)
4297                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
4298                 }
4299         }
4300
4301         for (i = 0; i < bp->tx_nr_rings; i++) {
4302                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4303                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4304                 u32 map_idx = txr->bnapi->index;
4305                 u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
4306
4307                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
4308                                               map_idx, fw_stats_ctx);
4309                 if (rc)
4310                         goto err_out;
4311                 txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
4312         }
4313
4314         for (i = 0; i < bp->rx_nr_rings; i++) {
4315                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4316                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
4317                 u32 map_idx = rxr->bnapi->index;
4318
4319                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
4320                                               map_idx, INVALID_STATS_CTX_ID);
4321                 if (rc)
4322                         goto err_out;
4323                 rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
4324                 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
4325                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
4326         }
4327
4328         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4329                 for (i = 0; i < bp->rx_nr_rings; i++) {
4330                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4331                         struct bnxt_ring_struct *ring =
4332                                                 &rxr->rx_agg_ring_struct;
4333                         u32 grp_idx = rxr->bnapi->index;
4334                         u32 map_idx = grp_idx + bp->rx_nr_rings;
4335
4336                         rc = hwrm_ring_alloc_send_msg(bp, ring,
4337                                                       HWRM_RING_ALLOC_AGG,
4338                                                       map_idx,
4339                                                       INVALID_STATS_CTX_ID);
4340                         if (rc)
4341                                 goto err_out;
4342
4343                         rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
4344                         writel(DB_KEY_RX | rxr->rx_agg_prod,
4345                                rxr->rx_agg_doorbell);
4346                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
4347                 }
4348         }
4349 err_out:
4350         return rc;
4351 }
4352
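/* Send one HWRM_RING_FREE request for the given ring and log any
 * firmware error by ring type.
 */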
4353 static int hwrm_ring_free_send_msg(struct bnxt *bp,
4354                                    struct bnxt_ring_struct *ring,
4355                                    u32 ring_type, int cmpl_ring_id)
4356 {
4357         int rc;
4358         struct hwrm_ring_free_input req = {0};
4359         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
4360         u16 error_code;
4361
4362         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
4363         req.ring_type = ring_type;
4364         req.ring_id = cpu_to_le16(ring->fw_ring_id);
4365
4366         mutex_lock(&bp->hwrm_cmd_lock);
4367         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4368         error_code = le16_to_cpu(resp->error_code);
4369         mutex_unlock(&bp->hwrm_cmd_lock);
4370
4371         if (rc || error_code) {
4372                 switch (ring_type) {
4373                 case RING_FREE_REQ_RING_TYPE_L2_CMPL:
4374                         netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
4375                                    rc);
4376                         return rc;
4377                 case RING_FREE_REQ_RING_TYPE_RX:
4378                         netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
4379                                    rc);
4380                         return rc;
4381                 case RING_FREE_REQ_RING_TYPE_TX:
4382                         netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
4383                                    rc);
4384                         return rc;
4385                 default:
4386                         netdev_err(bp->dev, "Invalid ring\n");
4387                         return -1;
4388                 }
4389         }
4390         return 0;
4391 }
4392
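/* Free all TX, RX and aggregation rings in firmware, then disable
 * interrupts and free the completion rings last.
 */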
4393 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
4394 {
4395         int i;
4396
4397         if (!bp->bnapi)
4398                 return;
4399
4400         for (i = 0; i < bp->tx_nr_rings; i++) {
4401                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4402                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4403                 u32 grp_idx = txr->bnapi->index;
4404                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4405
4406                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4407                         hwrm_ring_free_send_msg(bp, ring,
4408                                                 RING_FREE_REQ_RING_TYPE_TX,
4409                                                 close_path ? cmpl_ring_id :
4410                                                 INVALID_HW_RING_ID);
4411                         ring->fw_ring_id = INVALID_HW_RING_ID;
4412                 }
4413         }
4414
4415         for (i = 0; i < bp->rx_nr_rings; i++) {
4416                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4417                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
4418                 u32 grp_idx = rxr->bnapi->index;
4419                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4420
4421                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4422                         hwrm_ring_free_send_msg(bp, ring,
4423                                                 RING_FREE_REQ_RING_TYPE_RX,
4424                                                 close_path ? cmpl_ring_id :
4425                                                 INVALID_HW_RING_ID);
4426                         ring->fw_ring_id = INVALID_HW_RING_ID;
4427                         bp->grp_info[grp_idx].rx_fw_ring_id =
4428                                 INVALID_HW_RING_ID;
4429                 }
4430         }
4431
4432         for (i = 0; i < bp->rx_nr_rings; i++) {
4433                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4434                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
4435                 u32 grp_idx = rxr->bnapi->index;
4436                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4437
4438                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4439                         hwrm_ring_free_send_msg(bp, ring,
4440                                                 RING_FREE_REQ_RING_TYPE_RX,
4441                                                 close_path ? cmpl_ring_id :
4442                                                 INVALID_HW_RING_ID);
4443                         ring->fw_ring_id = INVALID_HW_RING_ID;
4444                         bp->grp_info[grp_idx].agg_fw_ring_id =
4445                                 INVALID_HW_RING_ID;
4446                 }
4447         }
4448
4449         /* The completion rings are about to be freed.  After that the
4450          * IRQ doorbell will not work anymore.  So we need to disable
4451          * IRQ here.
4452          */
4453         bnxt_disable_int_sync(bp);
4454
4455         for (i = 0; i < bp->cp_nr_rings; i++) {
4456                 struct bnxt_napi *bnapi = bp->bnapi[i];
4457                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4458                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4459
4460                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4461                         hwrm_ring_free_send_msg(bp, ring,
4462                                                 RING_FREE_REQ_RING_TYPE_L2_CMPL,
4463                                                 INVALID_HW_RING_ID);
4464                         ring->fw_ring_id = INVALID_HW_RING_ID;
4465                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4466                 }
4467         }
4468 }
4469
4470 /* Caller must hold bp->hwrm_cmd_lock */
4471 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
4472 {
4473         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4474         struct hwrm_func_qcfg_input req = {0};
4475         int rc;
4476
4477         if (bp->hwrm_spec_code < 0x10601)
4478                 return 0;
4479
4480         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
4481         req.fid = cpu_to_le16(fid);
4482         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4483         if (!rc)
4484                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
4485
4486         return rc;
4487 }
4488
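/* Reserve TX rings in firmware (PF only, HWRM spec 1.6.1 or later) and
 * read back the number of rings actually granted.
 */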
4489 static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
4490 {
4491         struct hwrm_func_cfg_input req = {0};
4492         int rc;
4493
4494         if (bp->hwrm_spec_code < 0x10601)
4495                 return 0;
4496
4497         if (BNXT_VF(bp))
4498                 return 0;
4499
4500         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4501         req.fid = cpu_to_le16(0xffff);
4502         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
4503         req.num_tx_rings = cpu_to_le16(*tx_rings);
4504         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4505         if (rc)
4506                 return rc;
4507
4508         mutex_lock(&bp->hwrm_cmd_lock);
4509         rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
4510         mutex_unlock(&bp->hwrm_cmd_lock);
4511         if (!rc)
4512                 bp->tx_reserved_rings = *tx_rings;
4513         return rc;
4514 }
4515
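/* Check with the firmware (HWRM spec 1.8.1 or later) whether the
 * requested number of TX rings is available, using the TX_ASSETS_TEST
 * flag so nothing is committed.
 */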
4516 static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
4517 {
4518         struct hwrm_func_cfg_input req = {0};
4519         int rc;
4520
4521         if (bp->hwrm_spec_code < 0x10801)
4522                 return 0;
4523
4524         if (BNXT_VF(bp))
4525                 return 0;
4526
4527         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4528         req.fid = cpu_to_le16(0xffff);
4529         req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST);
4530         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
4531         req.num_tx_rings = cpu_to_le16(tx_rings);
4532         rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4533         if (rc)
4534                 return -ENOMEM;
4535         return 0;
4536 }
4537
4538 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
4539         u32 buf_tmrs, u16 flags,
4540         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4541 {
4542         req->flags = cpu_to_le16(flags);
4543         req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
4544         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
4545         req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
4546         req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
4547         /* Minimum time between 2 interrupts set to buf_tmr x 2 */
4548         req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
4549         req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
4550         req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
4551 }
4552
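/* Program interrupt coalescing for every completion ring, using the RX
 * parameter set for NAPI instances with an RX ring and the TX set
 * otherwise.
 */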
4553 int bnxt_hwrm_set_coal(struct bnxt *bp)
4554 {
4555         int i, rc = 0;
4556         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
4557                                                            req_tx = {0}, *req;
4558         u16 max_buf, max_buf_irq;
4559         u16 buf_tmr, buf_tmr_irq;
4560         u32 flags;
4561
4562         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
4563                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
4564         bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
4565                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
4566
4567         /* Each rx completion (2 records) should be DMAed immediately.
4568          * DMA 1/4 of the completion buffers at a time.
4569          */
4570         max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
4571         /* max_buf must not be zero */
4572         max_buf = clamp_t(u16, max_buf, 1, 63);
4573         max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
4574         buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
4575         /* buf timer set to 1/4 of interrupt timer */
4576         buf_tmr = max_t(u16, buf_tmr / 4, 1);
4577         buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
4578         buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
4579
4580         flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
4581
4582         /* RING_IDLE generates more IRQs for lower latency.  Enable it only
4583          * if coal_ticks is less than 25 us.
4584          */
4585         if (bp->rx_coal_ticks < 25)
4586                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
4587
4588         bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
4589                                   buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
4590
4591         /* max_buf must not be zero */
4592         max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
4593         max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
4594         buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
4595         /* buf timer set to 1/4 of interrupt timer */
4596         buf_tmr = max_t(u16, buf_tmr / 4, 1);
4597         buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
4598         buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
4599
4600         flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
4601         bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
4602                                   buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
4603
4604         mutex_lock(&bp->hwrm_cmd_lock);
4605         for (i = 0; i < bp->cp_nr_rings; i++) {
4606                 struct bnxt_napi *bnapi = bp->bnapi[i];
4607
4608                 req = &req_rx;
4609                 if (!bnapi->rx_ring)
4610                         req = &req_tx;
4611                 req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
4612
4613                 rc = _hwrm_send_message(bp, req, sizeof(*req),
4614                                         HWRM_CMD_TIMEOUT);
4615                 if (rc)
4616                         break;
4617         }
4618         mutex_unlock(&bp->hwrm_cmd_lock);
4619         return rc;
4620 }
4621
4622 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
4623 {
4624         int rc = 0, i;
4625         struct hwrm_stat_ctx_free_input req = {0};
4626
4627         if (!bp->bnapi)
4628                 return 0;
4629
4630         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4631                 return 0;
4632
4633         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
4634
4635         mutex_lock(&bp->hwrm_cmd_lock);
4636         for (i = 0; i < bp->cp_nr_rings; i++) {
4637                 struct bnxt_napi *bnapi = bp->bnapi[i];
4638                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4639
4640                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
4641                         req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
4642
4643                         rc = _hwrm_send_message(bp, &req, sizeof(req),
4644                                                 HWRM_CMD_TIMEOUT);
4645                         if (rc)
4646                                 break;
4647
4648                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4649                 }
4650         }
4651         mutex_unlock(&bp->hwrm_cmd_lock);
4652         return rc;
4653 }
4654
4655 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
4656 {
4657         int rc = 0, i;
4658         struct hwrm_stat_ctx_alloc_input req = {0};
4659         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4660
4661         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4662                 return 0;
4663
4664         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
4665
4666         req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
4667
4668         mutex_lock(&bp->hwrm_cmd_lock);
4669         for (i = 0; i < bp->cp_nr_rings; i++) {
4670                 struct bnxt_napi *bnapi = bp->bnapi[i];
4671                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4672
4673                 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
4674
4675                 rc = _hwrm_send_message(bp, &req, sizeof(req),
4676                                         HWRM_CMD_TIMEOUT);
4677                 if (rc)
4678                         break;
4679
4680                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
4681
4682                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
4683         }
4684         mutex_unlock(&bp->hwrm_cmd_lock);
4685         return rc;
4686 }
4687
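/* Query the current function configuration and cache the VF VLAN, the
 * LLDP/DCBX agent and multi-host flags, the NPAR partition type and the
 * bridge (EVB) mode.
 */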
4688 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
4689 {
4690         struct hwrm_func_qcfg_input req = {0};
4691         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4692         u16 flags;
4693         int rc;
4694
4695         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
4696         req.fid = cpu_to_le16(0xffff);
4697         mutex_lock(&bp->hwrm_cmd_lock);
4698         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4699         if (rc)
4700                 goto func_qcfg_exit;
4701
4702 #ifdef CONFIG_BNXT_SRIOV
4703         if (BNXT_VF(bp)) {
4704                 struct bnxt_vf_info *vf = &bp->vf;
4705
4706                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
4707         }
4708 #endif
4709         flags = le16_to_cpu(resp->flags);
4710         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
4711                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
4712                 bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
4713                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
4714                         bp->flags |= BNXT_FLAG_FW_DCBX_AGENT;
4715         }
4716         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
4717                 bp->flags |= BNXT_FLAG_MULTI_HOST;
4718
4719         switch (resp->port_partition_type) {
4720         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
4721         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
4722         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
4723                 bp->port_partition_type = resp->port_partition_type;
4724                 break;
4725         }
4726         if (bp->hwrm_spec_code < 0x10707 ||
4727             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
4728                 bp->br_mode = BRIDGE_MODE_VEB;
4729         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
4730                 bp->br_mode = BRIDGE_MODE_VEPA;
4731         else
4732                 bp->br_mode = BRIDGE_MODE_UNDEF;
4733
4734 func_qcfg_exit:
4735         mutex_unlock(&bp->hwrm_cmd_lock);
4736         return rc;
4737 }
4738
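/* Query function capabilities and record the PF or VF resource limits
 * (rings, ring groups, VNICs, stats contexts, etc.) along with the
 * RoCE, TX push and WoL capability flags.
 */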
4739 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
4740 {
4741         int rc = 0;
4742         struct hwrm_func_qcaps_input req = {0};
4743         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4744
4745         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
4746         req.fid = cpu_to_le16(0xffff);
4747
4748         mutex_lock(&bp->hwrm_cmd_lock);
4749         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4750         if (rc)
4751                 goto hwrm_func_qcaps_exit;
4752
4753         if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
4754                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
4755         if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
4756                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
4757
4758         bp->tx_push_thresh = 0;
4759         if (resp->flags &
4760             cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
4761                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
4762
4763         if (BNXT_PF(bp)) {
4764                 struct bnxt_pf_info *pf = &bp->pf;
4765
4766                 pf->fw_fid = le16_to_cpu(resp->fid);
4767                 pf->port_id = le16_to_cpu(resp->port_id);
4768                 bp->dev->dev_port = pf->port_id;
4769                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
4770                 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4771                 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4772                 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
4773                 pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
4774                 pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
4775                 if (!pf->max_hw_ring_grps)
4776                         pf->max_hw_ring_grps = pf->max_tx_rings;
4777                 pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
4778                 pf->max_vnics = le16_to_cpu(resp->max_vnics);
4779                 pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
4780                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
4781                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
4782                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
4783                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
4784                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
4785                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
4786                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
4787                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
4788                 if (resp->flags &
4789                     cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
4790                         bp->flags |= BNXT_FLAG_WOL_CAP;
4791         } else {
4792 #ifdef CONFIG_BNXT_SRIOV
4793                 struct bnxt_vf_info *vf = &bp->vf;
4794
4795                 vf->fw_fid = le16_to_cpu(resp->fid);
4796
4797                 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4798                 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4799                 vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
4800                 vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
4801                 vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
4802                 if (!vf->max_hw_ring_grps)
4803                         vf->max_hw_ring_grps = vf->max_tx_rings;
4804                 vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
4805                 vf->max_vnics = le16_to_cpu(resp->max_vnics);
4806                 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
4807
4808                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
4809 #endif
4810         }
4811
4812 hwrm_func_qcaps_exit:
4813         mutex_unlock(&bp->hwrm_cmd_lock);
4814         return rc;
4815 }
4816
4817 static int bnxt_hwrm_func_reset(struct bnxt *bp)
4818 {
4819         struct hwrm_func_reset_input req = {0};
4820
4821         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
4822         req.enables = 0;
4823
4824         return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
4825 }
4826
4827 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
4828 {
4829         int rc = 0;
4830         struct hwrm_queue_qportcfg_input req = {0};
4831         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
4832         u8 i, *qptr;
4833
4834         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
4835
4836         mutex_lock(&bp->hwrm_cmd_lock);
4837         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4838         if (rc)
4839                 goto qportcfg_exit;
4840
4841         if (!resp->max_configurable_queues) {
4842                 rc = -EINVAL;
4843                 goto qportcfg_exit;
4844         }
4845         bp->max_tc = resp->max_configurable_queues;
4846         bp->max_lltc = resp->max_configurable_lossless_queues;
4847         if (bp->max_tc > BNXT_MAX_QUEUE)
4848                 bp->max_tc = BNXT_MAX_QUEUE;
4849
4850         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
4851                 bp->max_tc = 1;
4852
4853         if (bp->max_lltc > bp->max_tc)
4854                 bp->max_lltc = bp->max_tc;
4855
4856         qptr = &resp->queue_id0;
4857         for (i = 0; i < bp->max_tc; i++) {
4858                 bp->q_info[i].queue_id = *qptr++;
4859                 bp->q_info[i].queue_profile = *qptr++;
4860         }
4861
4862 qportcfg_exit:
4863         mutex_unlock(&bp->hwrm_cmd_lock);
4864         return rc;
4865 }
4866
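/* Query the HWRM interface and firmware versions, then derive the
 * command timeout, maximum request length and short command support.
 */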
4867 static int bnxt_hwrm_ver_get(struct bnxt *bp)
4868 {
4869         int rc;
4870         struct hwrm_ver_get_input req = {0};
4871         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
4872         u32 dev_caps_cfg;
4873
4874         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
4875         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
4876         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
4877         req.hwrm_intf_min = HWRM_VERSION_MINOR;
4878         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
4879         mutex_lock(&bp->hwrm_cmd_lock);
4880         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4881         if (rc)
4882                 goto hwrm_ver_get_exit;
4883
4884         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
4885
4886         bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
4887                              resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
4888         if (resp->hwrm_intf_maj < 1) {
4889                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
4890                             resp->hwrm_intf_maj, resp->hwrm_intf_min,
4891                             resp->hwrm_intf_upd);
4892                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
4893         }
4894         snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
4895                  resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
4896                  resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
4897
4898         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
4899         if (!bp->hwrm_cmd_timeout)
4900                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
4901
4902         if (resp->hwrm_intf_maj >= 1)
4903                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
4904
4905         bp->chip_num = le16_to_cpu(resp->chip_num);
4906         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
4907             !resp->chip_metal)
4908                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
4909
4910         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
4911         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
4912             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
4913                 bp->flags |= BNXT_FLAG_SHORT_CMD;
4914
4915 hwrm_ver_get_exit:
4916         mutex_unlock(&bp->hwrm_cmd_lock);
4917         return rc;
4918 }
4919
4920 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
4921 {
4922 #if IS_ENABLED(CONFIG_RTC_LIB)
4923         struct hwrm_fw_set_time_input req = {0};
4924         struct rtc_time tm;
4925         struct timeval tv;
4926
4927         if (bp->hwrm_spec_code < 0x10400)
4928                 return -EOPNOTSUPP;
4929
4930         do_gettimeofday(&tv);
4931         rtc_time_to_tm(tv.tv_sec, &tm);
4932         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
4933         req.year = cpu_to_le16(1900 + tm.tm_year);
4934         req.month = 1 + tm.tm_mon;
4935         req.day = tm.tm_mday;
4936         req.hour = tm.tm_hour;
4937         req.minute = tm.tm_min;
4938         req.second = tm.tm_sec;
4939         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4940 #else
4941         return -EOPNOTSUPP;
4942 #endif
4943 }
4944
4945 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
4946 {
4947         int rc;
4948         struct bnxt_pf_info *pf = &bp->pf;
4949         struct hwrm_port_qstats_input req = {0};
4950
4951         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
4952                 return 0;
4953
4954         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
4955         req.port_id = cpu_to_le16(pf->port_id);
4956         req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
4957         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
4958         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4959         return rc;
4960 }
4961
4962 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
4963 {
4964         if (bp->vxlan_port_cnt) {
4965                 bnxt_hwrm_tunnel_dst_port_free(
4966                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
4967         }
4968         bp->vxlan_port_cnt = 0;
4969         if (bp->nge_port_cnt) {
4970                 bnxt_hwrm_tunnel_dst_port_free(
4971                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
4972         }
4973         bp->nge_port_cnt = 0;
4974 }
4975
4976 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
4977 {
4978         int rc, i;
4979         u32 tpa_flags = 0;
4980
4981         if (set_tpa)
4982                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
4983         for (i = 0; i < bp->nr_vnics; i++) {
4984                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
4985                 if (rc) {
4986                         netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
4987                                    i, rc);
4988                         return rc;
4989                 }
4990         }
4991         return 0;
4992 }
4993
4994 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
4995 {
4996         int i;
4997
4998         for (i = 0; i < bp->nr_vnics; i++)
4999                 bnxt_hwrm_vnic_set_rss(bp, i, false);
5000 }
5001
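/* Release firmware resources in reverse order of allocation: filters,
 * RSS, VNIC contexts and VNICs, then rings and ring groups, and finally
 * stats contexts and tunnel ports when IRQs are being re-initialized.
 */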
5002 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
5003                                     bool irq_re_init)
5004 {
5005         if (bp->vnic_info) {
5006                 bnxt_hwrm_clear_vnic_filter(bp);
5007                 /* clear all RSS settings before freeing the vnic ctx */
5008                 bnxt_hwrm_clear_vnic_rss(bp);
5009                 bnxt_hwrm_vnic_ctx_free(bp);
5010                 /* before freeing the vnic, undo the vnic tpa settings */
5011                 if (bp->flags & BNXT_FLAG_TPA)
5012                         bnxt_set_tpa(bp, false);
5013                 bnxt_hwrm_vnic_free(bp);
5014         }
5015         bnxt_hwrm_ring_free(bp, close_path);
5016         bnxt_hwrm_ring_grp_free(bp);
5017         if (irq_re_init) {
5018                 bnxt_hwrm_stat_ctx_free(bp);
5019                 bnxt_hwrm_free_tunnel_ports(bp);
5020         }
5021 }
5022
5023 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
5024 {
5025         struct hwrm_func_cfg_input req = {0};
5026         int rc;
5027
5028         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5029         req.fid = cpu_to_le16(0xffff);
5030         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
5031         if (br_mode == BRIDGE_MODE_VEB)
5032                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
5033         else if (br_mode == BRIDGE_MODE_VEPA)
5034                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
5035         else
5036                 return -EINVAL;
5037         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5038         if (rc)
5039                 rc = -EIO;
5040         return rc;
5041 }
5042
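/* Allocate the RSS/COS context(s) for a VNIC (unless it uses the RFS
 * new-RSS path), configure the VNIC, enable RSS and set up header-data
 * split when aggregation rings are in use.
 */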
5043 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
5044 {
5045         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5046         int rc;
5047
5048         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
5049                 goto skip_rss_ctx;
5050
5051         /* allocate context for vnic */
5052         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
5053         if (rc) {
5054                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
5055                            vnic_id, rc);
5056                 goto vnic_setup_err;
5057         }
5058         bp->rsscos_nr_ctxs++;
5059
5060         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5061                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
5062                 if (rc) {
5063                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
5064                                    vnic_id, rc);
5065                         goto vnic_setup_err;
5066                 }
5067                 bp->rsscos_nr_ctxs++;
5068         }
5069
5070 skip_rss_ctx:
5071         /* configure default vnic, ring grp */
5072         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
5073         if (rc) {
5074                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
5075                            vnic_id, rc);
5076                 goto vnic_setup_err;
5077         }
5078
5079         /* Enable RSS hashing on vnic */
5080         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
5081         if (rc) {
5082                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
5083                            vnic_id, rc);
5084                 goto vnic_setup_err;
5085         }
5086
5087         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5088                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
5089                 if (rc) {
5090                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
5091                                    vnic_id, rc);
5092                 }
5093         }
5094
5095 vnic_setup_err:
5096         return rc;
5097 }
5098
5099 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
5100 {
5101 #ifdef CONFIG_RFS_ACCEL
5102         int i, rc = 0;
5103
5104         for (i = 0; i < bp->rx_nr_rings; i++) {
5105                 struct bnxt_vnic_info *vnic;
5106                 u16 vnic_id = i + 1;
5107                 u16 ring_id = i;
5108
5109                 if (vnic_id >= bp->nr_vnics)
5110                         break;
5111
5112                 vnic = &bp->vnic_info[vnic_id];
5113                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
5114                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
5115                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
5116                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
5117                 if (rc) {
5118                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
5119                                    vnic_id, rc);
5120                         break;
5121                 }
5122                 rc = bnxt_setup_vnic(bp, vnic_id);
5123                 if (rc)
5124                         break;
5125         }
5126         return rc;
5127 #else
5128         return 0;
5129 #endif
5130 }
5131
5132 /* Allow PF and VF with default VLAN to be in promiscuous mode */
5133 static bool bnxt_promisc_ok(struct bnxt *bp)
5134 {
5135 #ifdef CONFIG_BNXT_SRIOV
5136         if (BNXT_VF(bp) && !bp->vf.vlan)
5137                 return false;
5138 #endif
5139         return true;
5140 }
5141
5142 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
5143 {
5144         unsigned int rc = 0;
5145
5146         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
5147         if (rc) {
5148                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
5149                            rc);
5150                 return rc;
5151         }
5152
5153         rc = bnxt_hwrm_vnic_cfg(bp, 1);
5154         if (rc) {
5155                 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
5156                            rc);
5157                 return rc;
5158         }
5159         return rc;
5160 }
5161
5162 static int bnxt_cfg_rx_mode(struct bnxt *);
5163 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
5164
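/* Bring up the data path in firmware: stats contexts, rings, ring
 * groups, the default VNIC (plus RFS VNICs), TPA, the default MAC
 * filter, the RX mask and interrupt coalescing.
 */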
5165 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
5166 {
5167         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5168         int rc = 0;
5169         unsigned int rx_nr_rings = bp->rx_nr_rings;
5170
5171         if (irq_re_init) {
5172                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
5173                 if (rc) {
5174                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
5175                                    rc);
5176                         goto err_out;
5177                 }
5178                 if (bp->tx_reserved_rings != bp->tx_nr_rings) {
5179                         int tx = bp->tx_nr_rings;
5180
5181                         if (bnxt_hwrm_reserve_tx_rings(bp, &tx) ||
5182                             tx < bp->tx_nr_rings) {
5183                                 rc = -ENOMEM;
5184                                 goto err_out;
5185                         }
5186                 }
5187         }
5188
5189         rc = bnxt_hwrm_ring_alloc(bp);
5190         if (rc) {
5191                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
5192                 goto err_out;
5193         }
5194
5195         rc = bnxt_hwrm_ring_grp_alloc(bp);
5196         if (rc) {
5197                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
5198                 goto err_out;
5199         }
5200
5201         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5202                 rx_nr_rings--;
5203
5204         /* default vnic 0 */
5205         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
5206         if (rc) {
5207                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
5208                 goto err_out;
5209         }
5210
5211         rc = bnxt_setup_vnic(bp, 0);
5212         if (rc)
5213                 goto err_out;
5214
5215         if (bp->flags & BNXT_FLAG_RFS) {
5216                 rc = bnxt_alloc_rfs_vnics(bp);
5217                 if (rc)
5218                         goto err_out;
5219         }
5220
5221         if (bp->flags & BNXT_FLAG_TPA) {
5222                 rc = bnxt_set_tpa(bp, true);
5223                 if (rc)
5224                         goto err_out;
5225         }
5226
5227         if (BNXT_VF(bp))
5228                 bnxt_update_vf_mac(bp);
5229
5230         /* Filter for default vnic 0 */
5231         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
5232         if (rc) {
5233                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
5234                 goto err_out;
5235         }
5236         vnic->uc_filter_count = 1;
5237
5238         vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
5239
5240         if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
5241                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5242
5243         if (bp->dev->flags & IFF_ALLMULTI) {
5244                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5245                 vnic->mc_list_count = 0;
5246         } else {
5247                 u32 mask = 0;
5248
5249                 bnxt_mc_list_updated(bp, &mask);
5250                 vnic->rx_mask |= mask;
5251         }
5252
5253         rc = bnxt_cfg_rx_mode(bp);
5254         if (rc)
5255                 goto err_out;
5256
5257         rc = bnxt_hwrm_set_coal(bp);
5258         if (rc)
5259                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
5260                                 rc);
5261
5262         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5263                 rc = bnxt_setup_nitroa0_vnic(bp);
5264                 if (rc)
5265                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
5266                                    rc);
5267         }
5268
5269         if (BNXT_VF(bp)) {
5270                 bnxt_hwrm_func_qcfg(bp);
5271                 netdev_update_features(bp->dev);
5272         }
5273
5274         return 0;
5275
5276 err_out:
5277         bnxt_hwrm_resource_free(bp, 0, true);
5278
5279         return rc;
5280 }
5281
5282 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
5283 {
5284         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
5285         return 0;
5286 }
5287
5288 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
5289 {
5290         bnxt_init_cp_rings(bp);
5291         bnxt_init_rx_rings(bp);
5292         bnxt_init_tx_rings(bp);
5293         bnxt_init_ring_grps(bp, irq_re_init);
5294         bnxt_init_vnics(bp);
5295
5296         return bnxt_init_chip(bp, irq_re_init);
5297 }
5298
5299 static int bnxt_set_real_num_queues(struct bnxt *bp)
5300 {
5301         int rc;
5302         struct net_device *dev = bp->dev;
5303
5304         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
5305                                           bp->tx_nr_rings_xdp);
5306         if (rc)
5307                 return rc;
5308
5309         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
5310         if (rc)
5311                 return rc;
5312
5313 #ifdef CONFIG_RFS_ACCEL
5314         if (bp->flags & BNXT_FLAG_RFS)
5315                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
5316 #endif
5317
5318         return rc;
5319 }
5320
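/* Fit the requested RX and TX ring counts within "max": shared rings
 * are simply capped, otherwise RX and TX are reduced alternately until
 * their sum fits.
 */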
5321 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5322                            bool shared)
5323 {
5324         int _rx = *rx, _tx = *tx;
5325
5326         if (shared) {
5327                 *rx = min_t(int, _rx, max);
5328                 *tx = min_t(int, _tx, max);
5329         } else {
5330                 if (max < 2)
5331                         return -ENOMEM;
5332
5333                 while (_rx + _tx > max) {
5334                         if (_rx > _tx && _rx > 1)
5335                                 _rx--;
5336                         else if (_tx > 1)
5337                                 _tx--;
5338                 }
5339                 *rx = _rx;
5340                 *tx = _tx;
5341         }
5342         return 0;
5343 }
5344
5345 static void bnxt_setup_msix(struct bnxt *bp)
5346 {
5347         const int len = sizeof(bp->irq_tbl[0].name);
5348         struct net_device *dev = bp->dev;
5349         int tcs, i;
5350
5351         tcs = netdev_get_num_tc(dev);
5352         if (tcs > 1) {
5353                 int i, off, count;
5354
5355                 for (i = 0; i < tcs; i++) {
5356                         count = bp->tx_nr_rings_per_tc;
5357                         off = i * count;
5358                         netdev_set_tc_queue(dev, i, count, off);
5359                 }
5360         }
5361
5362         for (i = 0; i < bp->cp_nr_rings; i++) {
5363                 char *attr;
5364
5365                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5366                         attr = "TxRx";
5367                 else if (i < bp->rx_nr_rings)
5368                         attr = "rx";
5369                 else
5370                         attr = "tx";
5371
5372                 snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
5373                          i);
5374                 bp->irq_tbl[i].handler = bnxt_msix;
5375         }
5376 }
5377
5378 static void bnxt_setup_inta(struct bnxt *bp)
5379 {
5380         const int len = sizeof(bp->irq_tbl[0].name);
5381
5382         if (netdev_get_num_tc(bp->dev))
5383                 netdev_reset_tc(bp->dev);
5384
5385         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
5386                  0);
5387         bp->irq_tbl[0].handler = bnxt_inta;
5388 }
5389
5390 static int bnxt_setup_int_mode(struct bnxt *bp)
5391 {
5392         int rc;
5393
5394         if (bp->flags & BNXT_FLAG_USING_MSIX)
5395                 bnxt_setup_msix(bp);
5396         else
5397                 bnxt_setup_inta(bp);
5398
5399         rc = bnxt_set_real_num_queues(bp);
5400         return rc;
5401 }
5402
5403 #ifdef CONFIG_RFS_ACCEL
5404 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
5405 {
5406 #if defined(CONFIG_BNXT_SRIOV)
5407         if (BNXT_VF(bp))
5408                 return bp->vf.max_rsscos_ctxs;
5409 #endif
5410         return bp->pf.max_rsscos_ctxs;
5411 }
5412
5413 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
5414 {
5415 #if defined(CONFIG_BNXT_SRIOV)
5416         if (BNXT_VF(bp))
5417                 return bp->vf.max_vnics;
5418 #endif
5419         return bp->pf.max_vnics;
5420 }
5421 #endif
5422
5423 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
5424 {
5425 #if defined(CONFIG_BNXT_SRIOV)
5426         if (BNXT_VF(bp))
5427                 return bp->vf.max_stat_ctxs;
5428 #endif
5429         return bp->pf.max_stat_ctxs;
5430 }
5431
5432 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
5433 {
5434 #if defined(CONFIG_BNXT_SRIOV)
5435         if (BNXT_VF(bp))
5436                 bp->vf.max_stat_ctxs = max;
5437         else
5438 #endif
5439                 bp->pf.max_stat_ctxs = max;
5440 }
5441
5442 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
5443 {
5444 #if defined(CONFIG_BNXT_SRIOV)
5445         if (BNXT_VF(bp))
5446                 return bp->vf.max_cp_rings;
5447 #endif
5448         return bp->pf.max_cp_rings;
5449 }
5450
5451 void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
5452 {
5453 #if defined(CONFIG_BNXT_SRIOV)
5454         if (BNXT_VF(bp))
5455                 bp->vf.max_cp_rings = max;
5456         else
5457 #endif
5458                 bp->pf.max_cp_rings = max;
5459 }
5460
5461 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
5462 {
5463 #if defined(CONFIG_BNXT_SRIOV)
5464         if (BNXT_VF(bp))
5465                 return min_t(unsigned int, bp->vf.max_irqs,
5466                              bp->vf.max_cp_rings);
5467 #endif
5468         return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
5469 }
5470
5471 void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
5472 {
5473 #if defined(CONFIG_BNXT_SRIOV)
5474         if (BNXT_VF(bp))
5475                 bp->vf.max_irqs = max_irqs;
5476         else
5477 #endif
5478                 bp->pf.max_irqs = max_irqs;
5479 }
5480
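/* Enable MSI-X with up to the maximum number of vectors the function
 * supports, build the IRQ table and trim the ring counts to the number
 * of vectors actually granted.
 */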
5481 static int bnxt_init_msix(struct bnxt *bp)
5482 {
5483         int i, total_vecs, rc = 0, min = 1;
5484         struct msix_entry *msix_ent;
5485
5486         total_vecs = bnxt_get_max_func_irqs(bp);
5487         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
5488         if (!msix_ent)
5489                 return -ENOMEM;
5490
5491         for (i = 0; i < total_vecs; i++) {
5492                 msix_ent[i].entry = i;
5493                 msix_ent[i].vector = 0;
5494         }
5495
5496         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
5497                 min = 2;
5498
5499         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
5500         if (total_vecs < 0) {
5501                 rc = -ENODEV;
5502                 goto msix_setup_exit;
5503         }
5504
5505         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
5506         if (bp->irq_tbl) {
5507                 for (i = 0; i < total_vecs; i++)
5508                         bp->irq_tbl[i].vector = msix_ent[i].vector;
5509
5510                 bp->total_irqs = total_vecs;
5511                 /* Trim rings based upon num of vectors allocated */
5512                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
5513                                      total_vecs, min == 1);
5514                 if (rc)
5515                         goto msix_setup_exit;
5516
5517                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
5518                 bp->cp_nr_rings = (min == 1) ?
5519                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
5520                                   bp->tx_nr_rings + bp->rx_nr_rings;
5521
5522         } else {
5523                 rc = -ENOMEM;
5524                 goto msix_setup_exit;
5525         }
5526         bp->flags |= BNXT_FLAG_USING_MSIX;
5527         kfree(msix_ent);
5528         return 0;
5529
5530 msix_setup_exit:
5531         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
5532         kfree(bp->irq_tbl);
5533         bp->irq_tbl = NULL;
5534         pci_disable_msix(bp->pdev);
5535         kfree(msix_ent);
5536         return rc;
5537 }
5538
5539 static int bnxt_init_inta(struct bnxt *bp)
5540 {
5541         bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
5542         if (!bp->irq_tbl)
5543                 return -ENOMEM;
5544
5545         bp->total_irqs = 1;
5546         bp->rx_nr_rings = 1;
5547         bp->tx_nr_rings = 1;
5548         bp->cp_nr_rings = 1;
5549         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
5550         bp->flags |= BNXT_FLAG_SHARED_RINGS;
5551         bp->irq_tbl[0].vector = bp->pdev->irq;
5552         return 0;
5553 }
5554
5555 static int bnxt_init_int_mode(struct bnxt *bp)
5556 {
5557         int rc = 0;
5558
5559         if (bp->flags & BNXT_FLAG_MSIX_CAP)
5560                 rc = bnxt_init_msix(bp);
5561
5562         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
5563                 /* fallback to INTA */
5564                 rc = bnxt_init_inta(bp);
5565         }
5566         return rc;
5567 }
5568
5569 static void bnxt_clear_int_mode(struct bnxt *bp)
5570 {
5571         if (bp->flags & BNXT_FLAG_USING_MSIX)
5572                 pci_disable_msix(bp->pdev);
5573
5574         kfree(bp->irq_tbl);
5575         bp->irq_tbl = NULL;
5576         bp->flags &= ~BNXT_FLAG_USING_MSIX;
5577 }
5578
5579 static void bnxt_free_irq(struct bnxt *bp)
5580 {
5581         struct bnxt_irq *irq;
5582         int i;
5583
5584 #ifdef CONFIG_RFS_ACCEL
5585         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
5586         bp->dev->rx_cpu_rmap = NULL;
5587 #endif
5588         if (!bp->irq_tbl)
5589                 return;
5590
5591         for (i = 0; i < bp->cp_nr_rings; i++) {
5592                 irq = &bp->irq_tbl[i];
5593                 if (irq->requested) {
5594                         if (irq->have_cpumask) {
5595                                 irq_set_affinity_hint(irq->vector, NULL);
5596                                 free_cpumask_var(irq->cpu_mask);
5597                                 irq->have_cpumask = 0;
5598                         }
5599                         free_irq(irq->vector, bp->bnapi[i]);
5600                 }
5601
5602                 irq->requested = 0;
5603         }
5604 }
5605
5606 static int bnxt_request_irq(struct bnxt *bp)
5607 {
5608         int i, j, rc = 0;
5609         unsigned long flags = 0;
5610 #ifdef CONFIG_RFS_ACCEL
5611         struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
5612 #endif
5613
5614         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
5615                 flags = IRQF_SHARED;
5616
5617         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
5618                 struct bnxt_irq *irq = &bp->irq_tbl[i];
5619 #ifdef CONFIG_RFS_ACCEL
5620                 if (rmap && bp->bnapi[i]->rx_ring) {
5621                         rc = irq_cpu_rmap_add(rmap, irq->vector);
5622                         if (rc)
5623                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
5624                                             j);
5625                         j++;
5626                 }
5627 #endif
5628                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5629                                  bp->bnapi[i]);
5630                 if (rc)
5631                         break;
5632
5633                 irq->requested = 1;
5634
5635                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
5636                         int numa_node = dev_to_node(&bp->pdev->dev);
5637
5638                         irq->have_cpumask = 1;
5639                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
5640                                         irq->cpu_mask);
5641                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
5642                         if (rc) {
5643                                 netdev_warn(bp->dev,
5644                                             "Set affinity failed, IRQ = %d\n",
5645                                             irq->vector);
5646                                 break;
5647                         }
5648                 }
5649         }
5650         return rc;
5651 }
5652
5653 static void bnxt_del_napi(struct bnxt *bp)
5654 {
5655         int i;
5656
5657         if (!bp->bnapi)
5658                 return;
5659
5660         for (i = 0; i < bp->cp_nr_rings; i++) {
5661                 struct bnxt_napi *bnapi = bp->bnapi[i];
5662
5663                 napi_hash_del(&bnapi->napi);
5664                 netif_napi_del(&bnapi->napi);
5665         }
5666         /* We called napi_hash_del() before netif_napi_del(), so we need
5667          * to respect an RCU grace period before freeing napi structures.
5668          */
5669         synchronize_net();
5670 }
5671
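/* Register a NAPI context for each completion ring.  Nitro A0 chips use a
 * dedicated poll handler on the last ring.
 */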
5672 static void bnxt_init_napi(struct bnxt *bp)
5673 {
5674         int i;
5675         unsigned int cp_nr_rings = bp->cp_nr_rings;
5676         struct bnxt_napi *bnapi;
5677
5678         if (bp->flags & BNXT_FLAG_USING_MSIX) {
5679                 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5680                         cp_nr_rings--;
5681                 for (i = 0; i < cp_nr_rings; i++) {
5682                         bnapi = bp->bnapi[i];
5683                         netif_napi_add(bp->dev, &bnapi->napi,
5684                                        bnxt_poll, 64);
5685                 }
5686                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5687                         bnapi = bp->bnapi[cp_nr_rings];
5688                         netif_napi_add(bp->dev, &bnapi->napi,
5689                                        bnxt_poll_nitroa0, 64);
5690                 }
5691         } else {
5692                 bnapi = bp->bnapi[0];
5693                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
5694         }
5695 }
5696
5697 static void bnxt_disable_napi(struct bnxt *bp)
5698 {
5699         int i;
5700
5701         if (!bp->bnapi)
5702                 return;
5703
5704         for (i = 0; i < bp->cp_nr_rings; i++)
5705                 napi_disable(&bp->bnapi[i]->napi);
5706 }
5707
5708 static void bnxt_enable_napi(struct bnxt *bp)
5709 {
5710         int i;
5711
5712         for (i = 0; i < bp->cp_nr_rings; i++) {
5713                 bp->bnapi[i]->in_reset = false;
5714                 napi_enable(&bp->bnapi[i]->napi);
5715         }
5716 }
5717
5718 void bnxt_tx_disable(struct bnxt *bp)
5719 {
5720         int i;
5721         struct bnxt_tx_ring_info *txr;
5722
5723         if (bp->tx_ring) {
5724                 for (i = 0; i < bp->tx_nr_rings; i++) {
5725                         txr = &bp->tx_ring[i];
5726                         txr->dev_state = BNXT_DEV_STATE_CLOSING;
5727                 }
5728         }
5729         /* Stop all TX queues */
5730         netif_tx_disable(bp->dev);
5731         netif_carrier_off(bp->dev);
5732 }
5733
5734 void bnxt_tx_enable(struct bnxt *bp)
5735 {
5736         int i;
5737         struct bnxt_tx_ring_info *txr;
5738
5739         for (i = 0; i < bp->tx_nr_rings; i++) {
5740                 txr = &bp->tx_ring[i];
5741                 txr->dev_state = 0;
5742         }
5743         netif_tx_wake_all_queues(bp->dev);
5744         if (bp->link_info.link_up)
5745                 netif_carrier_on(bp->dev);
5746 }
5747
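/* Log the current link state: speed, duplex, flow control, EEE and FEC. */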
5748 static void bnxt_report_link(struct bnxt *bp)
5749 {
5750         if (bp->link_info.link_up) {
5751                 const char *duplex;
5752                 const char *flow_ctrl;
5753                 u32 speed;
5754                 u16 fec;
5755
5756                 netif_carrier_on(bp->dev);
5757                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
5758                         duplex = "full";
5759                 else
5760                         duplex = "half";
5761                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
5762                         flow_ctrl = "ON - receive & transmit";
5763                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
5764                         flow_ctrl = "ON - transmit";
5765                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
5766                         flow_ctrl = "ON - receive";
5767                 else
5768                         flow_ctrl = "none";
5769                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
5770                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
5771                             speed, duplex, flow_ctrl);
5772                 if (bp->flags & BNXT_FLAG_EEE_CAP)
5773                         netdev_info(bp->dev, "EEE is %s\n",
5774                                     bp->eee.eee_active ? "active" :
5775                                                          "not active");
5776                 fec = bp->link_info.fec_cfg;
5777                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
5778                         netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
5779                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
5780                                     (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
5781                                      (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
5782         } else {
5783                 netif_carrier_off(bp->dev);
5784                 netdev_err(bp->dev, "NIC Link is Down\n");
5785         }
5786 }
5787
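/* Query PHY capabilities from firmware: EEE support, LPI timers, the
 * auto-negotiable speed mask and the port count.
 */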
5788 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
5789 {
5790         int rc = 0;
5791         struct hwrm_port_phy_qcaps_input req = {0};
5792         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5793         struct bnxt_link_info *link_info = &bp->link_info;
5794
5795         if (bp->hwrm_spec_code < 0x10201)
5796                 return 0;
5797
5798         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
5799
5800         mutex_lock(&bp->hwrm_cmd_lock);
5801         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5802         if (rc)
5803                 goto hwrm_phy_qcaps_exit;
5804
5805         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
5806                 struct ethtool_eee *eee = &bp->eee;
5807                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
5808
5809                 bp->flags |= BNXT_FLAG_EEE_CAP;
5810                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5811                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
5812                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
5813                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
5814                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
5815         }
5816         if (resp->supported_speeds_auto_mode)
5817                 link_info->support_auto_speeds =
5818                         le16_to_cpu(resp->supported_speeds_auto_mode);
5819
5820         bp->port_count = resp->port_cnt;
5821
5822 hwrm_phy_qcaps_exit:
5823         mutex_unlock(&bp->hwrm_cmd_lock);
5824         return rc;
5825 }
5826
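/* Query PORT_PHY_QCFG and refresh the cached link_info (speed, pause, EEE,
 * FEC, module status).  When chng_link_state is set, report link up/down
 * transitions.
 */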
5827 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
5828 {
5829         int rc = 0;
5830         struct bnxt_link_info *link_info = &bp->link_info;
5831         struct hwrm_port_phy_qcfg_input req = {0};
5832         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5833         u8 link_up = link_info->link_up;
5834         u16 diff;
5835
5836         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
5837
5838         mutex_lock(&bp->hwrm_cmd_lock);
5839         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5840         if (rc) {
5841                 mutex_unlock(&bp->hwrm_cmd_lock);
5842                 return rc;
5843         }
5844
5845         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
5846         link_info->phy_link_status = resp->link;
5847         link_info->duplex = resp->duplex_cfg;
5848         if (bp->hwrm_spec_code >= 0x10800)
5849                 link_info->duplex = resp->duplex_state;
5850         link_info->pause = resp->pause;
5851         link_info->auto_mode = resp->auto_mode;
5852         link_info->auto_pause_setting = resp->auto_pause;
5853         link_info->lp_pause = resp->link_partner_adv_pause;
5854         link_info->force_pause_setting = resp->force_pause;
5855         link_info->duplex_setting = resp->duplex_cfg;
5856         if (link_info->phy_link_status == BNXT_LINK_LINK)
5857                 link_info->link_speed = le16_to_cpu(resp->link_speed);
5858         else
5859                 link_info->link_speed = 0;
5860         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
5861         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
5862         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
5863         link_info->lp_auto_link_speeds =
5864                 le16_to_cpu(resp->link_partner_adv_speeds);
5865         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
5866         link_info->phy_ver[0] = resp->phy_maj;
5867         link_info->phy_ver[1] = resp->phy_min;
5868         link_info->phy_ver[2] = resp->phy_bld;
5869         link_info->media_type = resp->media_type;
5870         link_info->phy_type = resp->phy_type;
5871         link_info->transceiver = resp->xcvr_pkg_type;
5872         link_info->phy_addr = resp->eee_config_phy_addr &
5873                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
5874         link_info->module_status = resp->module_status;
5875
5876         if (bp->flags & BNXT_FLAG_EEE_CAP) {
5877                 struct ethtool_eee *eee = &bp->eee;
5878                 u16 fw_speeds;
5879
5880                 eee->eee_active = 0;
5881                 if (resp->eee_config_phy_addr &
5882                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
5883                         eee->eee_active = 1;
5884                         fw_speeds = le16_to_cpu(
5885                                 resp->link_partner_adv_eee_link_speed_mask);
5886                         eee->lp_advertised =
5887                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5888                 }
5889
5890                 /* Pull initial EEE config */
5891                 if (!chng_link_state) {
5892                         if (resp->eee_config_phy_addr &
5893                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
5894                                 eee->eee_enabled = 1;
5895
5896                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
5897                         eee->advertised =
5898                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5899
5900                         if (resp->eee_config_phy_addr &
5901                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
5902                                 __le32 tmr;
5903
5904                                 eee->tx_lpi_enabled = 1;
5905                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
5906                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
5907                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
5908                         }
5909                 }
5910         }
5911
5912         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
5913         if (bp->hwrm_spec_code >= 0x10504)
5914                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
5915
5916         /* TODO: need to add more logic to report VF link */
5917         if (chng_link_state) {
5918                 if (link_info->phy_link_status == BNXT_LINK_LINK)
5919                         link_info->link_up = 1;
5920                 else
5921                         link_info->link_up = 0;
5922                 if (link_up != link_info->link_up)
5923                         bnxt_report_link(bp);
5924         } else {
5925                 /* always link down if not required to update link state */
5926                 link_info->link_up = 0;
5927         }
5928         mutex_unlock(&bp->hwrm_cmd_lock);
5929
5930         if (!BNXT_SINGLE_PF(bp))
5931                 return 0;
5932
5933         diff = link_info->support_auto_speeds ^ link_info->advertising;
5934         if ((link_info->support_auto_speeds | diff) !=
5935             link_info->support_auto_speeds) {
5936                 /* An advertised speed is no longer supported, so we need to
5937                  * update the advertisement settings.  Caller holds RTNL
5938                  * so we can modify link settings.
5939                  */
5940                 link_info->advertising = link_info->support_auto_speeds;
5941                 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5942                         bnxt_hwrm_set_link_setting(bp, true, false);
5943         }
5944         return 0;
5945 }
5946
5947 static void bnxt_get_port_module_status(struct bnxt *bp)
5948 {
5949         struct bnxt_link_info *link_info = &bp->link_info;
5950         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
5951         u8 module_status;
5952
5953         if (bnxt_update_link(bp, true))
5954                 return;
5955
5956         module_status = link_info->module_status;
5957         switch (module_status) {
5958         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
5959         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
5960         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
5961                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
5962                             bp->pf.port_id);
5963                 if (bp->hwrm_spec_code >= 0x10201) {
5964                         netdev_warn(bp->dev, "Module part number %s\n",
5965                                     resp->phy_vendor_partnumber);
5966                 }
5967                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
5968                         netdev_warn(bp->dev, "TX is disabled\n");
5969                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
5970                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
5971         }
5972 }
5973
5974 static void
5975 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
5976 {
5977         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
5978                 if (bp->hwrm_spec_code >= 0x10201)
5979                         req->auto_pause =
5980                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
5981                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5982                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
5983                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
5984                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
5985                 req->enables |=
5986                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5987         } else {
5988                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5989                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
5990                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
5991                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
5992                 req->enables |=
5993                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
5994                 if (bp->hwrm_spec_code >= 0x10201) {
5995                         req->auto_pause = req->force_pause;
5996                         req->enables |= cpu_to_le32(
5997                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5998                 }
5999         }
6000 }
6001
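/* Fill the PHY_CFG request with either the autoneg speed mask (restarting
 * autoneg) or a forced link speed.
 */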
6002 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
6003                                       struct hwrm_port_phy_cfg_input *req)
6004 {
6005         u8 autoneg = bp->link_info.autoneg;
6006         u16 fw_link_speed = bp->link_info.req_link_speed;
6007         u16 advertising = bp->link_info.advertising;
6008
6009         if (autoneg & BNXT_AUTONEG_SPEED) {
6010                 req->auto_mode |=
6011                         PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
6012
6013                 req->enables |= cpu_to_le32(
6014                         PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
6015                 req->auto_link_speed_mask = cpu_to_le16(advertising);
6016
6017                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
6018                 req->flags |=
6019                         cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
6020         } else {
6021                 req->force_link_speed = cpu_to_le16(fw_link_speed);
6022                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
6023         }
6024
6025         /* tell chimp that the setting takes effect immediately */
6026         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
6027 }
6028
6029 int bnxt_hwrm_set_pause(struct bnxt *bp)
6030 {
6031         struct hwrm_port_phy_cfg_input req = {0};
6032         int rc;
6033
6034         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
6035         bnxt_hwrm_set_pause_common(bp, &req);
6036
6037         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
6038             bp->link_info.force_link_chng)
6039                 bnxt_hwrm_set_link_common(bp, &req);
6040
6041         mutex_lock(&bp->hwrm_cmd_lock);
6042         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6043         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
6044                 /* since changing the pause setting doesn't trigger any link
6045                  * change event, the driver needs to update the current pause
6046                  * result upon successful return of the phy_cfg command
6047                  */
6048                 bp->link_info.pause =
6049                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
6050                 bp->link_info.auto_pause_setting = 0;
6051                 if (!bp->link_info.force_link_chng)
6052                         bnxt_report_link(bp);
6053         }
6054         bp->link_info.force_link_chng = false;
6055         mutex_unlock(&bp->hwrm_cmd_lock);
6056         return rc;
6057 }
6058
6059 static void bnxt_hwrm_set_eee(struct bnxt *bp,
6060                               struct hwrm_port_phy_cfg_input *req)
6061 {
6062         struct ethtool_eee *eee = &bp->eee;
6063
6064         if (eee->eee_enabled) {
6065                 u16 eee_speeds;
6066                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
6067
6068                 if (eee->tx_lpi_enabled)
6069                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
6070                 else
6071                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
6072
6073                 req->flags |= cpu_to_le32(flags);
6074                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
6075                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
6076                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
6077         } else {
6078                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
6079         }
6080 }
6081
6082 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
6083 {
6084         struct hwrm_port_phy_cfg_input req = {0};
6085
6086         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
6087         if (set_pause)
6088                 bnxt_hwrm_set_pause_common(bp, &req);
6089
6090         bnxt_hwrm_set_link_common(bp, &req);
6091
6092         if (set_eee)
6093                 bnxt_hwrm_set_eee(bp, &req);
6094         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6095 }
6096
6097 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
6098 {
6099         struct hwrm_port_phy_cfg_input req = {0};
6100
6101         if (!BNXT_SINGLE_PF(bp))
6102                 return 0;
6103
6104         if (pci_num_vf(bp->pdev))
6105                 return 0;
6106
6107         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
6108         req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
6109         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6110 }
6111
6112 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
6113 {
6114         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6115         struct hwrm_port_led_qcaps_input req = {0};
6116         struct bnxt_pf_info *pf = &bp->pf;
6117         int rc;
6118
6119         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
6120                 return 0;
6121
6122         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
6123         req.port_id = cpu_to_le16(pf->port_id);
6124         mutex_lock(&bp->hwrm_cmd_lock);
6125         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6126         if (rc) {
6127                 mutex_unlock(&bp->hwrm_cmd_lock);
6128                 return rc;
6129         }
6130         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
6131                 int i;
6132
6133                 bp->num_leds = resp->num_leds;
6134                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
6135                                                  bp->num_leds);
6136                 for (i = 0; i < bp->num_leds; i++) {
6137                         struct bnxt_led_info *led = &bp->leds[i];
6138                         __le16 caps = led->led_state_caps;
6139
6140                         if (!led->led_group_id ||
6141                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
6142                                 bp->num_leds = 0;
6143                                 break;
6144                         }
6145                 }
6146         }
6147         mutex_unlock(&bp->hwrm_cmd_lock);
6148         return 0;
6149 }
6150
6151 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
6152 {
6153         struct hwrm_wol_filter_alloc_input req = {0};
6154         struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6155         int rc;
6156
6157         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
6158         req.port_id = cpu_to_le16(bp->pf.port_id);
6159         req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
6160         req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
6161         memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
6162         mutex_lock(&bp->hwrm_cmd_lock);
6163         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6164         if (!rc)
6165                 bp->wol_filter_id = resp->wol_filter_id;
6166         mutex_unlock(&bp->hwrm_cmd_lock);
6167         return rc;
6168 }
6169
6170 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
6171 {
6172         struct hwrm_wol_filter_free_input req = {0};
6173         int rc;
6174
6175         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
6176         req.port_id = cpu_to_le16(bp->pf.port_id);
6177         req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
6178         req.wol_filter_id = bp->wol_filter_id;
6179         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6180         return rc;
6181 }
6182
6183 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
6184 {
6185         struct hwrm_wol_filter_qcfg_input req = {0};
6186         struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6187         u16 next_handle = 0;
6188         int rc;
6189
6190         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
6191         req.port_id = cpu_to_le16(bp->pf.port_id);
6192         req.handle = cpu_to_le16(handle);
6193         mutex_lock(&bp->hwrm_cmd_lock);
6194         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6195         if (!rc) {
6196                 next_handle = le16_to_cpu(resp->next_handle);
6197                 if (next_handle != 0) {
6198                         if (resp->wol_type ==
6199                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
6200                                 bp->wol = 1;
6201                                 bp->wol_filter_id = resp->wol_filter_id;
6202                         }
6203                 }
6204         }
6205         mutex_unlock(&bp->hwrm_cmd_lock);
6206         return next_handle;
6207 }
6208
6209 static void bnxt_get_wol_settings(struct bnxt *bp)
6210 {
6211         u16 handle = 0;
6212
6213         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
6214                 return;
6215
6216         do {
6217                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
6218         } while (handle && handle != 0xffff);
6219 }
6220
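/* Check that the EEE configuration is consistent with the autoneg settings.
 * Returns false after trimming or disabling EEE if it is not.
 */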
6221 static bool bnxt_eee_config_ok(struct bnxt *bp)
6222 {
6223         struct ethtool_eee *eee = &bp->eee;
6224         struct bnxt_link_info *link_info = &bp->link_info;
6225
6226         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
6227                 return true;
6228
6229         if (eee->eee_enabled) {
6230                 u32 advertising =
6231                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
6232
6233                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
6234                         eee->eee_enabled = 0;
6235                         return false;
6236                 }
6237                 if (eee->advertised & ~advertising) {
6238                         eee->advertised = advertising & eee->supported;
6239                         return false;
6240                 }
6241         }
6242         return true;
6243 }
6244
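/* Compare the requested pause, speed and EEE settings with the firmware
 * state and issue PORT_PHY_CFG only when something needs to change.
 */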
6245 static int bnxt_update_phy_setting(struct bnxt *bp)
6246 {
6247         int rc;
6248         bool update_link = false;
6249         bool update_pause = false;
6250         bool update_eee = false;
6251         struct bnxt_link_info *link_info = &bp->link_info;
6252
6253         rc = bnxt_update_link(bp, true);
6254         if (rc) {
6255                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
6256                            rc);
6257                 return rc;
6258         }
6259         if (!BNXT_SINGLE_PF(bp))
6260                 return 0;
6261
6262         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
6263             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
6264             link_info->req_flow_ctrl)
6265                 update_pause = true;
6266         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
6267             link_info->force_pause_setting != link_info->req_flow_ctrl)
6268                 update_pause = true;
6269         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
6270                 if (BNXT_AUTO_MODE(link_info->auto_mode))
6271                         update_link = true;
6272                 if (link_info->req_link_speed != link_info->force_link_speed)
6273                         update_link = true;
6274                 if (link_info->req_duplex != link_info->duplex_setting)
6275                         update_link = true;
6276         } else {
6277                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
6278                         update_link = true;
6279                 if (link_info->advertising != link_info->auto_link_speeds)
6280                         update_link = true;
6281         }
6282
6283         /* The last close may have shut down the link, so we need to call
6284          * PHY_CFG to bring it back up.
6285          */
6286         if (!netif_carrier_ok(bp->dev))
6287                 update_link = true;
6288
6289         if (!bnxt_eee_config_ok(bp))
6290                 update_eee = true;
6291
6292         if (update_link)
6293                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
6294         else if (update_pause)
6295                 rc = bnxt_hwrm_set_pause(bp);
6296         if (rc) {
6297                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
6298                            rc);
6299                 return rc;
6300         }
6301
6302         return rc;
6303 }
6304
6305 /* Common routine to pre-map certain register blocks to a different GRC window.
6306  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
6307  * in the PF and 3 windows in the VF can be customized to map in different
6308  * register blocks.
6309  */
6310 static void bnxt_preset_reg_win(struct bnxt *bp)
6311 {
6312         if (BNXT_PF(bp)) {
6313                 /* CAG registers map to GRC window #4 */
6314                 writel(BNXT_CAG_REG_BASE,
6315                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
6316         }
6317 }
6318
6319 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6320 {
6321         int rc = 0;
6322
6323         bnxt_preset_reg_win(bp);
6324         netif_carrier_off(bp->dev);
6325         if (irq_re_init) {
6326                 rc = bnxt_setup_int_mode(bp);
6327                 if (rc) {
6328                         netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
6329                                    rc);
6330                         return rc;
6331                 }
6332         }
6333         if ((bp->flags & BNXT_FLAG_RFS) &&
6334             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
6335                 /* disable RFS if falling back to INTA */
6336                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
6337                 bp->flags &= ~BNXT_FLAG_RFS;
6338         }
6339
6340         rc = bnxt_alloc_mem(bp, irq_re_init);
6341         if (rc) {
6342                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
6343                 goto open_err_free_mem;
6344         }
6345
6346         if (irq_re_init) {
6347                 bnxt_init_napi(bp);
6348                 rc = bnxt_request_irq(bp);
6349                 if (rc) {
6350                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
6351                         goto open_err_irq;
6352                 }
6353         }
6354
6355         bnxt_enable_napi(bp);
6356
6357         rc = bnxt_init_nic(bp, irq_re_init);
6358         if (rc) {
6359                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
6360                 goto open_err;
6361         }
6362
6363         if (link_re_init) {
6364                 mutex_lock(&bp->link_lock);
6365                 rc = bnxt_update_phy_setting(bp);
6366                 mutex_unlock(&bp->link_lock);
6367                 if (rc)
6368                         netdev_warn(bp->dev, "failed to update phy settings\n");
6369         }
6370
6371         if (irq_re_init)
6372                 udp_tunnel_get_rx_info(bp->dev);
6373
6374         set_bit(BNXT_STATE_OPEN, &bp->state);
6375         bnxt_enable_int(bp);
6376         /* Enable TX queues */
6377         bnxt_tx_enable(bp);
6378         mod_timer(&bp->timer, jiffies + bp->current_interval);
6379         /* Poll link status and check for SFP+ module status */
6380         bnxt_get_port_module_status(bp);
6381
6382         /* VF-reps may need to be re-opened after the PF is re-opened */
6383         if (BNXT_PF(bp))
6384                 bnxt_vf_reps_open(bp);
6385         return 0;
6386
6387 open_err:
6388         bnxt_disable_napi(bp);
6389
6390 open_err_irq:
6391         bnxt_del_napi(bp);
6392
6393 open_err_free_mem:
6394         bnxt_free_skbs(bp);
6395         bnxt_free_irq(bp);
6396         bnxt_free_mem(bp, true);
6397         return rc;
6398 }
6399
6400 /* rtnl_lock held */
6401 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6402 {
6403         int rc = 0;
6404
6405         rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
6406         if (rc) {
6407                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
6408                 dev_close(bp->dev);
6409         }
6410         return rc;
6411 }
6412
6413 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
6414  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
6415  * self-tests.
6416  */
6417 int bnxt_half_open_nic(struct bnxt *bp)
6418 {
6419         int rc = 0;
6420
6421         rc = bnxt_alloc_mem(bp, false);
6422         if (rc) {
6423                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
6424                 goto half_open_err;
6425         }
6426         rc = bnxt_init_nic(bp, false);
6427         if (rc) {
6428                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
6429                 goto half_open_err;
6430         }
6431         return 0;
6432
6433 half_open_err:
6434         bnxt_free_skbs(bp);
6435         bnxt_free_mem(bp, false);
6436         dev_close(bp->dev);
6437         return rc;
6438 }
6439
6440 /* rtnl_lock held, this call can only be made after a previous successful
6441  * call to bnxt_half_open_nic().
6442  */
6443 void bnxt_half_close_nic(struct bnxt *bp)
6444 {
6445         bnxt_hwrm_resource_free(bp, false, false);
6446         bnxt_free_skbs(bp);
6447         bnxt_free_mem(bp, false);
6448 }
6449
6450 static int bnxt_open(struct net_device *dev)
6451 {
6452         struct bnxt *bp = netdev_priv(dev);
6453
6454         return __bnxt_open_nic(bp, true, true);
6455 }
6456
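/* The driver is busy while the service task or a stats read is in progress;
 * bnxt_close_nic() waits for this to clear before tearing down the rings.
 */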
6457 static bool bnxt_drv_busy(struct bnxt *bp)
6458 {
6459         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
6460                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
6461 }
6462
6463 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6464 {
6465         int rc = 0;
6466
6467 #ifdef CONFIG_BNXT_SRIOV
6468         if (bp->sriov_cfg) {
6469                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
6470                                                       !bp->sriov_cfg,
6471                                                       BNXT_SRIOV_CFG_WAIT_TMO);
6472                 if (rc)
6473                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
6474         }
6475
6476         /* Close the VF-reps before closing PF */
6477         if (BNXT_PF(bp))
6478                 bnxt_vf_reps_close(bp);
6479 #endif
6480         /* Change device state to avoid TX queue wake-ups */
6481         bnxt_tx_disable(bp);
6482
6483         clear_bit(BNXT_STATE_OPEN, &bp->state);
6484         smp_mb__after_atomic();
6485         while (bnxt_drv_busy(bp))
6486                 msleep(20);
6487
6488         /* Flush rings and disable interrupts */
6489         bnxt_shutdown_nic(bp, irq_re_init);
6490
6491         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
6492
6493         bnxt_disable_napi(bp);
6494         del_timer_sync(&bp->timer);
6495         bnxt_free_skbs(bp);
6496
6497         if (irq_re_init) {
6498                 bnxt_free_irq(bp);
6499                 bnxt_del_napi(bp);
6500         }
6501         bnxt_free_mem(bp, irq_re_init);
6502         return rc;
6503 }
6504
6505 static int bnxt_close(struct net_device *dev)
6506 {
6507         struct bnxt *bp = netdev_priv(dev);
6508
6509         bnxt_close_nic(bp, true, true);
6510         bnxt_hwrm_shutdown_link(bp);
6511         return 0;
6512 }
6513
6514 /* rtnl_lock held */
6515 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6516 {
6517         switch (cmd) {
6518         case SIOCGMIIPHY:
6519                 /* fallthru */
6520         case SIOCGMIIREG: {
6521                 if (!netif_running(dev))
6522                         return -EAGAIN;
6523
6524                 return 0;
6525         }
6526
6527         case SIOCSMIIREG:
6528                 if (!netif_running(dev))
6529                         return -EAGAIN;
6530
6531                 return 0;
6532
6533         default:
6534                 /* do nothing */
6535                 break;
6536         }
6537         return -EOPNOTSUPP;
6538 }
6539
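/* ndo_get_stats64: aggregate the per-ring hardware counters and, when port
 * statistics are enabled, the port-level error counters.
 */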
6540 static void
6541 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6542 {
6543         u32 i;
6544         struct bnxt *bp = netdev_priv(dev);
6545
6546         set_bit(BNXT_STATE_READ_STATS, &bp->state);
6547         /* Make sure bnxt_close_nic() sees that we are reading stats before
6548          * we check the BNXT_STATE_OPEN flag.
6549          */
6550         smp_mb__after_atomic();
6551         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
6552                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
6553                 return;
6554         }
6555
6556         /* TODO check if we need to synchronize with bnxt_close path */
6557         for (i = 0; i < bp->cp_nr_rings; i++) {
6558                 struct bnxt_napi *bnapi = bp->bnapi[i];
6559                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6560                 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
6561
6562                 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
6563                 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
6564                 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
6565
6566                 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
6567                 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
6568                 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
6569
6570                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
6571                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
6572                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
6573
6574                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
6575                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
6576                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
6577
6578                 stats->rx_missed_errors +=
6579                         le64_to_cpu(hw_stats->rx_discard_pkts);
6580
6581                 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
6582
6583                 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
6584         }
6585
6586         if (bp->flags & BNXT_FLAG_PORT_STATS) {
6587                 struct rx_port_stats *rx = bp->hw_rx_port_stats;
6588                 struct tx_port_stats *tx = bp->hw_tx_port_stats;
6589
6590                 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
6591                 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
6592                 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
6593                                           le64_to_cpu(rx->rx_ovrsz_frames) +
6594                                           le64_to_cpu(rx->rx_runt_frames);
6595                 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
6596                                    le64_to_cpu(rx->rx_jbr_frames);
6597                 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
6598                 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
6599                 stats->tx_errors = le64_to_cpu(tx->tx_err);
6600         }
6601         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
6602 }
6603
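/* Copy the device multicast list into the default VNIC and report whether
 * it changed.  Fall back to ALL_MCAST when the list is too long.
 */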
6604 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
6605 {
6606         struct net_device *dev = bp->dev;
6607         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6608         struct netdev_hw_addr *ha;
6609         u8 *haddr;
6610         int mc_count = 0;
6611         bool update = false;
6612         int off = 0;
6613
6614         netdev_for_each_mc_addr(ha, dev) {
6615                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
6616                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
6617                         vnic->mc_list_count = 0;
6618                         return false;
6619                 }
6620                 haddr = ha->addr;
6621                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
6622                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
6623                         update = true;
6624                 }
6625                 off += ETH_ALEN;
6626                 mc_count++;
6627         }
6628         if (mc_count)
6629                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
6630
6631         if (mc_count != vnic->mc_list_count) {
6632                 vnic->mc_list_count = mc_count;
6633                 update = true;
6634         }
6635         return update;
6636 }
6637
6638 static bool bnxt_uc_list_updated(struct bnxt *bp)
6639 {
6640         struct net_device *dev = bp->dev;
6641         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6642         struct netdev_hw_addr *ha;
6643         int off = 0;
6644
6645         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
6646                 return true;
6647
6648         netdev_for_each_uc_addr(ha, dev) {
6649                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
6650                         return true;
6651
6652                 off += ETH_ALEN;
6653         }
6654         return false;
6655 }
6656
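/* ndo_set_rx_mode: recompute the RX mask (promiscuous, all-multicast,
 * multicast) and defer the HWRM update to the service task if anything
 * changed.
 */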
6657 static void bnxt_set_rx_mode(struct net_device *dev)
6658 {
6659         struct bnxt *bp = netdev_priv(dev);
6660         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6661         u32 mask = vnic->rx_mask;
6662         bool mc_update = false;
6663         bool uc_update;
6664
6665         if (!netif_running(dev))
6666                 return;
6667
6668         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
6669                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
6670                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
6671
6672         if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
6673                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
6674
6675         uc_update = bnxt_uc_list_updated(bp);
6676
6677         if (dev->flags & IFF_ALLMULTI) {
6678                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
6679                 vnic->mc_list_count = 0;
6680         } else {
6681                 mc_update = bnxt_mc_list_updated(bp, &mask);
6682         }
6683
6684         if (mask != vnic->rx_mask || uc_update || mc_update) {
6685                 vnic->rx_mask = mask;
6686
6687                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
6688                 bnxt_queue_sp_work(bp);
6689         }
6690 }
6691
6692 static int bnxt_cfg_rx_mode(struct bnxt *bp)
6693 {
6694         struct net_device *dev = bp->dev;
6695         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6696         struct netdev_hw_addr *ha;
6697         int i, off = 0, rc;
6698         bool uc_update;
6699
6700         netif_addr_lock_bh(dev);
6701         uc_update = bnxt_uc_list_updated(bp);
6702         netif_addr_unlock_bh(dev);
6703
6704         if (!uc_update)
6705                 goto skip_uc;
6706
6707         mutex_lock(&bp->hwrm_cmd_lock);
6708         for (i = 1; i < vnic->uc_filter_count; i++) {
6709                 struct hwrm_cfa_l2_filter_free_input req = {0};
6710
6711                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
6712                                        -1);
6713
6714                 req.l2_filter_id = vnic->fw_l2_filter_id[i];
6715
6716                 rc = _hwrm_send_message(bp, &req, sizeof(req),
6717                                         HWRM_CMD_TIMEOUT);
6718         }
6719         mutex_unlock(&bp->hwrm_cmd_lock);
6720
6721         vnic->uc_filter_count = 1;
6722
6723         netif_addr_lock_bh(dev);
6724         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
6725                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
6726         } else {
6727                 netdev_for_each_uc_addr(ha, dev) {
6728                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
6729                         off += ETH_ALEN;
6730                         vnic->uc_filter_count++;
6731                 }
6732         }
6733         netif_addr_unlock_bh(dev);
6734
6735         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
6736                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
6737                 if (rc) {
6738                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
6739                                    rc);
6740                         vnic->uc_filter_count = i;
6741                         return rc;
6742                 }
6743         }
6744
6745 skip_uc:
6746         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
6747         if (rc)
6748                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
6749                            rc);
6750
6751         return rc;
6752 }
6753
6754 /* If the chip and firmware support RFS */
6755 static bool bnxt_rfs_supported(struct bnxt *bp)
6756 {
6757         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
6758                 return true;
6759         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6760                 return true;
6761         return false;
6762 }
6763
6764 /* If runtime conditions support RFS */
6765 static bool bnxt_rfs_capable(struct bnxt *bp)
6766 {
6767 #ifdef CONFIG_RFS_ACCEL
6768         int vnics, max_vnics, max_rss_ctxs;
6769
6770         if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
6771                 return false;
6772
6773         vnics = 1 + bp->rx_nr_rings;
6774         max_vnics = bnxt_get_max_func_vnics(bp);
6775         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
6776
6777         /* RSS contexts not a limiting factor */
6778         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6779                 max_rss_ctxs = max_vnics;
6780         if (vnics > max_vnics || vnics > max_rss_ctxs) {
6781                 netdev_warn(bp->dev,
6782                             "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
6783                             min(max_rss_ctxs - 1, max_vnics - 1));
6784                 return false;
6785         }
6786
6787         return true;
6788 #else
6789         return false;
6790 #endif
6791 }
6792
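/* ndo_fix_features: drop NTUPLE when RFS is not possible and keep CTAG and
 * STAG RX VLAN acceleration in sync (and off for VFs with an assigned VLAN).
 */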
6793 static netdev_features_t bnxt_fix_features(struct net_device *dev,
6794                                            netdev_features_t features)
6795 {
6796         struct bnxt *bp = netdev_priv(dev);
6797
6798         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
6799                 features &= ~NETIF_F_NTUPLE;
6800
6801         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
6802          * turned on or off together.
6803          */
6804         if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
6805             (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
6806                 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
6807                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
6808                                       NETIF_F_HW_VLAN_STAG_RX);
6809                 else
6810                         features |= NETIF_F_HW_VLAN_CTAG_RX |
6811                                     NETIF_F_HW_VLAN_STAG_RX;
6812         }
6813 #ifdef CONFIG_BNXT_SRIOV
6814         if (BNXT_VF(bp)) {
6815                 if (bp->vf.vlan) {
6816                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
6817                                       NETIF_F_HW_VLAN_STAG_RX);
6818                 }
6819         }
6820 #endif
6821         return features;
6822 }
6823
6824 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
6825 {
6826         struct bnxt *bp = netdev_priv(dev);
6827         u32 flags = bp->flags;
6828         u32 changes;
6829         int rc = 0;
6830         bool re_init = false;
6831         bool update_tpa = false;
6832
6833         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
6834         if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
6835                 flags |= BNXT_FLAG_GRO;
6836         if (features & NETIF_F_LRO)
6837                 flags |= BNXT_FLAG_LRO;
6838
6839         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
6840                 flags &= ~BNXT_FLAG_TPA;
6841
6842         if (features & NETIF_F_HW_VLAN_CTAG_RX)
6843                 flags |= BNXT_FLAG_STRIP_VLAN;
6844
6845         if (features & NETIF_F_NTUPLE)
6846                 flags |= BNXT_FLAG_RFS;
6847
6848         changes = flags ^ bp->flags;
6849         if (changes & BNXT_FLAG_TPA) {
6850                 update_tpa = true;
6851                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
6852                     (flags & BNXT_FLAG_TPA) == 0)
6853                         re_init = true;
6854         }
6855
6856         if (changes & ~BNXT_FLAG_TPA)
6857                 re_init = true;
6858
6859         if (flags != bp->flags) {
6860                 u32 old_flags = bp->flags;
6861
6862                 bp->flags = flags;
6863
6864                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
6865                         if (update_tpa)
6866                                 bnxt_set_ring_params(bp);
6867                         return rc;
6868                 }
6869
6870                 if (re_init) {
6871                         bnxt_close_nic(bp, false, false);
6872                         if (update_tpa)
6873                                 bnxt_set_ring_params(bp);
6874
6875                         return bnxt_open_nic(bp, false, false);
6876                 }
6877                 if (update_tpa) {
6878                         rc = bnxt_set_tpa(bp,
6879                                           (flags & BNXT_FLAG_TPA) ?
6880                                           true : false);
6881                         if (rc)
6882                                 bp->flags = old_flags;
6883                 }
6884         }
6885         return rc;
6886 }
6887
6888 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
6889 {
6890         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
6891         int i = bnapi->index;
6892
6893         if (!txr)
6894                 return;
6895
6896         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
6897                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
6898                     txr->tx_cons);
6899 }
6900
6901 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
6902 {
6903         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
6904         int i = bnapi->index;
6905
6906         if (!rxr)
6907                 return;
6908
6909         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
6910                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
6911                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
6912                     rxr->rx_sw_agg_prod);
6913 }
6914
6915 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
6916 {
6917         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6918         int i = bnapi->index;
6919
6920         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
6921                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
6922 }
6923
6924 static void bnxt_dbg_dump_states(struct bnxt *bp)
6925 {
6926         int i;
6927         struct bnxt_napi *bnapi;
6928
6929         for (i = 0; i < bp->cp_nr_rings; i++) {
6930                 bnapi = bp->bnapi[i];
6931                 if (netif_msg_drv(bp)) {
6932                         bnxt_dump_tx_sw_state(bnapi);
6933                         bnxt_dump_rx_sw_state(bnapi);
6934                         bnxt_dump_cp_sw_state(bnapi);
6935                 }
6936         }
6937 }
6938
6939 static void bnxt_reset_task(struct bnxt *bp, bool silent)
6940 {
6941         if (!silent)
6942                 bnxt_dbg_dump_states(bp);
6943         if (netif_running(bp->dev)) {
6944                 int rc;
6945
6946                 if (!silent)
6947                         bnxt_ulp_stop(bp);
6948                 bnxt_close_nic(bp, false, false);
6949                 rc = bnxt_open_nic(bp, false, false);
6950                 if (!silent && !rc)
6951                         bnxt_ulp_start(bp);
6952         }
6953 }
6954
6955 static void bnxt_tx_timeout(struct net_device *dev)
6956 {
6957         struct bnxt *bp = netdev_priv(dev);
6958
6959         netdev_err(bp->dev,  "TX timeout detected, starting reset task!\n");
6960         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
6961         bnxt_queue_sp_work(bp);
6962 }
6963
6964 #ifdef CONFIG_NET_POLL_CONTROLLER
6965 static void bnxt_poll_controller(struct net_device *dev)
6966 {
6967         struct bnxt *bp = netdev_priv(dev);
6968         int i;
6969
6970         /* Only process tx rings/combined rings in netpoll mode. */
6971         for (i = 0; i < bp->tx_nr_rings; i++) {
6972                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6973
6974                 napi_schedule(&txr->bnapi->napi);
6975         }
6976 }
6977 #endif
6978
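/* Periodic timer: schedule port statistics collection in the service task
 * while the link is up.
 */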
6979 static void bnxt_timer(unsigned long data)
6980 {
6981         struct bnxt *bp = (struct bnxt *)data;
6982         struct net_device *dev = bp->dev;
6983
6984         if (!netif_running(dev))
6985                 return;
6986
6987         if (atomic_read(&bp->intr_sem) != 0)
6988                 goto bnxt_restart_timer;
6989
6990         if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
6991             bp->stats_coal_ticks) {
6992                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
6993                 bnxt_queue_sp_work(bp);
6994         }
6995 bnxt_restart_timer:
6996         mod_timer(&bp->timer, jiffies + bp->current_interval);
6997 }
6998
6999 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
7000 {
7001         /* We are called from bnxt_sp_task, which has BNXT_STATE_IN_SP_TASK
7002          * set.  If the device is being closed, bnxt_close() may be holding
7003          * rtnl_lock() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
7004          * must clear BNXT_STATE_IN_SP_TASK before taking rtnl_lock().
7005          */
7006         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7007         rtnl_lock();
7008 }
7009
7010 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
7011 {
7012         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7013         rtnl_unlock();
7014 }
7015
7016 /* Only called from bnxt_sp_task() */
7017 static void bnxt_reset(struct bnxt *bp, bool silent)
7018 {
7019         bnxt_rtnl_lock_sp(bp);
7020         if (test_bit(BNXT_STATE_OPEN, &bp->state))
7021                 bnxt_reset_task(bp, silent);
7022         bnxt_rtnl_unlock_sp(bp);
7023 }
7024
7025 static void bnxt_cfg_ntp_filters(struct bnxt *);
7026
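/* Deferred service task: handles RX mode updates, NTUPLE filters, forwarded
 * VF requests, tunnel port add/free, port statistics, link and module
 * events, and reset requests queued via bp->sp_event.
 */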
7027 static void bnxt_sp_task(struct work_struct *work)
7028 {
7029         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
7030
7031         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7032         smp_mb__after_atomic();
7033         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
7034                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7035                 return;
7036         }
7037
7038         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
7039                 bnxt_cfg_rx_mode(bp);
7040
7041         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
7042                 bnxt_cfg_ntp_filters(bp);
7043         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
7044                 bnxt_hwrm_exec_fwd_req(bp);
7045         if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
7046                 bnxt_hwrm_tunnel_dst_port_alloc(
7047                         bp, bp->vxlan_port,
7048                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7049         }
7050         if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
7051                 bnxt_hwrm_tunnel_dst_port_free(
7052                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7053         }
7054         if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
7055                 bnxt_hwrm_tunnel_dst_port_alloc(
7056                         bp, bp->nge_port,
7057                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7058         }
7059         if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
7060                 bnxt_hwrm_tunnel_dst_port_free(
7061                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7062         }
7063         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
7064                 bnxt_hwrm_port_qstats(bp);
7065
7066         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
7067                 int rc;
7068
7069                 mutex_lock(&bp->link_lock);
7070                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
7071                                        &bp->sp_event))
7072                         bnxt_hwrm_phy_qcaps(bp);
7073
7074                 rc = bnxt_update_link(bp, true);
7075                 mutex_unlock(&bp->link_lock);
7076                 if (rc)
7077                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
7078                                    rc);
7079         }
7080         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
7081                 mutex_lock(&bp->link_lock);
7082                 bnxt_get_port_module_status(bp);
7083                 mutex_unlock(&bp->link_lock);
7084         }
7085         /* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
7086          * must be the last functions called before exiting.
7087          */
7088         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
7089                 bnxt_reset(bp, false);
7090
7091         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
7092                 bnxt_reset(bp, true);
7093
7094         smp_mb__before_atomic();
7095         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7096 }
7097
7098 /* Under rtnl_lock */
7099 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
7100                      int tx_xdp)
7101 {
7102         int max_rx, max_tx, tx_sets = 1;
7103         int tx_rings_needed;
7104         int rc;
7105
7106         if (tcs)
7107                 tx_sets = tcs;
7108
7109         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
7110         if (rc)
7111                 return rc;
7112
7113         if (max_rx < rx)
7114                 return -ENOMEM;
7115
7116         tx_rings_needed = tx * tx_sets + tx_xdp;
7117         if (max_tx < tx_rings_needed)
7118                 return -ENOMEM;
7119
7120         return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed);
7121 }
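
/* For example, bnxt_setup_mq_tc() below validates a traffic-class change
 * with:
 *
 *      rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
 *                            sh, tc, bp->tx_nr_rings_xdp);
 *
 * and only closes and re-opens the NIC if the requested ring counts fit.
 */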
7122
7123 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
7124 {
7125         if (bp->bar2) {
7126                 pci_iounmap(pdev, bp->bar2);
7127                 bp->bar2 = NULL;
7128         }
7129
7130         if (bp->bar1) {
7131                 pci_iounmap(pdev, bp->bar1);
7132                 bp->bar1 = NULL;
7133         }
7134
7135         if (bp->bar0) {
7136                 pci_iounmap(pdev, bp->bar0);
7137                 bp->bar0 = NULL;
7138         }
7139 }
7140
7141 static void bnxt_cleanup_pci(struct bnxt *bp)
7142 {
7143         bnxt_unmap_bars(bp, bp->pdev);
7144         pci_release_regions(bp->pdev);
7145         pci_disable_device(bp->pdev);
7146 }
7147
7148 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
7149 {
7150         int rc;
7151         struct bnxt *bp = netdev_priv(dev);
7152
7153         SET_NETDEV_DEV(dev, &pdev->dev);
7154
7155         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7156         rc = pci_enable_device(pdev);
7157         if (rc) {
7158                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7159                 goto init_err;
7160         }
7161
7162         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7163                 dev_err(&pdev->dev,
7164                         "Cannot find PCI device base address, aborting\n");
7165                 rc = -ENODEV;
7166                 goto init_err_disable;
7167         }
7168
7169         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7170         if (rc) {
7171                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7172                 goto init_err_disable;
7173         }
7174
7175         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
7176             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
7177                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
                     rc = -EIO;
7178                 goto init_err_disable;
7179         }
7180
7181         pci_set_master(pdev);
7182
7183         bp->dev = dev;
7184         bp->pdev = pdev;
7185
7186         bp->bar0 = pci_ioremap_bar(pdev, 0);
7187         if (!bp->bar0) {
7188                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
7189                 rc = -ENOMEM;
7190                 goto init_err_release;
7191         }
7192
7193         bp->bar1 = pci_ioremap_bar(pdev, 2);
7194         if (!bp->bar1) {
7195                 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
7196                 rc = -ENOMEM;
7197                 goto init_err_release;
7198         }
7199
7200         bp->bar2 = pci_ioremap_bar(pdev, 4);
7201         if (!bp->bar2) {
7202                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
7203                 rc = -ENOMEM;
7204                 goto init_err_release;
7205         }
7206
7207         pci_enable_pcie_error_reporting(pdev);
7208
7209         INIT_WORK(&bp->sp_task, bnxt_sp_task);
7210
7211         spin_lock_init(&bp->ntp_fltr_lock);
7212
7213         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
7214         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
7215
7216         /* tick values in microseconds */
7217         bp->rx_coal_ticks = 12;
7218         bp->rx_coal_bufs = 30;
7219         bp->rx_coal_ticks_irq = 1;
7220         bp->rx_coal_bufs_irq = 2;
7221
7222         bp->tx_coal_ticks = 25;
7223         bp->tx_coal_bufs = 30;
7224         bp->tx_coal_ticks_irq = 2;
7225         bp->tx_coal_bufs_irq = 2;
7226
7227         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
7228
7229         init_timer(&bp->timer);
7230         bp->timer.data = (unsigned long)bp;
7231         bp->timer.function = bnxt_timer;
7232         bp->current_interval = BNXT_TIMER_INTERVAL;
7233
7234         clear_bit(BNXT_STATE_OPEN, &bp->state);
7235         return 0;
7236
7237 init_err_release:
7238         bnxt_unmap_bars(bp, pdev);
7239         pci_release_regions(pdev);
7240
7241 init_err_disable:
7242         pci_disable_device(pdev);
7243
7244 init_err:
7245         return rc;
7246 }
7247
7248 /* rtnl_lock held */
7249 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
7250 {
7251         struct sockaddr *addr = p;
7252         struct bnxt *bp = netdev_priv(dev);
7253         int rc = 0;
7254
7255         if (!is_valid_ether_addr(addr->sa_data))
7256                 return -EADDRNOTAVAIL;
7257
7258         rc = bnxt_approve_mac(bp, addr->sa_data);
7259         if (rc)
7260                 return rc;
7261
7262         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
7263                 return 0;
7264
7265         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7266         if (netif_running(dev)) {
7267                 bnxt_close_nic(bp, false, false);
7268                 rc = bnxt_open_nic(bp, false, false);
7269         }
7270
7271         return rc;
7272 }
7273
7274 /* rtnl_lock held */
7275 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
7276 {
7277         struct bnxt *bp = netdev_priv(dev);
7278
7279         if (netif_running(dev))
7280                 bnxt_close_nic(bp, false, false);
7281
7282         dev->mtu = new_mtu;
7283         bnxt_set_ring_params(bp);
7284
7285         if (netif_running(dev))
7286                 return bnxt_open_nic(bp, false, false);
7287
7288         return 0;
7289 }
7290
7291 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
7292 {
7293         struct bnxt *bp = netdev_priv(dev);
7294         bool sh = false;
7295         int rc;
7296
7297         if (tc > bp->max_tc) {
7298                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
7299                            tc, bp->max_tc);
7300                 return -EINVAL;
7301         }
7302
7303         if (netdev_get_num_tc(dev) == tc)
7304                 return 0;
7305
7306         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7307                 sh = true;
7308
7309         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
7310                               sh, tc, bp->tx_nr_rings_xdp);
7311         if (rc)
7312                 return rc;
7313
7314         /* Need to close the device and do hw resource re-allocations */
7315         if (netif_running(bp->dev))
7316                 bnxt_close_nic(bp, true, false);
7317
7318         if (tc) {
7319                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
7320                 netdev_set_num_tc(dev, tc);
7321         } else {
7322                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
7323                 netdev_reset_tc(dev);
7324         }
7325         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
7326         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7327                                bp->tx_nr_rings + bp->rx_nr_rings;
7328         bp->num_stat_ctxs = bp->cp_nr_rings;
7329
7330         if (netif_running(bp->dev))
7331                 return bnxt_open_nic(bp, true, false);
7332
7333         return 0;
7334 }
7335
7336 static int bnxt_setup_flower(struct net_device *dev,
7337                              struct tc_cls_flower_offload *cls_flower)
7338 {
7339         struct bnxt *bp = netdev_priv(dev);
7340
7341         if (BNXT_VF(bp))
7342                 return -EOPNOTSUPP;
7343
7344         return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, cls_flower);
7345 }
7346
7347 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
7348                          void *type_data)
7349 {
7350         switch (type) {
7351         case TC_SETUP_CLSFLOWER:
7352                 return bnxt_setup_flower(dev, type_data);
7353         case TC_SETUP_MQPRIO: {
7354                 struct tc_mqprio_qopt *mqprio = type_data;
7355
7356                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
7357
7358                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
7359         }
7360         default:
7361                 return -EOPNOTSUPP;
7362         }
7363 }
7364
7365 #ifdef CONFIG_RFS_ACCEL
7366 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
7367                             struct bnxt_ntuple_filter *f2)
7368 {
7369         struct flow_keys *keys1 = &f1->fkeys;
7370         struct flow_keys *keys2 = &f2->fkeys;
7371
7372         if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
7373             keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
7374             keys1->ports.ports == keys2->ports.ports &&
7375             keys1->basic.ip_proto == keys2->basic.ip_proto &&
7376             keys1->basic.n_proto == keys2->basic.n_proto &&
7377             keys1->control.flags == keys2->control.flags &&
7378             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
7379             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
7380                 return true;
7381
7382         return false;
7383 }
7384
7385 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
7386                               u16 rxq_index, u32 flow_id)
7387 {
7388         struct bnxt *bp = netdev_priv(dev);
7389         struct bnxt_ntuple_filter *fltr, *new_fltr;
7390         struct flow_keys *fkeys;
7391         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
7392         int rc = 0, idx, bit_id, l2_idx = 0;
7393         struct hlist_head *head;
7394
7395         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
7396                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7397                 int off = 0, j;
7398
7399                 netif_addr_lock_bh(dev);
7400                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
7401                         if (ether_addr_equal(eth->h_dest,
7402                                              vnic->uc_list + off)) {
7403                                 l2_idx = j + 1;
7404                                 break;
7405                         }
7406                 }
7407                 netif_addr_unlock_bh(dev);
7408                 if (!l2_idx)
7409                         return -EINVAL;
7410         }
7411         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
7412         if (!new_fltr)
7413                 return -ENOMEM;
7414
7415         fkeys = &new_fltr->fkeys;
7416         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
7417                 rc = -EPROTONOSUPPORT;
7418                 goto err_free;
7419         }
7420
7421         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
7422              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
7423             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
7424              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
7425                 rc = -EPROTONOSUPPORT;
7426                 goto err_free;
7427         }
7428         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
7429             bp->hwrm_spec_code < 0x10601) {
7430                 rc = -EPROTONOSUPPORT;
7431                 goto err_free;
7432         }
7433         if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
7434             bp->hwrm_spec_code < 0x10601) {
7435                 rc = -EPROTONOSUPPORT;
7436                 goto err_free;
7437         }
7438
7439         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
7440         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
7441
7442         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
7443         head = &bp->ntp_fltr_hash_tbl[idx];
7444         rcu_read_lock();
7445         hlist_for_each_entry_rcu(fltr, head, hash) {
7446                 if (bnxt_fltr_match(fltr, new_fltr)) {
7447                         rcu_read_unlock();
7448                         rc = 0;
7449                         goto err_free;
7450                 }
7451         }
7452         rcu_read_unlock();
7453
7454         spin_lock_bh(&bp->ntp_fltr_lock);
7455         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
7456                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
7457         if (bit_id < 0) {
7458                 spin_unlock_bh(&bp->ntp_fltr_lock);
7459                 rc = -ENOMEM;
7460                 goto err_free;
7461         }
7462
7463         new_fltr->sw_id = (u16)bit_id;
7464         new_fltr->flow_id = flow_id;
7465         new_fltr->l2_fltr_idx = l2_idx;
7466         new_fltr->rxq = rxq_index;
7467         hlist_add_head_rcu(&new_fltr->hash, head);
7468         bp->ntp_fltr_count++;
7469         spin_unlock_bh(&bp->ntp_fltr_lock);
7470
7471         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
7472         bnxt_queue_sp_work(bp);
7473
7474         return new_fltr->sw_id;
7475
7476 err_free:
7477         kfree(new_fltr);
7478         return rc;
7479 }
7480
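/* aRFS flow: bnxt_rx_flow_steer() above only allocates a software filter
 * and signals BNXT_RX_NTP_FLTR_SP_EVENT.  bnxt_cfg_ntp_filters() below,
 * invoked from bnxt_sp_task(), then programs new filters with
 * bnxt_hwrm_cfa_ntuple_filter_alloc() and frees aged-out ones once
 * rps_may_expire_flow() reports the flow is no longer in use.
 */
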
7481 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
7482 {
7483         int i;
7484
7485         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
7486                 struct hlist_head *head;
7487                 struct hlist_node *tmp;
7488                 struct bnxt_ntuple_filter *fltr;
7489                 int rc;
7490
7491                 head = &bp->ntp_fltr_hash_tbl[i];
7492                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
7493                         bool del = false;
7494
7495                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
7496                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
7497                                                         fltr->flow_id,
7498                                                         fltr->sw_id)) {
7499                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
7500                                                                          fltr);
7501                                         del = true;
7502                                 }
7503                         } else {
7504                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
7505                                                                        fltr);
7506                                 if (rc)
7507                                         del = true;
7508                                 else
7509                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
7510                         }
7511
7512                         if (del) {
7513                                 spin_lock_bh(&bp->ntp_fltr_lock);
7514                                 hlist_del_rcu(&fltr->hash);
7515                                 bp->ntp_fltr_count--;
7516                                 spin_unlock_bh(&bp->ntp_fltr_lock);
7517                                 synchronize_rcu();
7518                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
7519                                 kfree(fltr);
7520                         }
7521                 }
7522         }
7523         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
7524                 netdev_info(bp->dev, "Received PF driver unload event!\n");
7525 }
7526
7527 #else
7528
7529 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
7530 {
7531 }
7532
7533 #endif /* CONFIG_RFS_ACCEL */
7534
7535 static void bnxt_udp_tunnel_add(struct net_device *dev,
7536                                 struct udp_tunnel_info *ti)
7537 {
7538         struct bnxt *bp = netdev_priv(dev);
7539
7540         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
7541                 return;
7542
7543         if (!netif_running(dev))
7544                 return;
7545
7546         switch (ti->type) {
7547         case UDP_TUNNEL_TYPE_VXLAN:
7548                 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
7549                         return;
7550
7551                 bp->vxlan_port_cnt++;
7552                 if (bp->vxlan_port_cnt == 1) {
7553                         bp->vxlan_port = ti->port;
7554                         set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
7555                         bnxt_queue_sp_work(bp);
7556                 }
7557                 break;
7558         case UDP_TUNNEL_TYPE_GENEVE:
7559                 if (bp->nge_port_cnt && bp->nge_port != ti->port)
7560                         return;
7561
7562                 bp->nge_port_cnt++;
7563                 if (bp->nge_port_cnt == 1) {
7564                         bp->nge_port = ti->port;
7565                         set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
7566                 }
7567                 break;
7568         default:
7569                 return;
7570         }
7571
7572         bnxt_queue_sp_work(bp);
7573 }
7574
7575 static void bnxt_udp_tunnel_del(struct net_device *dev,
7576                                 struct udp_tunnel_info *ti)
7577 {
7578         struct bnxt *bp = netdev_priv(dev);
7579
7580         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
7581                 return;
7582
7583         if (!netif_running(dev))
7584                 return;
7585
7586         switch (ti->type) {
7587         case UDP_TUNNEL_TYPE_VXLAN:
7588                 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
7589                         return;
7590                 bp->vxlan_port_cnt--;
7591
7592                 if (bp->vxlan_port_cnt != 0)
7593                         return;
7594
7595                 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
7596                 break;
7597         case UDP_TUNNEL_TYPE_GENEVE:
7598                 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
7599                         return;
7600                 bp->nge_port_cnt--;
7601
7602                 if (bp->nge_port_cnt != 0)
7603                         return;
7604
7605                 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
7606                 break;
7607         default:
7608                 return;
7609         }
7610
7611         bnxt_queue_sp_work(bp);
7612 }
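
/* Tunnel UDP ports are reference counted in software: only the first
 * ndo_udp_tunnel_add for a given port programs the firmware (via the
 * *_ADD_PORT events handled in bnxt_sp_task()), and only the last
 * ndo_udp_tunnel_del frees it.  A second, different VXLAN or GENEVE port
 * is silently ignored while the first is still in use.
 */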
7613
7614 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7615                                struct net_device *dev, u32 filter_mask,
7616                                int nlflags)
7617 {
7618         struct bnxt *bp = netdev_priv(dev);
7619
7620         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
7621                                        nlflags, filter_mask, NULL);
7622 }
7623
7624 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7625                                u16 flags)
7626 {
7627         struct bnxt *bp = netdev_priv(dev);
7628         struct nlattr *attr, *br_spec;
7629         int rem, rc = 0;
7630
7631         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
7632                 return -EOPNOTSUPP;
7633
7634         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7635         if (!br_spec)
7636                 return -EINVAL;
7637
7638         nla_for_each_nested(attr, br_spec, rem) {
7639                 u16 mode;
7640
7641                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7642                         continue;
7643
7644                 if (nla_len(attr) < sizeof(mode))
7645                         return -EINVAL;
7646
7647                 mode = nla_get_u16(attr);
7648                 if (mode == bp->br_mode)
7649                         break;
7650
7651                 rc = bnxt_hwrm_set_br_mode(bp, mode);
7652                 if (!rc)
7653                         bp->br_mode = mode;
7654                 break;
7655         }
7656         return rc;
7657 }
7658
7659 static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
7660                                    size_t len)
7661 {
7662         struct bnxt *bp = netdev_priv(dev);
7663         int rc;
7664
7665         /* The PF and its VF-reps only support the switchdev framework */
7666         if (!BNXT_PF(bp))
7667                 return -EOPNOTSUPP;
7668
7669         rc = snprintf(buf, len, "p%d", bp->pf.port_id);
7670
7671         if (rc >= len)
7672                 return -EOPNOTSUPP;
7673         return 0;
7674 }
7675
7676 int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
7677 {
7678         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
7679                 return -EOPNOTSUPP;
7680
7681         /* The PF and its VF-reps only support the switchdev framework */
7682         if (!BNXT_PF(bp))
7683                 return -EOPNOTSUPP;
7684
7685         switch (attr->id) {
7686         case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
7687                 /* In SRIOV, each PF-pool (PF + child VFs) serves as a
7688                  * switching domain; the PF's permanent MAC address can be
7689                  * used as the unique parent-id.
7690                  */
7691                 attr->u.ppid.id_len = ETH_ALEN;
7692                 ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr);
7693                 break;
7694         default:
7695                 return -EOPNOTSUPP;
7696         }
7697         return 0;
7698 }
7699
7700 static int bnxt_swdev_port_attr_get(struct net_device *dev,
7701                                     struct switchdev_attr *attr)
7702 {
7703         return bnxt_port_attr_get(netdev_priv(dev), attr);
7704 }
7705
7706 static const struct switchdev_ops bnxt_switchdev_ops = {
7707         .switchdev_port_attr_get        = bnxt_swdev_port_attr_get
7708 };
7709
7710 static const struct net_device_ops bnxt_netdev_ops = {
7711         .ndo_open               = bnxt_open,
7712         .ndo_start_xmit         = bnxt_start_xmit,
7713         .ndo_stop               = bnxt_close,
7714         .ndo_get_stats64        = bnxt_get_stats64,
7715         .ndo_set_rx_mode        = bnxt_set_rx_mode,
7716         .ndo_do_ioctl           = bnxt_ioctl,
7717         .ndo_validate_addr      = eth_validate_addr,
7718         .ndo_set_mac_address    = bnxt_change_mac_addr,
7719         .ndo_change_mtu         = bnxt_change_mtu,
7720         .ndo_fix_features       = bnxt_fix_features,
7721         .ndo_set_features       = bnxt_set_features,
7722         .ndo_tx_timeout         = bnxt_tx_timeout,
7723 #ifdef CONFIG_BNXT_SRIOV
7724         .ndo_get_vf_config      = bnxt_get_vf_config,
7725         .ndo_set_vf_mac         = bnxt_set_vf_mac,
7726         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
7727         .ndo_set_vf_rate        = bnxt_set_vf_bw,
7728         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
7729         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
7730 #endif
7731 #ifdef CONFIG_NET_POLL_CONTROLLER
7732         .ndo_poll_controller    = bnxt_poll_controller,
7733 #endif
7734         .ndo_setup_tc           = bnxt_setup_tc,
7735 #ifdef CONFIG_RFS_ACCEL
7736         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
7737 #endif
7738         .ndo_udp_tunnel_add     = bnxt_udp_tunnel_add,
7739         .ndo_udp_tunnel_del     = bnxt_udp_tunnel_del,
7740         .ndo_xdp                = bnxt_xdp,
7741         .ndo_bridge_getlink     = bnxt_bridge_getlink,
7742         .ndo_bridge_setlink     = bnxt_bridge_setlink,
7743         .ndo_get_phys_port_name = bnxt_get_phys_port_name
7744 };
7745
7746 static void bnxt_remove_one(struct pci_dev *pdev)
7747 {
7748         struct net_device *dev = pci_get_drvdata(pdev);
7749         struct bnxt *bp = netdev_priv(dev);
7750
7751         if (BNXT_PF(bp)) {
7752                 bnxt_sriov_disable(bp);
7753                 bnxt_dl_unregister(bp);
7754         }
7755
7756         pci_disable_pcie_error_reporting(pdev);
7757         unregister_netdev(dev);
7758         bnxt_shutdown_tc(bp);
7759         bnxt_cancel_sp_work(bp);
7760         bp->sp_event = 0;
7761
7762         bnxt_clear_int_mode(bp);
7763         bnxt_hwrm_func_drv_unrgtr(bp);
7764         bnxt_free_hwrm_resources(bp);
7765         bnxt_free_hwrm_short_cmd_req(bp);
7766         bnxt_ethtool_free(bp);
7767         bnxt_dcb_free(bp);
7768         kfree(bp->edev);
7769         bp->edev = NULL;
7770         if (bp->xdp_prog)
7771                 bpf_prog_put(bp->xdp_prog);
7772         bnxt_cleanup_pci(bp);
7773         free_netdev(dev);
7774 }
7775
7776 static int bnxt_probe_phy(struct bnxt *bp)
7777 {
7778         int rc = 0;
7779         struct bnxt_link_info *link_info = &bp->link_info;
7780
7781         rc = bnxt_hwrm_phy_qcaps(bp);
7782         if (rc) {
7783                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
7784                            rc);
7785                 return rc;
7786         }
7787         mutex_init(&bp->link_lock);
7788
7789         rc = bnxt_update_link(bp, false);
7790         if (rc) {
7791                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
7792                            rc);
7793                 return rc;
7794         }
7795
7796         /* Older firmware does not have supported_auto_speeds, so assume
7797          * that all supported speeds can be autonegotiated.
7798          */
7799         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
7800                 link_info->support_auto_speeds = link_info->support_speeds;
7801
7802         /* Initialize the ethtool settings copy with NVM settings */
7803         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
7804                 link_info->autoneg = BNXT_AUTONEG_SPEED;
7805                 if (bp->hwrm_spec_code >= 0x10201) {
7806                         if (link_info->auto_pause_setting &
7807                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
7808                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
7809                 } else {
7810                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
7811                 }
7812                 link_info->advertising = link_info->auto_link_speeds;
7813         } else {
7814                 link_info->req_link_speed = link_info->force_link_speed;
7815                 link_info->req_duplex = link_info->duplex_setting;
7816         }
7817         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
7818                 link_info->req_flow_ctrl =
7819                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
7820         else
7821                 link_info->req_flow_ctrl = link_info->force_pause_setting;
7822         return rc;
7823 }
7824
7825 static int bnxt_get_max_irq(struct pci_dev *pdev)
7826 {
7827         u16 ctrl;
7828
7829         if (!pdev->msix_cap)
7830                 return 1;
7831
7832         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
7833         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
7834 }
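
/* PCI_MSIX_FLAGS_QSIZE encodes the MSI-X table size minus one, so e.g. a
 * masked value of 0x7f corresponds to 128 usable vectors.
 */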
7835
7836 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
7837                                 int *max_cp)
7838 {
7839         int max_ring_grps = 0;
7840
7841 #ifdef CONFIG_BNXT_SRIOV
7842         if (!BNXT_PF(bp)) {
7843                 *max_tx = bp->vf.max_tx_rings;
7844                 *max_rx = bp->vf.max_rx_rings;
7845                 *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
7846                 *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
7847                 max_ring_grps = bp->vf.max_hw_ring_grps;
7848         } else
7849 #endif
7850         {
7851                 *max_tx = bp->pf.max_tx_rings;
7852                 *max_rx = bp->pf.max_rx_rings;
7853                 *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
7854                 *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
7855                 max_ring_grps = bp->pf.max_hw_ring_grps;
7856         }
7857         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
7858                 *max_cp -= 1;
7859                 *max_rx -= 2;
7860         }
7861         if (bp->flags & BNXT_FLAG_AGG_RINGS)
7862                 *max_rx >>= 1;
7863         *max_rx = min_t(int, *max_rx, max_ring_grps);
7864 }
7865
7866 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
7867 {
7868         int rx, tx, cp;
7869
7870         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
7871         *max_rx = rx;
7872         *max_tx = tx;
7873         if (!rx || !tx || !cp)
7874                 return -ENOMEM;
7875
7876         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
7877 }
7878
7879 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
7880                                bool shared)
7881 {
7882         int rc;
7883
7884         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
7885         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
7886                 /* Not enough rings, try disabling agg rings. */
7887                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
7888                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
7889                 if (rc) {
7890                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
7891                         bp->flags |= BNXT_FLAG_AGG_RINGS;
7892                         return rc;
7893                 }
7894                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
7895                 bp->dev->hw_features &= ~NETIF_F_LRO;
7896                 bp->dev->features &= ~NETIF_F_LRO;
7897                 bnxt_set_ring_params(bp);
7898         }
7899
7900         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
7901                 int max_cp, max_stat, max_irq;
7902
7903                 /* Reserve minimum resources for RoCE */
7904                 max_cp = bnxt_get_max_func_cp_rings(bp);
7905                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
7906                 max_irq = bnxt_get_max_func_irqs(bp);
7907                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
7908                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
7909                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
7910                         return 0;
7911
7912                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
7913                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
7914                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
7915                 max_cp = min_t(int, max_cp, max_irq);
7916                 max_cp = min_t(int, max_cp, max_stat);
7917                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
7918                 if (rc)
7919                         rc = 0;
7920         }
7921         return rc;
7922 }
7923
7924 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
7925 {
7926         int dflt_rings, max_rx_rings, max_tx_rings, rc;
7927
7928         if (sh)
7929                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
7930         dflt_rings = netif_get_num_default_rss_queues();
7931         /* Limit default rings on multi-port cards to reduce memory usage */
7932         if (bp->port_count > 1)
7933                 dflt_rings = min_t(int, dflt_rings, 4);
7934         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
7935         if (rc)
7936                 return rc;
7937         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
7938         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
7939
7940         rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
7941         if (rc)
7942                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
7943
7944         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
7945         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7946                                bp->tx_nr_rings + bp->rx_nr_rings;
7947         bp->num_stat_ctxs = bp->cp_nr_rings;
7948         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7949                 bp->rx_nr_rings++;
7950                 bp->cp_nr_rings++;
7951         }
7952         return rc;
7953 }
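
/* For instance, on a dual-port card with at least four online CPUs and
 * shared completion rings, dflt_rings is capped at 4, so (subject to the
 * hardware maximums) rx_nr_rings = tx_nr_rings = 4 and
 * cp_nr_rings = max(tx, rx) = 4, with one stat context per completion ring.
 */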
7954
7955 void bnxt_restore_pf_fw_resources(struct bnxt *bp)
7956 {
7957         ASSERT_RTNL();
7958         bnxt_hwrm_func_qcaps(bp);
7959         bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
7960 }
7961
7962 static int bnxt_init_mac_addr(struct bnxt *bp)
7963 {
7964         int rc = 0;
7965
7966         if (BNXT_PF(bp)) {
7967                 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
7968         } else {
7969 #ifdef CONFIG_BNXT_SRIOV
7970                 struct bnxt_vf_info *vf = &bp->vf;
7971
7972                 if (is_valid_ether_addr(vf->mac_addr)) {
7973                         /* overwrite netdev dev_addr with admin VF MAC */
7974                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
7975                 } else {
7976                         eth_hw_addr_random(bp->dev);
7977                         rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
7978                 }
7979 #endif
7980         }
7981         return rc;
7982 }
7983
7984 static void bnxt_parse_log_pcie_link(struct bnxt *bp)
7985 {
7986         enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
7987         enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
7988
7989         if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
7990             speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
7991                 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
7992         else
7993                 netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
7994                             speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
7995                             speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
7996                             speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
7997                             "Unknown", width);
7998 }
7999
8000 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8001 {
8002         static int version_printed;
8003         struct net_device *dev;
8004         struct bnxt *bp;
8005         int rc, max_irqs;
8006
8007         if (pci_is_bridge(pdev))
8008                 return -ENODEV;
8009
8010         if (version_printed++ == 0)
8011                 pr_info("%s", version);
8012
8013         max_irqs = bnxt_get_max_irq(pdev);
8014         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
8015         if (!dev)
8016                 return -ENOMEM;
8017
8018         bp = netdev_priv(dev);
8019
8020         if (bnxt_vf_pciid(ent->driver_data))
8021                 bp->flags |= BNXT_FLAG_VF;
8022
8023         if (pdev->msix_cap)
8024                 bp->flags |= BNXT_FLAG_MSIX_CAP;
8025
8026         rc = bnxt_init_board(pdev, dev);
8027         if (rc < 0)
8028                 goto init_err_free;
8029
8030         dev->netdev_ops = &bnxt_netdev_ops;
8031         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
8032         dev->ethtool_ops = &bnxt_ethtool_ops;
8033         SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
8034         pci_set_drvdata(pdev, dev);
8035
8036         rc = bnxt_alloc_hwrm_resources(bp);
8037         if (rc)
8038                 goto init_err_pci_clean;
8039
8040         mutex_init(&bp->hwrm_cmd_lock);
8041         rc = bnxt_hwrm_ver_get(bp);
8042         if (rc)
8043                 goto init_err_pci_clean;
8044
8045         if (bp->flags & BNXT_FLAG_SHORT_CMD) {
8046                 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
8047                 if (rc)
8048                         goto init_err_pci_clean;
8049         }
8050
8051         rc = bnxt_hwrm_func_reset(bp);
8052         if (rc)
8053                 goto init_err_pci_clean;
8054
8055         bnxt_hwrm_fw_set_time(bp);
8056
8057         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
8058                            NETIF_F_TSO | NETIF_F_TSO6 |
8059                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
8060                            NETIF_F_GSO_IPXIP4 |
8061                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
8062                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
8063                            NETIF_F_RXCSUM | NETIF_F_GRO;
8064
8065         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
8066                 dev->hw_features |= NETIF_F_LRO;
8067
8068         dev->hw_enc_features =
8069                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
8070                         NETIF_F_TSO | NETIF_F_TSO6 |
8071                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
8072                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
8073                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
8074         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
8075                                     NETIF_F_GSO_GRE_CSUM;
8076         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
8077         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
8078                             NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
8079         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
8080         dev->priv_flags |= IFF_UNICAST_FLT;
8081
8082         /* MTU range: 60 - 9500 */
8083         dev->min_mtu = ETH_ZLEN;
8084         dev->max_mtu = BNXT_MAX_MTU;
8085
8086 #ifdef CONFIG_BNXT_SRIOV
8087         init_waitqueue_head(&bp->sriov_cfg_wait);
8088         mutex_init(&bp->sriov_lock);
8089 #endif
8090         bp->gro_func = bnxt_gro_func_5730x;
8091         if (BNXT_CHIP_P4_PLUS(bp))
8092                 bp->gro_func = bnxt_gro_func_5731x;
8093         else
8094                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
8095
8096         rc = bnxt_hwrm_func_drv_rgtr(bp);
8097         if (rc)
8098                 goto init_err_pci_clean;
8099
8100         rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
8101         if (rc)
8102                 goto init_err_pci_clean;
8103
8104         bp->ulp_probe = bnxt_ulp_probe;
8105
8106         /* Get the MAX capabilities for this function */
8107         rc = bnxt_hwrm_func_qcaps(bp);
8108         if (rc) {
8109                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
8110                            rc);
8111                 rc = -1;
8112                 goto init_err_pci_clean;
8113         }
8114         rc = bnxt_init_mac_addr(bp);
8115         if (rc) {
8116                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
8117                 rc = -EADDRNOTAVAIL;
8118                 goto init_err_pci_clean;
8119         }
8120         rc = bnxt_hwrm_queue_qportcfg(bp);
8121         if (rc) {
8122                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
8123                            rc);
8124                 rc = -1;
8125                 goto init_err_pci_clean;
8126         }
8127
8128         bnxt_hwrm_func_qcfg(bp);
8129         bnxt_hwrm_port_led_qcaps(bp);
8130         bnxt_ethtool_init(bp);
8131         bnxt_dcb_init(bp);
8132
8133         rc = bnxt_probe_phy(bp);
8134         if (rc)
8135                 goto init_err_pci_clean;
8136
8137         bnxt_set_rx_skb_mode(bp, false);
8138         bnxt_set_tpa_flags(bp);
8139         bnxt_set_ring_params(bp);
8140         bnxt_set_max_func_irqs(bp, max_irqs);
8141         rc = bnxt_set_dflt_rings(bp, true);
8142         if (rc) {
8143                 netdev_err(bp->dev, "Not enough rings available.\n");
8144                 rc = -ENOMEM;
8145                 goto init_err_pci_clean;
8146         }
8147
8148         /* Default RSS hash cfg. */
8149         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
8150                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
8151                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
8152                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
8153         if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
8154                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
8155                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
8156                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
8157         }
8158
8159         bnxt_hwrm_vnic_qcaps(bp);
8160         if (bnxt_rfs_supported(bp)) {
8161                 dev->hw_features |= NETIF_F_NTUPLE;
8162                 if (bnxt_rfs_capable(bp)) {
8163                         bp->flags |= BNXT_FLAG_RFS;
8164                         dev->features |= NETIF_F_NTUPLE;
8165                 }
8166         }
8167
8168         if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
8169                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
8170
8171         rc = bnxt_init_int_mode(bp);
8172         if (rc)
8173                 goto init_err_pci_clean;
8174
8175         bnxt_get_wol_settings(bp);
8176         if (bp->flags & BNXT_FLAG_WOL_CAP)
8177                 device_set_wakeup_enable(&pdev->dev, bp->wol);
8178         else
8179                 device_set_wakeup_capable(&pdev->dev, false);
8180
8181         if (BNXT_PF(bp)) {
8182                 if (!bnxt_pf_wq) {
8183                         bnxt_pf_wq =
8184                                 create_singlethread_workqueue("bnxt_pf_wq");
8185                         if (!bnxt_pf_wq) {
8186                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
                                     rc = -ENOMEM;
8187                                 goto init_err_pci_clean;
8188                         }
8189                 }
8190                 bnxt_init_tc(bp);
8191         }
8192
8193         rc = register_netdev(dev);
8194         if (rc)
8195                 goto init_err_cleanup_tc;
8196
8197         if (BNXT_PF(bp))
8198                 bnxt_dl_register(bp);
8199
8200         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
8201                     board_info[ent->driver_data].name,
8202                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
8203
8204         bnxt_parse_log_pcie_link(bp);
8205
8206         return 0;
8207
8208 init_err_cleanup_tc:
8209         bnxt_shutdown_tc(bp);
8210         bnxt_clear_int_mode(bp);
8211
8212 init_err_pci_clean:
8213         bnxt_cleanup_pci(bp);
8214
8215 init_err_free:
8216         free_netdev(dev);
8217         return rc;
8218 }
8219
8220 static void bnxt_shutdown(struct pci_dev *pdev)
8221 {
8222         struct net_device *dev = pci_get_drvdata(pdev);
8223         struct bnxt *bp;
8224
8225         if (!dev)
8226                 return;
8227
8228         rtnl_lock();
8229         bp = netdev_priv(dev);
8230         if (!bp)
8231                 goto shutdown_exit;
8232
8233         if (netif_running(dev))
8234                 dev_close(dev);
8235
8236         bnxt_ulp_shutdown(bp);
8237
8238         if (system_state == SYSTEM_POWER_OFF) {
8239                 bnxt_clear_int_mode(bp);
8240                 pci_wake_from_d3(pdev, bp->wol);
8241                 pci_set_power_state(pdev, PCI_D3hot);
8242         }
8243
8244 shutdown_exit:
8245         rtnl_unlock();
8246 }
8247
8248 #ifdef CONFIG_PM_SLEEP
8249 static int bnxt_suspend(struct device *device)
8250 {
8251         struct pci_dev *pdev = to_pci_dev(device);
8252         struct net_device *dev = pci_get_drvdata(pdev);
8253         struct bnxt *bp = netdev_priv(dev);
8254         int rc = 0;
8255
8256         rtnl_lock();
8257         if (netif_running(dev)) {
8258                 netif_device_detach(dev);
8259                 rc = bnxt_close(dev);
8260         }
8261         bnxt_hwrm_func_drv_unrgtr(bp);
8262         rtnl_unlock();
8263         return rc;
8264 }
8265
8266 static int bnxt_resume(struct device *device)
8267 {
8268         struct pci_dev *pdev = to_pci_dev(device);
8269         struct net_device *dev = pci_get_drvdata(pdev);
8270         struct bnxt *bp = netdev_priv(dev);
8271         int rc = 0;
8272
8273         rtnl_lock();
8274         if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
8275                 rc = -ENODEV;
8276                 goto resume_exit;
8277         }
8278         rc = bnxt_hwrm_func_reset(bp);
8279         if (rc) {
8280                 rc = -EBUSY;
8281                 goto resume_exit;
8282         }
8283         bnxt_get_wol_settings(bp);
8284         if (netif_running(dev)) {
8285                 rc = bnxt_open(dev);
8286                 if (!rc)
8287                         netif_device_attach(dev);
8288         }
8289
8290 resume_exit:
8291         rtnl_unlock();
8292         return rc;
8293 }
8294
8295 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
8296 #define BNXT_PM_OPS (&bnxt_pm_ops)
8297
8298 #else
8299
8300 #define BNXT_PM_OPS NULL
8301
8302 #endif /* CONFIG_PM_SLEEP */
8303
8304 /**
8305  * bnxt_io_error_detected - called when PCI error is detected
8306  * @pdev: Pointer to PCI device
8307  * @state: The current pci connection state
8308  *
8309  * This function is called after a PCI bus error affecting
8310  * this device has been detected.
8311  */
8312 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
8313                                                pci_channel_state_t state)
8314 {
8315         struct net_device *netdev = pci_get_drvdata(pdev);
8316         struct bnxt *bp = netdev_priv(netdev);
8317
8318         netdev_info(netdev, "PCI I/O error detected\n");
8319
8320         rtnl_lock();
8321         netif_device_detach(netdev);
8322
8323         bnxt_ulp_stop(bp);
8324
8325         if (state == pci_channel_io_perm_failure) {
8326                 rtnl_unlock();
8327                 return PCI_ERS_RESULT_DISCONNECT;
8328         }
8329
8330         if (netif_running(netdev))
8331                 bnxt_close(netdev);
8332
8333         pci_disable_device(pdev);
8334         rtnl_unlock();
8335
8336         /* Request a slot reset. */
8337         return PCI_ERS_RESULT_NEED_RESET;
8338 }
8339
8340 /**
8341  * bnxt_io_slot_reset - called after the pci bus has been reset.
8342  * @pdev: Pointer to PCI device
8343  *
8344  * Restart the card from scratch, as if from a cold-boot.
8345  * At this point, the card has experienced a hard reset,
8346  * followed by fixups by BIOS, and has its config space
8347  * set up identically to what it was at cold boot.
8348  */
8349 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
8350 {
8351         struct net_device *netdev = pci_get_drvdata(pdev);
8352         struct bnxt *bp = netdev_priv(netdev);
8353         int err = 0;
8354         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8355
8356         netdev_info(bp->dev, "PCI Slot Reset\n");
8357
8358         rtnl_lock();
8359
8360         if (pci_enable_device(pdev)) {
8361                 dev_err(&pdev->dev,
8362                         "Cannot re-enable PCI device after reset.\n");
8363         } else {
8364                 pci_set_master(pdev);
8365
8366                 err = bnxt_hwrm_func_reset(bp);
8367                 if (!err && netif_running(netdev))
8368                         err = bnxt_open(netdev);
8369
8370                 if (!err) {
8371                         result = PCI_ERS_RESULT_RECOVERED;
8372                         bnxt_ulp_start(bp);
8373                 }
8374         }
8375
8376         if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
8377                 dev_close(netdev);
8378
8379         rtnl_unlock();
8380
8381         err = pci_cleanup_aer_uncorrect_error_status(pdev);
8382         if (err) {
8383                 dev_err(&pdev->dev,
8384                         "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8385                          err); /* non-fatal, continue */
8386         }
8387
8388         return result;
8389 }
8390
8391 /**
8392  * bnxt_io_resume - called when traffic can start flowing again.
8393  * @pdev: Pointer to PCI device
8394  *
8395  * This callback is called when the error recovery driver tells
8396  * us that it's OK to resume normal operation.
8397  */
8398 static void bnxt_io_resume(struct pci_dev *pdev)
8399 {
8400         struct net_device *netdev = pci_get_drvdata(pdev);
8401
8402         rtnl_lock();
8403
8404         netif_device_attach(netdev);
8405
8406         rtnl_unlock();
8407 }
8408
8409 static const struct pci_error_handlers bnxt_err_handler = {
8410         .error_detected = bnxt_io_error_detected,
8411         .slot_reset     = bnxt_io_slot_reset,
8412         .resume         = bnxt_io_resume
8413 };
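
/* AER recovery sequence as wired up above: the PCI core first calls
 * bnxt_io_error_detected() (detach the netdev, stop the ULPs, request a
 * slot reset unless the failure is permanent), then bnxt_io_slot_reset()
 * (re-enable the device, reset the firmware and re-open if it was
 * running), and finally bnxt_io_resume() to re-attach the netdev.
 */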
8414
8415 static struct pci_driver bnxt_pci_driver = {
8416         .name           = DRV_MODULE_NAME,
8417         .id_table       = bnxt_pci_tbl,
8418         .probe          = bnxt_init_one,
8419         .remove         = bnxt_remove_one,
8420         .shutdown       = bnxt_shutdown,
8421         .driver.pm      = BNXT_PM_OPS,
8422         .err_handler    = &bnxt_err_handler,
8423 #if defined(CONFIG_BNXT_SRIOV)
8424         .sriov_configure = bnxt_sriov_configure,
8425 #endif
8426 };
8427
8428 static int __init bnxt_init(void)
8429 {
8430         return pci_register_driver(&bnxt_pci_driver);
8431 }
8432
8433 static void __exit bnxt_exit(void)
8434 {
8435         pci_unregister_driver(&bnxt_pci_driver);
8436         if (bnxt_pf_wq)
8437                 destroy_workqueue(bnxt_pf_wq);
8438 }
8439
8440 module_init(bnxt_init);
8441 module_exit(bnxt_exit);