1 /* bnx2x_main.c: QLogic Everest network driver.
2  *
3  * Copyright (c) 2007-2013 Broadcom Corporation
4  * Copyright (c) 2014 QLogic Corporation
5  * All rights reserved
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation.
10  *
11  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12  * Written by: Eliezer Tamir
13  * Based on code from Michael Chan's bnx2 driver
14  * UDP CSUM errata workaround by Arik Gendelman
15  * Slowpath and fastpath rework by Vladislav Zolotarov
16  * Statistics and Link management by Yitchak Gertner
17  *
18  */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/kernel.h>
25 #include <linux/device.h>  /* for dev_info() */
26 #include <linux/timer.h>
27 #include <linux/errno.h>
28 #include <linux/ioport.h>
29 #include <linux/slab.h>
30 #include <linux/interrupt.h>
31 #include <linux/pci.h>
32 #include <linux/aer.h>
33 #include <linux/init.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/skbuff.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/bitops.h>
39 #include <linux/irq.h>
40 #include <linux/delay.h>
41 #include <asm/byteorder.h>
42 #include <linux/time.h>
43 #include <linux/ethtool.h>
44 #include <linux/mii.h>
45 #include <linux/if_vlan.h>
46 #include <linux/crash_dump.h>
47 #include <net/ip.h>
48 #include <net/ipv6.h>
49 #include <net/tcp.h>
50 #include <net/vxlan.h>
51 #include <net/checksum.h>
52 #include <net/ip6_checksum.h>
53 #include <linux/workqueue.h>
54 #include <linux/crc32.h>
55 #include <linux/crc32c.h>
56 #include <linux/prefetch.h>
57 #include <linux/zlib.h>
58 #include <linux/io.h>
59 #include <linux/semaphore.h>
60 #include <linux/stringify.h>
61 #include <linux/vmalloc.h>
62
63 #include "bnx2x.h"
64 #include "bnx2x_init.h"
65 #include "bnx2x_init_ops.h"
66 #include "bnx2x_cmn.h"
67 #include "bnx2x_vfpf.h"
68 #include "bnx2x_dcb.h"
69 #include "bnx2x_sp.h"
70 #include <linux/firmware.h>
71 #include "bnx2x_fw_file_hdr.h"
72 /* FW files */
73 /*(DEBLOBBED)*/
74 #define FW_FILE_NAME_E1         "/*(DEBLOBBED)*/"
75 #define FW_FILE_NAME_E1H        "/*(DEBLOBBED)*/"
76 #define FW_FILE_NAME_E2         "/*(DEBLOBBED)*/"
77 #define bnx2x_init_block(bp, start, end) \
78   return (printk(KERN_ERR "%s: Missing Free firmware\n", bp->dev->name),\
79           -EINVAL)
80
81 /* Time in jiffies before concluding the transmitter is hung */
82 #define TX_TIMEOUT              (5*HZ)
83
84 static char version[] =
85         "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
86         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
87
88 MODULE_AUTHOR("Eliezer Tamir");
89 MODULE_DESCRIPTION("QLogic "
90                    "BCM57710/57711/57711E/"
91                    "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
92                    "57840/57840_MF Driver");
93 MODULE_LICENSE("GPL");
94 MODULE_VERSION(DRV_MODULE_VERSION);
95 /*(DEBLOBBED)*/
96
97 int bnx2x_num_queues;
98 module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
99 MODULE_PARM_DESC(num_queues,
100                  " Set number of queues (default is the number of CPUs)");
101
102 static int disable_tpa;
103 module_param(disable_tpa, int, S_IRUGO);
104 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
105
106 static int int_mode;
107 module_param(int_mode, int, S_IRUGO);
108 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
109                                 "(1 INT#x; 2 MSI)");
110
111 static int dropless_fc;
112 module_param(dropless_fc, int, S_IRUGO);
113 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
114
115 static int mrrs = -1;
116 module_param(mrrs, int, S_IRUGO);
117 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
118
119 static int debug;
120 module_param(debug, int, S_IRUGO);
121 MODULE_PARM_DESC(debug, " Default debug msglevel");
122
123 static struct workqueue_struct *bnx2x_wq;
124 struct workqueue_struct *bnx2x_iov_wq;
125
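/* Saved MAC-block register addresses/values, presumably used by the
 * previous-unload flow (bnx2x_prev_*) to undo MAC configuration left
 * behind by a previously loaded driver instance.
 */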
126 struct bnx2x_mac_vals {
127         u32 xmac_addr;
128         u32 xmac_val;
129         u32 emac_addr;
130         u32 emac_val;
131         u32 umac_addr[2];
132         u32 umac_val[2];
133         u32 bmac_addr;
134         u32 bmac_val[2];
135 };
136
137 enum bnx2x_board_type {
138         BCM57710 = 0,
139         BCM57711,
140         BCM57711E,
141         BCM57712,
142         BCM57712_MF,
143         BCM57712_VF,
144         BCM57800,
145         BCM57800_MF,
146         BCM57800_VF,
147         BCM57810,
148         BCM57810_MF,
149         BCM57810_VF,
150         BCM57840_4_10,
151         BCM57840_2_20,
152         BCM57840_MF,
153         BCM57840_VF,
154         BCM57811,
155         BCM57811_MF,
156         BCM57840_O,
157         BCM57840_MFO,
158         BCM57811_VF
159 };
160
161 /* indexed by board_type, above */
162 static struct {
163         char *name;
164 } board_info[] = {
165         [BCM57710]      = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
166         [BCM57711]      = { "QLogic BCM57711 10 Gigabit PCIe" },
167         [BCM57711E]     = { "QLogic BCM57711E 10 Gigabit PCIe" },
168         [BCM57712]      = { "QLogic BCM57712 10 Gigabit Ethernet" },
169         [BCM57712_MF]   = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
170         [BCM57712_VF]   = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
171         [BCM57800]      = { "QLogic BCM57800 10 Gigabit Ethernet" },
172         [BCM57800_MF]   = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
173         [BCM57800_VF]   = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
174         [BCM57810]      = { "QLogic BCM57810 10 Gigabit Ethernet" },
175         [BCM57810_MF]   = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
176         [BCM57810_VF]   = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
177         [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" },
178         [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" },
179         [BCM57840_MF]   = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
180         [BCM57840_VF]   = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
181         [BCM57811]      = { "QLogic BCM57811 10 Gigabit Ethernet" },
182         [BCM57811_MF]   = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
183         [BCM57840_O]    = { "QLogic BCM57840 10/20 Gigabit Ethernet" },
184         [BCM57840_MFO]  = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
185         [BCM57811_VF]   = { "QLogic BCM57811 10 Gigabit Ethernet Virtual Function" }
186 };
187
188 #ifndef PCI_DEVICE_ID_NX2_57710
189 #define PCI_DEVICE_ID_NX2_57710         CHIP_NUM_57710
190 #endif
191 #ifndef PCI_DEVICE_ID_NX2_57711
192 #define PCI_DEVICE_ID_NX2_57711         CHIP_NUM_57711
193 #endif
194 #ifndef PCI_DEVICE_ID_NX2_57711E
195 #define PCI_DEVICE_ID_NX2_57711E        CHIP_NUM_57711E
196 #endif
197 #ifndef PCI_DEVICE_ID_NX2_57712
198 #define PCI_DEVICE_ID_NX2_57712         CHIP_NUM_57712
199 #endif
200 #ifndef PCI_DEVICE_ID_NX2_57712_MF
201 #define PCI_DEVICE_ID_NX2_57712_MF      CHIP_NUM_57712_MF
202 #endif
203 #ifndef PCI_DEVICE_ID_NX2_57712_VF
204 #define PCI_DEVICE_ID_NX2_57712_VF      CHIP_NUM_57712_VF
205 #endif
206 #ifndef PCI_DEVICE_ID_NX2_57800
207 #define PCI_DEVICE_ID_NX2_57800         CHIP_NUM_57800
208 #endif
209 #ifndef PCI_DEVICE_ID_NX2_57800_MF
210 #define PCI_DEVICE_ID_NX2_57800_MF      CHIP_NUM_57800_MF
211 #endif
212 #ifndef PCI_DEVICE_ID_NX2_57800_VF
213 #define PCI_DEVICE_ID_NX2_57800_VF      CHIP_NUM_57800_VF
214 #endif
215 #ifndef PCI_DEVICE_ID_NX2_57810
216 #define PCI_DEVICE_ID_NX2_57810         CHIP_NUM_57810
217 #endif
218 #ifndef PCI_DEVICE_ID_NX2_57810_MF
219 #define PCI_DEVICE_ID_NX2_57810_MF      CHIP_NUM_57810_MF
220 #endif
221 #ifndef PCI_DEVICE_ID_NX2_57840_O
222 #define PCI_DEVICE_ID_NX2_57840_O       CHIP_NUM_57840_OBSOLETE
223 #endif
224 #ifndef PCI_DEVICE_ID_NX2_57810_VF
225 #define PCI_DEVICE_ID_NX2_57810_VF      CHIP_NUM_57810_VF
226 #endif
227 #ifndef PCI_DEVICE_ID_NX2_57840_4_10
228 #define PCI_DEVICE_ID_NX2_57840_4_10    CHIP_NUM_57840_4_10
229 #endif
230 #ifndef PCI_DEVICE_ID_NX2_57840_2_20
231 #define PCI_DEVICE_ID_NX2_57840_2_20    CHIP_NUM_57840_2_20
232 #endif
233 #ifndef PCI_DEVICE_ID_NX2_57840_MFO
234 #define PCI_DEVICE_ID_NX2_57840_MFO     CHIP_NUM_57840_MF_OBSOLETE
235 #endif
236 #ifndef PCI_DEVICE_ID_NX2_57840_MF
237 #define PCI_DEVICE_ID_NX2_57840_MF      CHIP_NUM_57840_MF
238 #endif
239 #ifndef PCI_DEVICE_ID_NX2_57840_VF
240 #define PCI_DEVICE_ID_NX2_57840_VF      CHIP_NUM_57840_VF
241 #endif
242 #ifndef PCI_DEVICE_ID_NX2_57811
243 #define PCI_DEVICE_ID_NX2_57811         CHIP_NUM_57811
244 #endif
245 #ifndef PCI_DEVICE_ID_NX2_57811_MF
246 #define PCI_DEVICE_ID_NX2_57811_MF      CHIP_NUM_57811_MF
247 #endif
248 #ifndef PCI_DEVICE_ID_NX2_57811_VF
249 #define PCI_DEVICE_ID_NX2_57811_VF      CHIP_NUM_57811_VF
250 #endif
251
252 static const struct pci_device_id bnx2x_pci_tbl[] = {
253         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
254         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
255         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
256         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
257         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
258         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
259         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
260         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
261         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
262         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
263         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
264         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
265         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
266         { PCI_VDEVICE(QLOGIC,   PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
267         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
268         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
269         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
270         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
271         { PCI_VDEVICE(QLOGIC,   PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
272         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
273         { PCI_VDEVICE(QLOGIC,   PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
274         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
275         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
276         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
277         { 0 }
278 };
279
280 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
281
282 /* Global resources for unloading a previously loaded device */
283 #define BNX2X_PREV_WAIT_NEEDED 1
284 static DEFINE_SEMAPHORE(bnx2x_prev_sem);
285 static LIST_HEAD(bnx2x_prev_list);
286
287 /* Forward declaration */
288 static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
289 static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
290 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
291
292 /****************************************************************************
293 * General service functions
294 ****************************************************************************/
295
296 static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
297
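/* Write a 64-bit DMA address into STORM internal memory as two
 * consecutive 32-bit words, low half first.
 */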
298 static void __storm_memset_dma_mapping(struct bnx2x *bp,
299                                        u32 addr, dma_addr_t mapping)
300 {
301         REG_WR(bp,  addr, U64_LO(mapping));
302         REG_WR(bp,  addr + 4, U64_HI(mapping));
303 }
304
305 static void storm_memset_spq_addr(struct bnx2x *bp,
306                                   dma_addr_t mapping, u16 abs_fid)
307 {
308         u32 addr = XSEM_REG_FAST_MEMORY +
309                         XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
310
311         __storm_memset_dma_mapping(bp, addr, mapping);
312 }
313
314 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
315                                   u16 pf_id)
316 {
317         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
318                 pf_id);
319         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
320                 pf_id);
321         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
322                 pf_id);
323         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
324                 pf_id);
325 }
326
327 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
328                                  u8 enable)
329 {
330         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
331                 enable);
332         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
333                 enable);
334         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
335                 enable);
336         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
337                 enable);
338 }
339
340 static void storm_memset_eq_data(struct bnx2x *bp,
341                                  struct event_ring_data *eq_data,
342                                 u16 pfid)
343 {
344         size_t size = sizeof(struct event_ring_data);
345
346         u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
347
348         __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
349 }
350
351 static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
352                                  u16 pfid)
353 {
354         u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
355         REG_WR16(bp, addr, eq_prod);
356 }
357
358 /* Used only at init; locking is done by the MCP. Access is indirect,
359  * through the PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA window in config space.
360  */
361 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
362 {
363         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
364         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
365         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
366                                PCICFG_VENDOR_ID_OFFSET);
367 }
368
369 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
370 {
371         u32 val;
372
373         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
374         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
375         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
376                                PCICFG_VENDOR_ID_OFFSET);
377
378         return val;
379 }
380
381 #define DMAE_DP_SRC_GRC         "grc src_addr [%08x]"
382 #define DMAE_DP_SRC_PCI         "pci src_addr [%x:%08x]"
383 #define DMAE_DP_DST_GRC         "grc dst_addr [%08x]"
384 #define DMAE_DP_DST_PCI         "pci dst_addr [%x:%08x]"
385 #define DMAE_DP_DST_NONE        "dst_addr [none]"
386
387 static void bnx2x_dp_dmae(struct bnx2x *bp,
388                           struct dmae_command *dmae, int msglvl)
389 {
390         u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
391         int i;
392
393         switch (dmae->opcode & DMAE_COMMAND_DST) {
394         case DMAE_CMD_DST_PCI:
395                 if (src_type == DMAE_CMD_SRC_PCI)
396                         DP(msglvl, "DMAE: opcode 0x%08x\n"
397                            "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
398                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
399                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
400                            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
401                            dmae->comp_addr_hi, dmae->comp_addr_lo,
402                            dmae->comp_val);
403                 else
404                         DP(msglvl, "DMAE: opcode 0x%08x\n"
405                            "src [%08x], len [%d*4], dst [%x:%08x]\n"
406                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
407                            dmae->opcode, dmae->src_addr_lo >> 2,
408                            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
409                            dmae->comp_addr_hi, dmae->comp_addr_lo,
410                            dmae->comp_val);
411                 break;
412         case DMAE_CMD_DST_GRC:
413                 if (src_type == DMAE_CMD_SRC_PCI)
414                         DP(msglvl, "DMAE: opcode 0x%08x\n"
415                            "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
416                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
417                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
418                            dmae->len, dmae->dst_addr_lo >> 2,
419                            dmae->comp_addr_hi, dmae->comp_addr_lo,
420                            dmae->comp_val);
421                 else
422                         DP(msglvl, "DMAE: opcode 0x%08x\n"
423                            "src [%08x], len [%d*4], dst [%08x]\n"
424                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
425                            dmae->opcode, dmae->src_addr_lo >> 2,
426                            dmae->len, dmae->dst_addr_lo >> 2,
427                            dmae->comp_addr_hi, dmae->comp_addr_lo,
428                            dmae->comp_val);
429                 break;
430         default:
431                 if (src_type == DMAE_CMD_SRC_PCI)
432                         DP(msglvl, "DMAE: opcode 0x%08x\n"
433                            "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
434                            "comp_addr [%x:%08x]  comp_val 0x%08x\n",
435                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
436                            dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
437                            dmae->comp_val);
438                 else
439                         DP(msglvl, "DMAE: opcode 0x%08x\n"
440                            "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
441                            "comp_addr [%x:%08x]  comp_val 0x%08x\n",
442                            dmae->opcode, dmae->src_addr_lo >> 2,
443                            dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
444                            dmae->comp_val);
445                 break;
446         }
447
448         for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
449                 DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
450                    i, *(((u32 *)dmae) + i));
451 }
452
453 /* copy command into DMAE command memory and set DMAE command go */
454 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
455 {
456         u32 cmd_offset;
457         int i;
458
459         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
460         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
461                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
462         }
463         REG_WR(bp, dmae_reg_go_c[idx], 1);
464 }
465
466 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
467 {
468         return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
469                            DMAE_CMD_C_ENABLE);
470 }
471
472 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
473 {
474         return opcode & ~DMAE_CMD_SRC_RESET;
475 }
476
477 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
478                              bool with_comp, u8 comp_type)
479 {
480         u32 opcode = 0;
481
482         opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
483                    (dst_type << DMAE_COMMAND_DST_SHIFT));
484
485         opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
486
487         opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
488         opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
489                    (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
490         opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
491
492 #ifdef __BIG_ENDIAN
493         opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
494 #else
495         opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
496 #endif
497         if (with_comp)
498                 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
499         return opcode;
500 }
501
502 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
503                                       struct dmae_command *dmae,
504                                       u8 src_type, u8 dst_type)
505 {
506         memset(dmae, 0, sizeof(struct dmae_command));
507
508         /* set the opcode */
509         dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
510                                          true, DMAE_COMP_PCI);
511
512         /* fill in the completion parameters */
513         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
514         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
515         dmae->comp_val = DMAE_COMP_VAL;
516 }
517
518 /* issue a dmae command over the init-channel and wait for completion */
519 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
520                                u32 *comp)
521 {
522         int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
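        /* Poll budget: 4000 iterations at 50 usec each is roughly a 200 ms
         * timeout (400000 iterations on slow emulation/FPGA platforms).
         */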
523         int rc = 0;
524
525         bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);
526
527         /* Lock the DMAE channel. Disable BHs to prevent a deadlock,
528          * since this code is called both from syscall context and from
529          * the ndo_set_rx_mode() flow, which may be called from BH context.
530          */
531
532         spin_lock_bh(&bp->dmae_lock);
533
534         /* reset completion */
535         *comp = 0;
536
537         /* post the command on the channel used for initializations */
538         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
539
540         /* wait for completion */
541         udelay(5);
542         while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
543
544                 if (!cnt ||
545                     (bp->recovery_state != BNX2X_RECOVERY_DONE &&
546                      bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
547                         BNX2X_ERR("DMAE timeout!\n");
548                         rc = DMAE_TIMEOUT;
549                         goto unlock;
550                 }
551                 cnt--;
552                 udelay(50);
553         }
554         if (*comp & DMAE_PCI_ERR_FLAG) {
555                 BNX2X_ERR("DMAE PCI error!\n");
556                 rc = DMAE_PCI_ERROR;
557         }
558
559 unlock:
560
561         spin_unlock_bh(&bp->dmae_lock);
562
563         return rc;
564 }
565
566 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
567                       u32 len32)
568 {
569         int rc;
570         struct dmae_command dmae;
571
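        /* If DMAE is not ready yet (early init), fall back to plain register
         * writes. Note the fallback reads from the wb_data scratch buffer
         * rather than dma_addr; callers evidently stage the data there first.
         */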
572         if (!bp->dmae_ready) {
573                 u32 *data = bnx2x_sp(bp, wb_data[0]);
574
575                 if (CHIP_IS_E1(bp))
576                         bnx2x_init_ind_wr(bp, dst_addr, data, len32);
577                 else
578                         bnx2x_init_str_wr(bp, dst_addr, data, len32);
579                 return;
580         }
581
582         /* set opcode and fixed command fields */
583         bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
584
585         /* fill in addresses and len */
586         dmae.src_addr_lo = U64_LO(dma_addr);
587         dmae.src_addr_hi = U64_HI(dma_addr);
588         dmae.dst_addr_lo = dst_addr >> 2;
589         dmae.dst_addr_hi = 0;
590         dmae.len = len32;
591
592         /* issue the command and wait for completion */
593         rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
594         if (rc) {
595                 BNX2X_ERR("DMAE returned failure %d\n", rc);
596 #ifdef BNX2X_STOP_ON_ERROR
597                 bnx2x_panic();
598 #endif
599         }
600 }
601
602 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
603 {
604         int rc;
605         struct dmae_command dmae;
606
607         if (!bp->dmae_ready) {
608                 u32 *data = bnx2x_sp(bp, wb_data[0]);
609                 int i;
610
611                 if (CHIP_IS_E1(bp))
612                         for (i = 0; i < len32; i++)
613                                 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
614                 else
615                         for (i = 0; i < len32; i++)
616                                 data[i] = REG_RD(bp, src_addr + i*4);
617
618                 return;
619         }
620
621         /* set opcode and fixed command fields */
622         bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
623
624         /* fill in addresses and len */
625         dmae.src_addr_lo = src_addr >> 2;
626         dmae.src_addr_hi = 0;
627         dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
628         dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
629         dmae.len = len32;
630
631         /* issue the command and wait for completion */
632         rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
633         if (rc) {
634                 BNX2X_ERR("DMAE returned failure %d\n", rc);
635 #ifdef BNX2X_STOP_ON_ERROR
636                 bnx2x_panic();
637 #endif
638         }
639 }
640
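/* Issue a DMAE write in chunks of at most DMAE_LEN32_WR_MAX dwords.
 * "len" counts 32-bit words while "offset" advances in bytes, hence
 * the "* 4" when stepping the offset.
 */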
641 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
642                                       u32 addr, u32 len)
643 {
644         int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
645         int offset = 0;
646
647         while (len > dmae_wr_max) {
648                 bnx2x_write_dmae(bp, phys_addr + offset,
649                                  addr + offset, dmae_wr_max);
650                 offset += dmae_wr_max * 4;
651                 len -= dmae_wr_max;
652         }
653
654         bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
655 }
656
657 enum storms {
658            XSTORM,
659            TSTORM,
660            CSTORM,
661            USTORM,
662            MAX_STORMS
663 };
664
665 #define STORMS_NUM 4
666 #define REGS_IN_ENTRY 4
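/* Each STORM assert-list entry spans REGS_IN_ENTRY consecutive u32 registers. */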
667
668 static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
669                                               enum storms storm,
670                                               int entry)
671 {
672         switch (storm) {
673         case XSTORM:
674                 return XSTORM_ASSERT_LIST_OFFSET(entry);
675         case TSTORM:
676                 return TSTORM_ASSERT_LIST_OFFSET(entry);
677         case CSTORM:
678                 return CSTORM_ASSERT_LIST_OFFSET(entry);
679         case USTORM:
680                 return USTORM_ASSERT_LIST_OFFSET(entry);
681         case MAX_STORMS:
682         default:
683                 BNX2X_ERR("unknown storm\n");
684         }
685         return -EINVAL;
686 }
687
688 static int bnx2x_mc_assert(struct bnx2x *bp)
689 {
690         char last_idx;
691         int i, j, rc = 0;
692         enum storms storm;
693         u32 regs[REGS_IN_ENTRY];
694         u32 bar_storm_intmem[STORMS_NUM] = {
695                 BAR_XSTRORM_INTMEM,
696                 BAR_TSTRORM_INTMEM,
697                 BAR_CSTRORM_INTMEM,
698                 BAR_USTRORM_INTMEM
699         };
700         u32 storm_assert_list_index[STORMS_NUM] = {
701                 XSTORM_ASSERT_LIST_INDEX_OFFSET,
702                 TSTORM_ASSERT_LIST_INDEX_OFFSET,
703                 CSTORM_ASSERT_LIST_INDEX_OFFSET,
704                 USTORM_ASSERT_LIST_INDEX_OFFSET
705         };
706         char *storms_string[STORMS_NUM] = {
707                 "XSTORM",
708                 "TSTORM",
709                 "CSTORM",
710                 "USTORM"
711         };
712
713         for (storm = XSTORM; storm < MAX_STORMS; storm++) {
714                 last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
715                                    storm_assert_list_index[storm]);
716                 if (last_idx)
717                         BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
718                                   storms_string[storm], last_idx);
719
720                 /* print the asserts */
721                 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
722                         /* read a single assert entry */
723                         for (j = 0; j < REGS_IN_ENTRY; j++)
724                                 regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
725                                           bnx2x_get_assert_list_entry(bp,
726                                                                       storm,
727                                                                       i) +
728                                           sizeof(u32) * j);
729
730                         /* log entry if it contains a valid assert */
731                         if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
732                                 BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
733                                           storms_string[storm], i, regs[3],
734                                           regs[2], regs[1], regs[0]);
735                                 rc++;
736                         } else {
737                                 break;
738                         }
739                 }
740         }
741
742         BNX2X_ERR("Chip Revision: %s, /*(DEBLOBBED)*/\n",
743                   CHIP_IS_E1(bp) ? "everest1" :
744                   CHIP_IS_E1H(bp) ? "everest1h" :
745                   CHIP_IS_E2(bp) ? "everest2" : "everest3"/*(DEBLOBBED)*/);
746
747         return rc;
748 }
749
750 #define MCPR_TRACE_BUFFER_SIZE  (0x800)
751 #define SCRATCH_BUFFER_SIZE(bp) \
752         (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
753
754 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
755 {
756         u32 addr, val;
757         u32 mark, offset;
758         __be32 data[9];
759         int word;
760         u32 trace_shmem_base;
761         if (BP_NOMCP(bp)) {
762                 BNX2X_ERR("NO MCP - can not dump\n");
763                 return;
764         }
765         netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
766                 (bp->common.bc_ver & 0xff0000) >> 16,
767                 (bp->common.bc_ver & 0xff00) >> 8,
768                 (bp->common.bc_ver & 0xff));
769
770         val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
771         if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
772                 BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
773
774         if (BP_PATH(bp) == 0)
775                 trace_shmem_base = bp->common.shmem_base;
776         else
777                 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
778
779         /* sanity */
780         if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
781             trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
782                                 SCRATCH_BUFFER_SIZE(bp)) {
783                 BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
784                           trace_shmem_base);
785                 return;
786         }
787
788         addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
789
790         /* validate TRCB signature */
791         mark = REG_RD(bp, addr);
792         if (mark != MFW_TRACE_SIGNATURE) {
793                 BNX2X_ERR("Trace buffer signature is missing\n");
794                 return;
795         }
796
797         /* read cyclic buffer pointer */
798         addr += 4;
799         mark = REG_RD(bp, addr);
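        /* The mark appears to be a pointer in the MCP's own address map,
         * where the scratchpad is based at 0x08000000: round it up to a
         * dword and rebase it onto the host view of the scratchpad.
         */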
800         mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
801         if (mark >= trace_shmem_base || mark < addr + 4) {
802                 BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
803                 return;
804         }
805         printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
806
807         printk("%s", lvl);
808
809         /* dump buffer after the mark */
810         for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
811                 for (word = 0; word < 8; word++)
812                         data[word] = htonl(REG_RD(bp, offset + 4*word));
813                 data[8] = 0x0;
814                 pr_cont("%s", (char *)data);
815         }
816
817         /* dump buffer before the mark */
818         for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
819                 for (word = 0; word < 8; word++)
820                         data[word] = htonl(REG_RD(bp, offset + 4*word));
821                 data[8] = 0x0;
822                 pr_cont("%s", (char *)data);
823         }
824         printk("%s" "end of fw dump\n", lvl);
825 }
826
827 static void bnx2x_fw_dump(struct bnx2x *bp)
828 {
829         bnx2x_fw_dump_lvl(bp, KERN_ERR);
830 }
831
832 static void bnx2x_hc_int_disable(struct bnx2x *bp)
833 {
834         int port = BP_PORT(bp);
835         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
836         u32 val = REG_RD(bp, addr);
837
838         /* In E1 we must use only PCI configuration space to disable the
839          * MSI/MSIX capability;
840          * it is forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block.
841          */
842         if (CHIP_IS_E1(bp)) {
843                 /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the
844                  * mask register to prevent the HC from sending interrupts
845                  * after we exit this function.
846                  */
847                 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
848
849                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
850                          HC_CONFIG_0_REG_INT_LINE_EN_0 |
851                          HC_CONFIG_0_REG_ATTN_BIT_EN_0);
852         } else
853                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
854                          HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
855                          HC_CONFIG_0_REG_INT_LINE_EN_0 |
856                          HC_CONFIG_0_REG_ATTN_BIT_EN_0);
857
858         DP(NETIF_MSG_IFDOWN,
859            "write %x to HC %d (addr 0x%x)\n",
860            val, port, addr);
861
862         /* flush all outstanding writes */
863         mmiowb();
864
865         REG_WR(bp, addr, val);
866         if (REG_RD(bp, addr) != val)
867                 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
868 }
869
870 static void bnx2x_igu_int_disable(struct bnx2x *bp)
871 {
872         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
873
874         val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
875                  IGU_PF_CONF_INT_LINE_EN |
876                  IGU_PF_CONF_ATTN_BIT_EN);
877
878         DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
879
880         /* flush all outstanding writes */
881         mmiowb();
882
883         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
884         if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
885                 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
886 }
887
888 static void bnx2x_int_disable(struct bnx2x *bp)
889 {
890         if (bp->common.int_block == INT_BLOCK_HC)
891                 bnx2x_hc_int_disable(bp);
892         else
893                 bnx2x_igu_int_disable(bp);
894 }
895
896 void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
897 {
898         int i;
899         u16 j;
900         struct hc_sp_status_block_data sp_sb_data;
901         int func = BP_FUNC(bp);
902 #ifdef BNX2X_STOP_ON_ERROR
903         u16 start = 0, end = 0;
904         u8 cos;
905 #endif
906         if (IS_PF(bp) && disable_int)
907                 bnx2x_int_disable(bp);
908
909         bp->stats_state = STATS_STATE_DISABLED;
910         bp->eth_stats.unrecoverable_error++;
911         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
912
913         BNX2X_ERR("begin crash dump -----------------\n");
914
915         /* Indices */
916         /* Common */
917         if (IS_PF(bp)) {
918                 struct host_sp_status_block *def_sb = bp->def_status_blk;
919                 int data_size, cstorm_offset;
920
921                 BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
922                           bp->def_idx, bp->def_att_idx, bp->attn_state,
923                           bp->spq_prod_idx, bp->stats_counter);
924                 BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
925                           def_sb->atten_status_block.attn_bits,
926                           def_sb->atten_status_block.attn_bits_ack,
927                           def_sb->atten_status_block.status_block_id,
928                           def_sb->atten_status_block.attn_bits_index);
929                 BNX2X_ERR("     def (");
930                 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
931                         pr_cont("0x%x%s",
932                                 def_sb->sp_sb.index_values[i],
933                                 (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");
934
935                 data_size = sizeof(struct hc_sp_status_block_data) /
936                             sizeof(u32);
937                 cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
938                 for (i = 0; i < data_size; i++)
939                         *((u32 *)&sp_sb_data + i) =
940                                 REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
941                                            i * sizeof(u32));
942
943                 pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
944                         sp_sb_data.igu_sb_id,
945                         sp_sb_data.igu_seg_id,
946                         sp_sb_data.p_func.pf_id,
947                         sp_sb_data.p_func.vnic_id,
948                         sp_sb_data.p_func.vf_id,
949                         sp_sb_data.p_func.vf_valid,
950                         sp_sb_data.state);
951         }
952
953         for_each_eth_queue(bp, i) {
954                 struct bnx2x_fastpath *fp = &bp->fp[i];
955                 int loop;
956                 struct hc_status_block_data_e2 sb_data_e2;
957                 struct hc_status_block_data_e1x sb_data_e1x;
958                 struct hc_status_block_sm  *hc_sm_p =
959                         CHIP_IS_E1x(bp) ?
960                         sb_data_e1x.common.state_machine :
961                         sb_data_e2.common.state_machine;
962                 struct hc_index_data *hc_index_p =
963                         CHIP_IS_E1x(bp) ?
964                         sb_data_e1x.index_data :
965                         sb_data_e2.index_data;
966                 u8 data_size, cos;
967                 u32 *sb_data_p;
968                 struct bnx2x_fp_txdata txdata;
969
970                 if (!bp->fp)
971                         break;
972
973                 if (!fp->rx_cons_sb)
974                         continue;
975
976                 /* Rx */
977                 BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
978                           i, fp->rx_bd_prod, fp->rx_bd_cons,
979                           fp->rx_comp_prod,
980                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
981                 BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)  fp_hc_idx(0x%x)\n",
982                           fp->rx_sge_prod, fp->last_max_sge,
983                           le16_to_cpu(fp->fp_hc_idx));
984
985                 /* Tx */
986                 for_each_cos_in_tx_queue(fp, cos)
987                 {
988                         if (!fp->txdata_ptr[cos])
989                                 break;
990
991                         txdata = *fp->txdata_ptr[cos];
992
993                         if (!txdata.tx_cons_sb)
994                                 continue;
995
996                         BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
997                                   i, txdata.tx_pkt_prod,
998                                   txdata.tx_pkt_cons, txdata.tx_bd_prod,
999                                   txdata.tx_bd_cons,
1000                                   le16_to_cpu(*txdata.tx_cons_sb));
1001                 }
1002
1003                 loop = CHIP_IS_E1x(bp) ?
1004                         HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
1005
1006                 /* host sb data */
1007
1008                 if (IS_FCOE_FP(fp))
1009                         continue;
1010
1011                 BNX2X_ERR("     run indexes (");
1012                 for (j = 0; j < HC_SB_MAX_SM; j++)
1013                         pr_cont("0x%x%s",
1014                                fp->sb_running_index[j],
1015                                (j == HC_SB_MAX_SM - 1) ? ")" : " ");
1016
1017                 BNX2X_ERR("     indexes (");
1018                 for (j = 0; j < loop; j++)
1019                         pr_cont("0x%x%s",
1020                                fp->sb_index_values[j],
1021                                (j == loop - 1) ? ")" : " ");
1022
1023                 /* VF cannot access the FW reflection of the status block */
1024                 if (IS_VF(bp))
1025                         continue;
1026
1027                 /* fw sb data */
1028                 data_size = CHIP_IS_E1x(bp) ?
1029                         sizeof(struct hc_status_block_data_e1x) :
1030                         sizeof(struct hc_status_block_data_e2);
1031                 data_size /= sizeof(u32);
1032                 sb_data_p = CHIP_IS_E1x(bp) ?
1033                         (u32 *)&sb_data_e1x :
1034                         (u32 *)&sb_data_e2;
1035                 /* copy sb data in here */
1036                 for (j = 0; j < data_size; j++)
1037                         *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
1038                                 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
1039                                 j * sizeof(u32));
1040
1041                 if (!CHIP_IS_E1x(bp)) {
1042                         pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
1043                                 sb_data_e2.common.p_func.pf_id,
1044                                 sb_data_e2.common.p_func.vf_id,
1045                                 sb_data_e2.common.p_func.vf_valid,
1046                                 sb_data_e2.common.p_func.vnic_id,
1047                                 sb_data_e2.common.same_igu_sb_1b,
1048                                 sb_data_e2.common.state);
1049                 } else {
1050                         pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
1051                                 sb_data_e1x.common.p_func.pf_id,
1052                                 sb_data_e1x.common.p_func.vf_id,
1053                                 sb_data_e1x.common.p_func.vf_valid,
1054                                 sb_data_e1x.common.p_func.vnic_id,
1055                                 sb_data_e1x.common.same_igu_sb_1b,
1056                                 sb_data_e1x.common.state);
1057                 }
1058
1059                 /* SB_SMs data */
1060                 for (j = 0; j < HC_SB_MAX_SM; j++) {
1061                         pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x)  igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
1062                                 j, hc_sm_p[j].__flags,
1063                                 hc_sm_p[j].igu_sb_id,
1064                                 hc_sm_p[j].igu_seg_id,
1065                                 hc_sm_p[j].time_to_expire,
1066                                 hc_sm_p[j].timer_value);
1067                 }
1068
1069                 /* Indices data */
1070                 for (j = 0; j < loop; j++) {
1071                         pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
1072                                hc_index_p[j].flags,
1073                                hc_index_p[j].timeout);
1074                 }
1075         }
1076
1077 #ifdef BNX2X_STOP_ON_ERROR
1078         if (IS_PF(bp)) {
1079                 /* event queue */
1080                 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
1081                 for (i = 0; i < NUM_EQ_DESC; i++) {
1082                         u32 *data = (u32 *)&bp->eq_ring[i].message.data;
1083
1084                         BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
1085                                   i, bp->eq_ring[i].message.opcode,
1086                                   bp->eq_ring[i].message.error);
1087                         BNX2X_ERR("data: %x %x %x\n",
1088                                   data[0], data[1], data[2]);
1089                 }
1090         }
1091
1092         /* Rings */
1093         /* Rx */
1094         for_each_valid_rx_queue(bp, i) {
1095                 struct bnx2x_fastpath *fp = &bp->fp[i];
1096
1097                 if (!bp->fp)
1098                         break;
1099
1100                 if (!fp->rx_cons_sb)
1101                         continue;
1102
1103                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1104                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
1105                 for (j = start; j != end; j = RX_BD(j + 1)) {
1106                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1107                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1108
1109                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
1110                                   i, j, rx_bd[1], rx_bd[0], sw_bd->data);
1111                 }
1112
1113                 start = RX_SGE(fp->rx_sge_prod);
1114                 end = RX_SGE(fp->last_max_sge);
1115                 for (j = start; j != end; j = RX_SGE(j + 1)) {
1116                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1117                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1118
1119                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
1120                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
1121                 }
1122
1123                 start = RCQ_BD(fp->rx_comp_cons - 10);
1124                 end = RCQ_BD(fp->rx_comp_cons + 503);
1125                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
1126                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1127
1128                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1129                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
1130                 }
1131         }
1132
1133         /* Tx */
1134         for_each_valid_tx_queue(bp, i) {
1135                 struct bnx2x_fastpath *fp = &bp->fp[i];
1136
1137                 if (!bp->fp)
1138                         break;
1139
1140                 for_each_cos_in_tx_queue(fp, cos) {
1141                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1142
1143                         if (!fp->txdata_ptr[cos])
1144                                 break;
1145
1146                         if (!txdata->tx_cons_sb)
1147                                 continue;
1148
1149                         start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
1150                         end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
1151                         for (j = start; j != end; j = TX_BD(j + 1)) {
1152                                 struct sw_tx_bd *sw_bd =
1153                                         &txdata->tx_buf_ring[j];
1154
1155                                 BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
1156                                           i, cos, j, sw_bd->skb,
1157                                           sw_bd->first_bd);
1158                         }
1159
1160                         start = TX_BD(txdata->tx_bd_cons - 10);
1161                         end = TX_BD(txdata->tx_bd_cons + 254);
1162                         for (j = start; j != end; j = TX_BD(j + 1)) {
1163                                 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
1164
1165                                 BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
1166                                           i, cos, j, tx_bd[0], tx_bd[1],
1167                                           tx_bd[2], tx_bd[3]);
1168                         }
1169                 }
1170         }
1171 #endif
1172         if (IS_PF(bp)) {
1173                 bnx2x_fw_dump(bp);
1174                 bnx2x_mc_assert(bp);
1175         }
1176         BNX2X_ERR("end crash dump -----------------\n");
1177 }
1178
1179 /*
1180  * FLR Support for E2
1181  *
1182  * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
1183  * initialization.
1184  */
1185 #define FLR_WAIT_USEC           10000   /* 10 milliseconds */
1186 #define FLR_WAIT_INTERVAL       50      /* usec */
1187 #define FLR_POLL_CNT            (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
1188
1189 struct pbf_pN_buf_regs {
1190         int pN;
1191         u32 init_crd;
1192         u32 crd;
1193         u32 crd_freed;
1194 };
1195
1196 struct pbf_pN_cmd_regs {
1197         int pN;
1198         u32 lines_occup;
1199         u32 lines_freed;
1200 };
1201
1202 static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
1203                                      struct pbf_pN_buf_regs *regs,
1204                                      u32 poll_count)
1205 {
1206         u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
1207         u32 cur_cnt = poll_count;
1208
1209         crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
1210         crd = crd_start = REG_RD(bp, regs->crd);
1211         init_crd = REG_RD(bp, regs->init_crd);
1212
1213         DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
1214         DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
1215         DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
1216
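        /* Wait until the credit count returns to its initial value. The
         * "freed" counter may wrap, so the signed delta (SUB_S32) is
         * compared against the outstanding credit deficit.
         */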
1217         while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
1218                (init_crd - crd_start))) {
1219                 if (cur_cnt--) {
1220                         udelay(FLR_WAIT_INTERVAL);
1221                         crd = REG_RD(bp, regs->crd);
1222                         crd_freed = REG_RD(bp, regs->crd_freed);
1223                 } else {
1224                         DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
1225                            regs->pN);
1226                         DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
1227                            regs->pN, crd);
1228                         DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
1229                            regs->pN, crd_freed);
1230                         break;
1231                 }
1232         }
1233         DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
1234            poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1235 }
1236
1237 static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1238                                      struct pbf_pN_cmd_regs *regs,
1239                                      u32 poll_count)
1240 {
1241         u32 occup, to_free, freed, freed_start;
1242         u32 cur_cnt = poll_count;
1243
1244         occup = to_free = REG_RD(bp, regs->lines_occup);
1245         freed = freed_start = REG_RD(bp, regs->lines_freed);
1246
1247         DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
1248         DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
1249
1250         while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
1251                 if (cur_cnt--) {
1252                         udelay(FLR_WAIT_INTERVAL);
1253                         occup = REG_RD(bp, regs->lines_occup);
1254                         freed = REG_RD(bp, regs->lines_freed);
1255                 } else {
1256                         DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
1257                            regs->pN);
1258                         DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
1259                            regs->pN, occup);
1260                         DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
1261                            regs->pN, freed);
1262                         break;
1263                 }
1264         }
1265         DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
1266            poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1267 }
1268
1269 static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1270                                     u32 expected, u32 poll_count)
1271 {
1272         u32 cur_cnt = poll_count;
1273         u32 val;
1274
1275         while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
1276                 udelay(FLR_WAIT_INTERVAL);
1277
1278         return val;
1279 }
1280
1281 int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1282                                     char *msg, u32 poll_cnt)
1283 {
1284         u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1285         if (val != 0) {
1286                 BNX2X_ERR("%s usage count=%d\n", msg, val);
1287                 return 1;
1288         }
1289         return 0;
1290 }
1291
1292 /* Common routines with VF FLR cleanup */
1293 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
1294 {
1295         /* adjust polling timeout */
1296         if (CHIP_REV_IS_EMUL(bp))
1297                 return FLR_POLL_CNT * 2000;
1298
1299         if (CHIP_REV_IS_FPGA(bp))
1300                 return FLR_POLL_CNT * 120;
1301
1302         return FLR_POLL_CNT;
1303 }
1304
1305 void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1306 {
1307         struct pbf_pN_cmd_regs cmd_regs[] = {
1308                 {0, (CHIP_IS_E3B0(bp)) ?
1309                         PBF_REG_TQ_OCCUPANCY_Q0 :
1310                         PBF_REG_P0_TQ_OCCUPANCY,
1311                     (CHIP_IS_E3B0(bp)) ?
1312                         PBF_REG_TQ_LINES_FREED_CNT_Q0 :
1313                         PBF_REG_P0_TQ_LINES_FREED_CNT},
1314                 {1, (CHIP_IS_E3B0(bp)) ?
1315                         PBF_REG_TQ_OCCUPANCY_Q1 :
1316                         PBF_REG_P1_TQ_OCCUPANCY,
1317                     (CHIP_IS_E3B0(bp)) ?
1318                         PBF_REG_TQ_LINES_FREED_CNT_Q1 :
1319                         PBF_REG_P1_TQ_LINES_FREED_CNT},
1320                 {4, (CHIP_IS_E3B0(bp)) ?
1321                         PBF_REG_TQ_OCCUPANCY_LB_Q :
1322                         PBF_REG_P4_TQ_OCCUPANCY,
1323                     (CHIP_IS_E3B0(bp)) ?
1324                         PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
1325                         PBF_REG_P4_TQ_LINES_FREED_CNT}
1326         };
1327
1328         struct pbf_pN_buf_regs buf_regs[] = {
1329                 {0, (CHIP_IS_E3B0(bp)) ?
1330                         PBF_REG_INIT_CRD_Q0 :
1331                         PBF_REG_P0_INIT_CRD ,
1332                     (CHIP_IS_E3B0(bp)) ?
1333                         PBF_REG_CREDIT_Q0 :
1334                         PBF_REG_P0_CREDIT,
1335                     (CHIP_IS_E3B0(bp)) ?
1336                         PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
1337                         PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
1338                 {1, (CHIP_IS_E3B0(bp)) ?
1339                         PBF_REG_INIT_CRD_Q1 :
1340                         PBF_REG_P1_INIT_CRD,
1341                     (CHIP_IS_E3B0(bp)) ?
1342                         PBF_REG_CREDIT_Q1 :
1343                         PBF_REG_P1_CREDIT,
1344                     (CHIP_IS_E3B0(bp)) ?
1345                         PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
1346                         PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
1347                 {4, (CHIP_IS_E3B0(bp)) ?
1348                         PBF_REG_INIT_CRD_LB_Q :
1349                         PBF_REG_P4_INIT_CRD,
1350                     (CHIP_IS_E3B0(bp)) ?
1351                         PBF_REG_CREDIT_LB_Q :
1352                         PBF_REG_P4_CREDIT,
1353                     (CHIP_IS_E3B0(bp)) ?
1354                         PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
1355                         PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
1356         };
1357
1358         int i;
1359
1360         /* Verify the command queues are flushed P0, P1, P4 */
1361         for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1362                 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1363
1364         /* Verify the transmission buffers are flushed P0, P1, P4 */
1365         for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1366                 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1367 }
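
/* Note on the register tables above: E3B0 exposes per-queue PBF
 * registers (Q0/Q1/LB_Q) while older chips use per-port P0/P1/P4
 * registers; pN 4 maps to the loopback queue on E3B0, which is why
 * the pN set is {0, 1, 4} rather than contiguous.
 */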
1368
1369 #define OP_GEN_PARAM(param) \
1370         (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1371
1372 #define OP_GEN_TYPE(type) \
1373         (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1374
1375 #define OP_GEN_AGG_VECT(index) \
1376         (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1377
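/* Illustrative composition of the aggregated-interrupt command word
 * using the macros above; each macro shifts its value into its field
 * and masks off anything that would spill into a neighbouring field:
 *
 *	u32 cmd = OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX) |
 *		  OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE) |
 *		  OP_GEN_AGG_VECT(func_id) |
 *		  (1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT);
 *
 * (func_id here is a stand-in for the function being cleaned up, as
 * in bnx2x_send_final_clnup() below.)
 */
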
1378 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
1379 {
1380         u32 op_gen_command = 0;
1381         u32 comp_addr = BAR_CSTRORM_INTMEM +
1382                         CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1383         int ret = 0;
1384
1385         if (REG_RD(bp, comp_addr)) {
1386                 BNX2X_ERR("Cleanup complete was not 0 before sending\n");
1387                 return 1;
1388         }
1389
1390         op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1391         op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1392         op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
1393         op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1394
1395         DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
1396         REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
1397
1398         if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1399                 BNX2X_ERR("FW final cleanup did not succeed\n");
1400                 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
1401                    (REG_RD(bp, comp_addr)));
1402                 bnx2x_panic();
1403                 return 1;
1404         }
1405         /* Zero completion for next FLR */
1406         REG_WR(bp, comp_addr, 0);
1407
1408         return ret;
1409 }
1410
1411 u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1412 {
1413         u16 status;
1414
1415         pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
1416         return status & PCI_EXP_DEVSTA_TRPND;
1417 }
1418
1419 /* PF FLR specific routines */
1421 static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1422 {
1423         /* wait for CFC PF usage-counter to zero (includes all the VFs) */
1424         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1425                         CFC_REG_NUM_LCIDS_INSIDE_PF,
1426                         "CFC PF usage counter timed out",
1427                         poll_cnt))
1428                 return 1;
1429
1430         /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
1431         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1432                         DORQ_REG_PF_USAGE_CNT,
1433                         "DQ PF usage counter timed out",
1434                         poll_cnt))
1435                 return 1;
1436
1437         /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
1438         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1439                         QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1440                         "QM PF usage counter timed out",
1441                         poll_cnt))
1442                 return 1;
1443
1444         /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
1445         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1446                         TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1447                         "Timers VNIC usage counter timed out",
1448                         poll_cnt))
1449                 return 1;
1450         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1451                         TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1452                         "Timers NUM_SCANS usage counter timed out",
1453                         poll_cnt))
1454                 return 1;
1455
1456         /* Wait DMAE PF usage counter to zero */
1457         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1458                         dmae_reg_go_c[INIT_DMAE_C(bp)],
1459                         "DMAE command register timed out",
1460                         poll_cnt))
1461                 return 1;
1462
1463         return 0;
1464 }
1465
1466 static void bnx2x_hw_enable_status(struct bnx2x *bp)
1467 {
1468         u32 val;
1469
1470         val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1471         DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1472
1473         val = REG_RD(bp, PBF_REG_DISABLE_PF);
1474         DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1475
1476         val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1477         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1478
1479         val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1480         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1481
1482         val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1483         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1484
1485         val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1486         DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1487
1488         val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1489         DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1490
1491         val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1492         DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1493            val);
1494 }
1495
1496 static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1497 {
1498         u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1499
1500         DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1501
1502         /* Re-enable PF target read access */
1503         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1504
1505         /* Poll HW usage counters */
1506         DP(BNX2X_MSG_SP, "Polling usage counters\n");
1507         if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1508                 return -EBUSY;
1509
1510         /* Zero the igu 'trailing edge' and 'leading edge' */
1511
1512         /* Send the FW cleanup command */
1513         if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1514                 return -EBUSY;
1515
1516         /* ATC cleanup */
1517
1518         /* Verify TX hw is flushed */
1519         bnx2x_tx_hw_flushed(bp, poll_cnt);
1520
1521         /* Wait 100ms (not adjusted according to platform) */
1522         msleep(100);
1523
1524         /* Verify no pending pci transactions */
1525         if (bnx2x_is_pcie_pending(bp->pdev))
1526                 BNX2X_ERR("PCIE Transactions still pending\n");
1527
1528         /* Debug */
1529         bnx2x_hw_enable_status(bp);
1530
1531         /*
1532          * Master enable - set here because WB DMAE writes occur before
1533          * this register is re-initialized in the regular function init
1534          */
1535         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1536
1537         return 0;
1538 }
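
/* Hypothetical call pattern for the cleanup above (sketch only; the
 * actual caller lives elsewhere in the driver):
 *
 *	if (bnx2x_pf_flr_clnup(bp))
 *		return -EBUSY;	(HW still owns resources, abort)
 *
 * run before any regular function init touches the device.
 */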
1539
1540 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1541 {
1542         int port = BP_PORT(bp);
1543         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1544         u32 val = REG_RD(bp, addr);
1545         bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1546         bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1547         bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1548
1549         if (msix) {
1550                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1551                          HC_CONFIG_0_REG_INT_LINE_EN_0);
1552                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1553                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1554                 if (single_msix)
1555                         val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1556         } else if (msi) {
1557                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1558                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1559                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1560                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1561         } else {
1562                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1563                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1564                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
1565                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1566
1567                 if (!CHIP_IS_E1(bp)) {
1568                         DP(NETIF_MSG_IFUP,
1569                            "write %x to HC %d (addr 0x%x)\n", val, port, addr);
1570
1571                         REG_WR(bp, addr, val);
1572
1573                         val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1574                 }
1575         }
1576
1577         if (CHIP_IS_E1(bp))
1578                 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1579
1580         DP(NETIF_MSG_IFUP,
1581            "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1582            (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1583
1584         REG_WR(bp, addr, val);
1585         /*
1586          * Ensure that HC_CONFIG is written before leading/trailing edge config
1587          */
1588         mmiowb();
1589         barrier();
1590
1591         if (!CHIP_IS_E1(bp)) {
1592                 /* init leading/trailing edge */
1593                 if (IS_MF(bp)) {
1594                         val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1595                         if (bp->port.pmf)
1596                                 /* enable nig and gpio3 attention */
1597                                 val |= 0x1100;
1598                 } else
1599                         val = 0xffff;
1600
1601                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1602                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1603         }
1604
1605         /* Make sure that interrupts are indeed enabled from here on */
1606         mmiowb();
1607 }
1608
1609 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1610 {
1611         u32 val;
1612         bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1613         bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1614         bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1615
1616         val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1617
1618         if (msix) {
1619                 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1620                          IGU_PF_CONF_SINGLE_ISR_EN);
1621                 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1622                         IGU_PF_CONF_ATTN_BIT_EN);
1623
1624                 if (single_msix)
1625                         val |= IGU_PF_CONF_SINGLE_ISR_EN;
1626         } else if (msi) {
1627                 val &= ~IGU_PF_CONF_INT_LINE_EN;
1628                 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1629                         IGU_PF_CONF_ATTN_BIT_EN |
1630                         IGU_PF_CONF_SINGLE_ISR_EN);
1631         } else {
1632                 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1633                 val |= (IGU_PF_CONF_INT_LINE_EN |
1634                         IGU_PF_CONF_ATTN_BIT_EN |
1635                         IGU_PF_CONF_SINGLE_ISR_EN);
1636         }
1637
1638         /* Clean previous status - need to configure igu prior to ack */
1639         if ((!msix) || single_msix) {
1640                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1641                 bnx2x_ack_int(bp);
1642         }
1643
1644         val |= IGU_PF_CONF_FUNC_EN;
1645
1646         DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
1647            val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1648
1649         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1650
1651         if (val & IGU_PF_CONF_INT_LINE_EN)
1652                 pci_intx(bp->pdev, true);
1653
1654         barrier();
1655
1656         /* init leading/trailing edge */
1657         if (IS_MF(bp)) {
1658                 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1659                 if (bp->port.pmf)
1660                         /* enable nig and gpio3 attention */
1661                         val |= 0x1100;
1662         } else
1663                 val = 0xffff;
1664
1665         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1666         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1667
1668         /* Make sure that interrupts are indeed enabled from here on */
1669         mmiowb();
1670 }
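
/* Summary of the interrupt-mode selection implemented by both enable
 * routines above:
 *
 *	USING_MSIX_FLAG                          -> MSI-X, per-vector ISRs
 *	USING_MSIX_FLAG + USING_SINGLE_MSIX_FLAG -> MSI-X, one shared ISR
 *	USING_MSI_FLAG                           -> MSI, single ISR
 *	none of the flags                        -> legacy INTx, single ISR
 */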
1671
1672 void bnx2x_int_enable(struct bnx2x *bp)
1673 {
1674         if (bp->common.int_block == INT_BLOCK_HC)
1675                 bnx2x_hc_int_enable(bp);
1676         else
1677                 bnx2x_igu_int_enable(bp);
1678 }
1679
1680 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1681 {
1682         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1683         int i, offset;
1684
1685         if (disable_hw)
1686                 /* prevent the HW from sending interrupts */
1687                 bnx2x_int_disable(bp);
1688
1689         /* make sure all ISRs are done */
1690         if (msix) {
1691                 synchronize_irq(bp->msix_table[0].vector);
1692                 offset = 1;
1693                 if (CNIC_SUPPORT(bp))
1694                         offset++;
1695                 for_each_eth_queue(bp, i)
1696                         synchronize_irq(bp->msix_table[offset++].vector);
1697         } else
1698                 synchronize_irq(bp->pdev->irq);
1699
1700         /* make sure sp_task is not running */
1701         cancel_delayed_work(&bp->sp_task);
1702         cancel_delayed_work(&bp->period_task);
1703         flush_workqueue(bnx2x_wq);
1704 }
1705
1706 /* fast path */
1707
1708 /*
1709  * General service functions
1710  */
1711
1712 /* Return true if the lock was successfully acquired */
1713 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1714 {
1715         u32 lock_status;
1716         u32 resource_bit = (1 << resource);
1717         int func = BP_FUNC(bp);
1718         u32 hw_lock_control_reg;
1719
1720         DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1721            "Trying to take a lock on resource %d\n", resource);
1722
1723         /* Validating that the resource is within range */
1724         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1725                 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1726                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1727                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1728                 return false;
1729         }
1730
1731         if (func <= 5)
1732                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1733         else
1734                 hw_lock_control_reg =
1735                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1736
1737         /* Try to acquire the lock */
1738         REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1739         lock_status = REG_RD(bp, hw_lock_control_reg);
1740         if (lock_status & resource_bit)
1741                 return true;
1742
1743         DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1744            "Failed to get a lock on resource %d\n", resource);
1745         return false;
1746 }
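
/* The try-lock protocol above in short: writing the resource bit to
 * hw_lock_control_reg + 4 requests the lock, and reading
 * hw_lock_control_reg back shows whether this function now owns it.
 * Illustrative use (see the recovery-leader helpers below):
 *
 *	if (bnx2x_trylock_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_LEADER_0))
 *		... this function is now the engine-0 recovery leader ...
 */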
1747
1748 /**
1749  * bnx2x_get_leader_lock_resource - get the recovery leader resource id
1750  *
1751  * @bp: driver handle
1752  *
1753  * Returns the recovery leader resource id according to the engine this function
1754  * belongs to. Currently only 2 engines are supported.
1755  */
1756 static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1757 {
1758         if (BP_PATH(bp))
1759                 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1760         else
1761                 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1762 }
1763
1764 /**
1765  * bnx2x_trylock_leader_lock - try to acquire a leader lock.
1766  *
1767  * @bp: driver handle
1768  *
1769  * Tries to acquire a leader lock for current engine.
1770  */
1771 static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1772 {
1773         return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1774 }
1775
1776 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1777
1778 /* schedule the sp task and mark that interrupt occurred (runs from ISR) */
1779 static int bnx2x_schedule_sp_task(struct bnx2x *bp)
1780 {
1781         /* Set the interrupt occurred bit for the sp-task to recognize it
1782          * must ack the interrupt and transition according to the IGU
1783          * state machine.
1784          */
1785         atomic_set(&bp->interrupt_occurred, 1);
1786
1787         /* The sp_task must execute only after this bit
1788          * is set, otherwise we will get out of sync and miss all
1789          * further interrupts. Hence, the barrier.
1790          */
1791         smp_wmb();
1792
1793         /* schedule sp_task to workqueue */
1794         return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1795 }
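
/* A minimal sketch of the expected consumer side (an assumption -
 * bnx2x_sp_task itself is not shown here), pairing with the
 * smp_wmb() above:
 *
 *	if (atomic_read(&bp->interrupt_occurred)) {
 *		atomic_set(&bp->interrupt_occurred, 0);
 *		... ack the IGU and run its state machine ...
 *	}
 */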
1796
1797 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1798 {
1799         struct bnx2x *bp = fp->bp;
1800         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1801         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1802         enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1803         struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1804
1805         DP(BNX2X_MSG_SP,
1806            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
1807            fp->index, cid, command, bp->state,
1808            rr_cqe->ramrod_cqe.ramrod_type);
1809
1810         /* If cid is within VF range, replace the slowpath object with the
1811          * one corresponding to this VF
1812          */
1813         if (cid >= BNX2X_FIRST_VF_CID  &&
1814             cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
1815                 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
1816
1817         switch (command) {
1818         case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
1819                 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
1820                 drv_cmd = BNX2X_Q_CMD_UPDATE;
1821                 break;
1822
1823         case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
1824                 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
1825                 drv_cmd = BNX2X_Q_CMD_SETUP;
1826                 break;
1827
1828         case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1829                 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1830                 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1831                 break;
1832
1833         case (RAMROD_CMD_ID_ETH_HALT):
1834                 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
1835                 drv_cmd = BNX2X_Q_CMD_HALT;
1836                 break;
1837
1838         case (RAMROD_CMD_ID_ETH_TERMINATE):
1839                 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
1840                 drv_cmd = BNX2X_Q_CMD_TERMINATE;
1841                 break;
1842
1843         case (RAMROD_CMD_ID_ETH_EMPTY):
1844                 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
1845                 drv_cmd = BNX2X_Q_CMD_EMPTY;
1846                 break;
1847
1848         case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
1849                 DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
1850                 drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
1851                 break;
1852
1853         default:
1854                 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1855                           command, fp->index);
1856                 return;
1857         }
1858
1859         if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1860             q_obj->complete_cmd(bp, q_obj, drv_cmd))
1861                 /* q_obj->complete_cmd() failure means that this was
1862                  * an unexpected completion.
1863                  *
1864                  * In this case we don't want to increase the bp->spq_left
1865                  * because apparently we haven't sent this command in the
1866                  * first place.
1867                  */
1868 #ifdef BNX2X_STOP_ON_ERROR
1869                 bnx2x_panic();
1870 #else
1871                 return;
1872 #endif
1873
1874         smp_mb__before_atomic();
1875         atomic_inc(&bp->cq_spq_left);
1876         /* push the change in bp->spq_left towards memory */
1877         smp_mb__after_atomic();
1878
1879         DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1880
1881         if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1882             (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1883                 /* if Q update ramrod is completed for last Q in AFEX vif set
1884                  * flow, then ACK MCP at the end
1885                  *
1886                  * mark pending ACK to MCP bit.
1887                  * prevent case that both bits are cleared.
1888                  * At the end of load/unload driver checks that
1889                  * sp_state is cleared, and this order prevents
1890                  * races
1891                  */
1892                 smp_mb__before_atomic();
1893                 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1894                 wmb();
1895                 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1896                 smp_mb__after_atomic();
1897
1898                 /* schedule the sp task as mcp ack is required */
1899                 bnx2x_schedule_sp_task(bp);
1900         }
1901
1902         return;
1903 }
1904
1905 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1906 {
1907         struct bnx2x *bp = netdev_priv(dev_instance);
1908         u16 status = bnx2x_ack_int(bp);
1909         u16 mask;
1910         int i;
1911         u8 cos;
1912
1913         /* Return here if interrupt is shared and it's not for us */
1914         if (unlikely(status == 0)) {
1915                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1916                 return IRQ_NONE;
1917         }
1918         DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1919
1920 #ifdef BNX2X_STOP_ON_ERROR
1921         if (unlikely(bp->panic))
1922                 return IRQ_HANDLED;
1923 #endif
1924
1925         for_each_eth_queue(bp, i) {
1926                 struct bnx2x_fastpath *fp = &bp->fp[i];
1927
1928                 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1929                 if (status & mask) {
1930                         /* Handle Rx or Tx according to SB id */
1931                         for_each_cos_in_tx_queue(fp, cos)
1932                                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1933                         prefetch(&fp->sb_running_index[SM_RX_ID]);
1934                         napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1935                         status &= ~mask;
1936                 }
1937         }
1938
1939         if (CNIC_SUPPORT(bp)) {
1940                 mask = 0x2;
1941                 if (status & (mask | 0x1)) {
1942                         struct cnic_ops *c_ops = NULL;
1943
1944                         rcu_read_lock();
1945                         c_ops = rcu_dereference(bp->cnic_ops);
1946                         if (c_ops && (bp->cnic_eth_dev.drv_state &
1947                                       CNIC_DRV_STATE_HANDLES_IRQ))
1948                                 c_ops->cnic_handler(bp->cnic_data, NULL);
1949                         rcu_read_unlock();
1950
1951                         status &= ~mask;
1952                 }
1953         }
1954
1955         if (unlikely(status & 0x1)) {
1956
1957                 /* schedule sp task to perform default status block work, ack
1958                  * attentions and enable interrupts.
1959                  */
1960                 bnx2x_schedule_sp_task(bp);
1961
1962                 status &= ~0x1;
1963                 if (!status)
1964                         return IRQ_HANDLED;
1965         }
1966
1967         if (unlikely(status))
1968                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1969                    status);
1970
1971         return IRQ_HANDLED;
1972 }
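
/* Layout of the status word decoded above: bit 0 is the default
 * (slow path) status block, bit 1 belongs to CNIC when it is
 * supported, and each ethernet fastpath i then owns bit
 * (1 + i + CNIC_SUPPORT(bp)) - hence the
 * "0x2 << (fp->index + CNIC_SUPPORT(bp))" mask.
 */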
1973
1974 /* Link */
1975
1976 /*
1977  * General service functions
1978  */
1979
1980 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1981 {
1982         u32 lock_status;
1983         u32 resource_bit = (1 << resource);
1984         int func = BP_FUNC(bp);
1985         u32 hw_lock_control_reg;
1986         int cnt;
1987
1988         /* Validating that the resource is within range */
1989         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1990                 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1991                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1992                 return -EINVAL;
1993         }
1994
1995         if (func <= 5) {
1996                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1997         } else {
1998                 hw_lock_control_reg =
1999                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2000         }
2001
2002         /* Validating that the resource is not already taken */
2003         lock_status = REG_RD(bp, hw_lock_control_reg);
2004         if (lock_status & resource_bit) {
2005                 BNX2X_ERR("lock_status 0x%x  resource_bit 0x%x\n",
2006                    lock_status, resource_bit);
2007                 return -EEXIST;
2008         }
2009
2010         /* Try for 5 seconds, polling every 5ms */
2011         for (cnt = 0; cnt < 1000; cnt++) {
2012                 /* Try to acquire the lock */
2013                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
2014                 lock_status = REG_RD(bp, hw_lock_control_reg);
2015                 if (lock_status & resource_bit)
2016                         return 0;
2017
2018                 usleep_range(5000, 10000);
2019         }
2020         BNX2X_ERR("Timeout\n");
2021         return -EAGAIN;
2022 }
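
/* Typical acquire/release pairing, as used for the GPIO and SPIO
 * accesses later in this file:
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... read-modify-write the shared MISC_REG_GPIO register ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */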
2023
2024 int bnx2x_release_leader_lock(struct bnx2x *bp)
2025 {
2026         return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
2027 }
2028
2029 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
2030 {
2031         u32 lock_status;
2032         u32 resource_bit = (1 << resource);
2033         int func = BP_FUNC(bp);
2034         u32 hw_lock_control_reg;
2035
2036         /* Validating that the resource is within range */
2037         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2038                 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2039                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
2040                 return -EINVAL;
2041         }
2042
2043         if (func <= 5) {
2044                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2045         } else {
2046                 hw_lock_control_reg =
2047                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2048         }
2049
2050         /* Validating that the resource is currently taken */
2051         lock_status = REG_RD(bp, hw_lock_control_reg);
2052         if (!(lock_status & resource_bit)) {
2053                 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
2054                           lock_status, resource_bit);
2055                 return -EFAULT;
2056         }
2057
2058         REG_WR(bp, hw_lock_control_reg, resource_bit);
2059         return 0;
2060 }
2061
2062 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2063 {
2064         /* The GPIO should be swapped if swap register is set and active */
2065         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2066                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2067         int gpio_shift = gpio_num +
2068                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2069         u32 gpio_mask = (1 << gpio_shift);
2070         u32 gpio_reg;
2071         int value;
2072
2073         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2074                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2075                 return -EINVAL;
2076         }
2077
2078         /* read GPIO value */
2079         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2080
2081         /* get the requested pin value */
2082         if ((gpio_reg & gpio_mask) == gpio_mask)
2083                 value = 1;
2084         else
2085                 value = 0;
2086
2087         return value;
2088 }
2089
2090 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2091 {
2092         /* The GPIO should be swapped if swap register is set and active */
2093         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2094                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2095         int gpio_shift = gpio_num +
2096                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2097         u32 gpio_mask = (1 << gpio_shift);
2098         u32 gpio_reg;
2099
2100         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2101                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2102                 return -EINVAL;
2103         }
2104
2105         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2106         /* read GPIO and mask except the float bits */
2107         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2108
2109         switch (mode) {
2110         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2111                 DP(NETIF_MSG_LINK,
2112                    "Set GPIO %d (shift %d) -> output low\n",
2113                    gpio_num, gpio_shift);
2114                 /* clear FLOAT and set CLR */
2115                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2116                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2117                 break;
2118
2119         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2120                 DP(NETIF_MSG_LINK,
2121                    "Set GPIO %d (shift %d) -> output high\n",
2122                    gpio_num, gpio_shift);
2123                 /* clear FLOAT and set SET */
2124                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2125                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2126                 break;
2127
2128         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2129                 DP(NETIF_MSG_LINK,
2130                    "Set GPIO %d (shift %d) -> input\n",
2131                    gpio_num, gpio_shift);
2132                 /* set FLOAT */
2133                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2134                 break;
2135
2136         default:
2137                 break;
2138         }
2139
2140         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2141         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2142
2143         return 0;
2144 }
2145
2146 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2147 {
2148         u32 gpio_reg = 0;
2149         int rc = 0;
2150
2151         /* Any port swapping should be handled by caller. */
2152
2153         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2154         /* read GPIO and mask except the float bits */
2155         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2156         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2157         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2158         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2159
2160         switch (mode) {
2161         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2162                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2163                 /* set CLR */
2164                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2165                 break;
2166
2167         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2168                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2169                 /* set SET */
2170                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2171                 break;
2172
2173         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2174                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2175                 /* set FLOAT */
2176                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2177                 break;
2178
2179         default:
2180                 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2181                 rc = -EINVAL;
2182                 break;
2183         }
2184
2185         if (rc == 0)
2186                 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2187
2188         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2189
2190         return rc;
2191 }
2192
2193 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2194 {
2195         /* The GPIO should be swapped if swap register is set and active */
2196         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2197                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2198         int gpio_shift = gpio_num +
2199                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2200         u32 gpio_mask = (1 << gpio_shift);
2201         u32 gpio_reg;
2202
2203         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2204                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2205                 return -EINVAL;
2206         }
2207
2208         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2209         /* read GPIO int */
2210         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2211
2212         switch (mode) {
2213         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2214                 DP(NETIF_MSG_LINK,
2215                    "Clear GPIO INT %d (shift %d) -> output low\n",
2216                    gpio_num, gpio_shift);
2217                 /* clear SET and set CLR */
2218                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2219                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2220                 break;
2221
2222         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2223                 DP(NETIF_MSG_LINK,
2224                    "Set GPIO INT %d (shift %d) -> output high\n",
2225                    gpio_num, gpio_shift);
2226                 /* clear CLR and set SET */
2227                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2228                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2229                 break;
2230
2231         default:
2232                 break;
2233         }
2234
2235         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2236         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2237
2238         return 0;
2239 }
2240
2241 static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2242 {
2243         u32 spio_reg;
2244
2245         /* Only 2 SPIOs are configurable */
2246         if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2247                 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2248                 return -EINVAL;
2249         }
2250
2251         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2252         /* read SPIO and mask except the float bits */
2253         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2254
2255         switch (mode) {
2256         case MISC_SPIO_OUTPUT_LOW:
2257                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2258                 /* clear FLOAT and set CLR */
2259                 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2260                 spio_reg |=  (spio << MISC_SPIO_CLR_POS);
2261                 break;
2262
2263         case MISC_SPIO_OUTPUT_HIGH:
2264                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2265                 /* clear FLOAT and set SET */
2266                 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2267                 spio_reg |=  (spio << MISC_SPIO_SET_POS);
2268                 break;
2269
2270         case MISC_SPIO_INPUT_HI_Z:
2271                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2272                 /* set FLOAT */
2273                 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2274                 break;
2275
2276         default:
2277                 break;
2278         }
2279
2280         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2281         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2282
2283         return 0;
2284 }
2285
2286 void bnx2x_calc_fc_adv(struct bnx2x *bp)
2287 {
2288         u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2289
2290         bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2291                                            ADVERTISED_Pause);
2292         switch (bp->link_vars.ieee_fc &
2293                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2294         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2295                 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2296                                                   ADVERTISED_Pause);
2297                 break;
2298
2299         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2300                 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2301                 break;
2302
2303         default:
2304                 break;
2305         }
2306 }
2307
2308 static void bnx2x_set_requested_fc(struct bnx2x *bp)
2309 {
2310         /* Initialize link parameters structure variables
2311          * It is recommended to turn off RX FC for jumbo frames
2312          *  for better performance
2313          */
2314         if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2315                 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2316         else
2317                 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2318 }
2319
2320 static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2321 {
2322         u32 pause_enabled = 0;
2323
2324         if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2325                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2326                         pause_enabled = 1;
2327
2328                 REG_WR(bp, BAR_USTRORM_INTMEM +
2329                            USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2330                        pause_enabled);
2331         }
2332
2333         DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2334            pause_enabled ? "enabled" : "disabled");
2335 }
2336
2337 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2338 {
2339         int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2340         u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2341
2342         if (!BP_NOMCP(bp)) {
2343                 bnx2x_set_requested_fc(bp);
2344                 bnx2x_acquire_phy_lock(bp);
2345
2346                 if (load_mode == LOAD_DIAG) {
2347                         struct link_params *lp = &bp->link_params;
2348                         lp->loopback_mode = LOOPBACK_XGXS;
2349                         /* Prefer doing PHY loopback at highest speed */
2350                         if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
2351                                 if (lp->speed_cap_mask[cfx_idx] &
2352                                     PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
2353                                         lp->req_line_speed[cfx_idx] =
2354                                         SPEED_20000;
2355                                 else if (lp->speed_cap_mask[cfx_idx] &
2356                                             PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2357                                                 lp->req_line_speed[cfx_idx] =
2358                                                 SPEED_10000;
2359                                 else
2360                                         lp->req_line_speed[cfx_idx] =
2361                                         SPEED_1000;
2362                         }
2363                 }
2364
2365                 if (load_mode == LOAD_LOOPBACK_EXT) {
2366                         struct link_params *lp = &bp->link_params;
2367                         lp->loopback_mode = LOOPBACK_EXT;
2368                 }
2369
2370                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2371
2372                 bnx2x_release_phy_lock(bp);
2373
2374                 bnx2x_init_dropless_fc(bp);
2375
2376                 bnx2x_calc_fc_adv(bp);
2377
2378                 if (bp->link_vars.link_up) {
2379                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2380                         bnx2x_link_report(bp);
2381                 }
2382                 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2383                 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2384                 return rc;
2385         }
2386         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2387         return -EINVAL;
2388 }
2389
2390 void bnx2x_link_set(struct bnx2x *bp)
2391 {
2392         if (!BP_NOMCP(bp)) {
2393                 bnx2x_acquire_phy_lock(bp);
2394                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2395                 bnx2x_release_phy_lock(bp);
2396
2397                 bnx2x_init_dropless_fc(bp);
2398
2399                 bnx2x_calc_fc_adv(bp);
2400         } else
2401                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2402 }
2403
2404 static void bnx2x__link_reset(struct bnx2x *bp)
2405 {
2406         if (!BP_NOMCP(bp)) {
2407                 bnx2x_acquire_phy_lock(bp);
2408                 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2409                 bnx2x_release_phy_lock(bp);
2410         } else
2411                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2412 }
2413
2414 void bnx2x_force_link_reset(struct bnx2x *bp)
2415 {
2416         bnx2x_acquire_phy_lock(bp);
2417         bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2418         bnx2x_release_phy_lock(bp);
2419 }
2420
2421 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2422 {
2423         u8 rc = 0;
2424
2425         if (!BP_NOMCP(bp)) {
2426                 bnx2x_acquire_phy_lock(bp);
2427                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2428                                      is_serdes);
2429                 bnx2x_release_phy_lock(bp);
2430         } else
2431                 BNX2X_ERR("Bootcode is missing - can not test link\n");
2432
2433         return rc;
2434 }
2435
2436 /* Fills in the per-vnic min rates, needed for further normalizing
2437  * of the min_rates.
2438  * Any vn whose configured min rate is zero gets DEF_MIN_RATE
2439  * instead (hidden vns get 0).
2440  * If ETS is enabled, or if all the configured min_rates are zero,
2441  * the fairness algorithm is deactivated.
2442  */
2445 static void bnx2x_calc_vn_min(struct bnx2x *bp,
2446                                       struct cmng_init_input *input)
2447 {
2448         int all_zero = 1;
2449         int vn;
2450
2451         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2452                 u32 vn_cfg = bp->mf_config[vn];
2453                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2454                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2455
2456                 /* Skip hidden vns */
2457                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2458                         vn_min_rate = 0;
2459                 /* If min rate is zero - set it to DEF_MIN_RATE */
2460                 else if (!vn_min_rate)
2461                         vn_min_rate = DEF_MIN_RATE;
2462                 else
2463                         all_zero = 0;
2464
2465                 input->vnic_min_rate[vn] = vn_min_rate;
2466         }
2467
2468         /* if ETS or all min rates are zeros - disable fairness */
2469         if (BNX2X_IS_ETS_ENABLED(bp)) {
2470                 input->flags.cmng_enables &=
2471                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2472                 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2473         } else if (all_zero) {
2474                 input->flags.cmng_enables &=
2475                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2476                 DP(NETIF_MSG_IFUP,
2477                    "All MIN values are zeroes; fairness will be disabled\n");
2478         } else
2479                 input->flags.cmng_enables |=
2480                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2481 }
2482
2483 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2484                                     struct cmng_init_input *input)
2485 {
2486         u16 vn_max_rate;
2487         u32 vn_cfg = bp->mf_config[vn];
2488
2489         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2490                 vn_max_rate = 0;
2491         else {
2492                 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2493
2494                 if (IS_MF_PERCENT_BW(bp)) {
2495                         /* maxCfg is in percent of link speed */
2496                         vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2497                 } else /* SD modes */
2498                         /* maxCfg is absolute in 100Mb units */
2499                         vn_max_rate = maxCfg * 100;
2500         }
2501
2502         DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2503
2504         input->vnic_max_rate[vn] = vn_max_rate;
2505 }
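
/* Worked example for the max-rate math above: on a 10000 Mb/s link,
 * maxCfg == 25 in percent-BW mode yields 10000 * 25 / 100 == 2500
 * Mb/s; in the SD modes the same maxCfg gives 25 * 100 == 2500 Mb/s
 * directly, since there maxCfg is counted in 100 Mb units.
 */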
2506
2507 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2508 {
2509         if (CHIP_REV_IS_SLOW(bp))
2510                 return CMNG_FNS_NONE;
2511         if (IS_MF(bp))
2512                 return CMNG_FNS_MINMAX;
2513
2514         return CMNG_FNS_NONE;
2515 }
2516
2517 void bnx2x_read_mf_cfg(struct bnx2x *bp)
2518 {
2519         int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2520
2521         if (BP_NOMCP(bp))
2522                 return; /* open question: what should the defaults be here? */
2523
2524         /* For 2 port configuration the absolute function number formula
2525          * is:
2526          *      abs_func = 2 * vn + BP_PORT + BP_PATH
2527          *
2528          *      and there are 4 functions per port
2529          *
2530          * For 4 port configuration it is
2531          *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2532          *
2533          *      and there are 2 functions per port
2534          */
2535         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2536                 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2537
2538                 if (func >= E1H_FUNC_MAX)
2539                         break;
2540
2541                 bp->mf_config[vn] =
2542                         MF_CFG_RD(bp, func_mf_config[func].config);
2543         }
2544         if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2545                 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2546                 bp->flags |= MF_FUNC_DIS;
2547         } else {
2548                 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2549                 bp->flags &= ~MF_FUNC_DIS;
2550         }
2551 }
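
/* Worked example for the abs_func formula above: in a 2-port system
 * (n == 1), vn 2 on port 1 of path 0 maps to 2 * 2 + 1 + 0 == 5; in
 * a 4-port system (n == 2), vn 1 on port 1 of path 1 maps to
 * 4 * 1 + 2 * 1 + 1 == 7.
 */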
2552
2553 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2554 {
2555         struct cmng_init_input input;
2556         memset(&input, 0, sizeof(struct cmng_init_input));
2557
2558         input.port_rate = bp->link_vars.line_speed;
2559
2560         if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2561                 int vn;
2562
2563                 /* read mf conf from shmem */
2564                 if (read_cfg)
2565                         bnx2x_read_mf_cfg(bp);
2566
2567                 /* calculate vn min rates; fairness is enabled unless all are zero */
2568                 bnx2x_calc_vn_min(bp, &input);
2569
2570                 /* calculate and set min-max rate for each vn */
2571                 if (bp->port.pmf)
2572                         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2573                                 bnx2x_calc_vn_max(bp, vn, &input);
2574
2575                 /* always enable rate shaping and fairness */
2576                 input.flags.cmng_enables |=
2577                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2578
2579                 bnx2x_init_cmng(&input, &bp->cmng);
2580                 return;
2581         }
2582
2583         /* rate shaping and fairness are disabled */
2584         DP(NETIF_MSG_IFUP,
2585            "rate shaping and fairness are disabled\n");
2586 }
2587
2588 static void storm_memset_cmng(struct bnx2x *bp,
2589                               struct cmng_init *cmng,
2590                               u8 port)
2591 {
2592         int vn;
2593         size_t size = sizeof(struct cmng_struct_per_port);
2594
2595         u32 addr = BAR_XSTRORM_INTMEM +
2596                         XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2597
2598         __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2599
2600         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2601                 int func = func_by_vn(bp, vn);
2602
2603                 addr = BAR_XSTRORM_INTMEM +
2604                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2605                 size = sizeof(struct rate_shaping_vars_per_vn);
2606                 __storm_memset_struct(bp, addr, size,
2607                                       (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2608
2609                 addr = BAR_XSTRORM_INTMEM +
2610                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2611                 size = sizeof(struct fairness_vars_per_vn);
2612                 __storm_memset_struct(bp, addr, size,
2613                                       (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2614         }
2615 }
2616
2617 /* init cmng mode in HW according to local configuration */
2618 void bnx2x_set_local_cmng(struct bnx2x *bp)
2619 {
2620         int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2621
2622         if (cmng_fns != CMNG_FNS_NONE) {
2623                 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2624                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2625         } else {
2626                 /* rate shaping and fairness are disabled */
2627                 DP(NETIF_MSG_IFUP,
2628                    "single function mode without fairness\n");
2629         }
2630 }
2631
2632 /* This function is called upon link interrupt */
2633 static void bnx2x_link_attn(struct bnx2x *bp)
2634 {
2635         /* Make sure that we are synced with the current statistics */
2636         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2637
2638         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2639
2640         bnx2x_init_dropless_fc(bp);
2641
2642         if (bp->link_vars.link_up) {
2643
2644                 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2645                         struct host_port_stats *pstats;
2646
2647                         pstats = bnx2x_sp(bp, port_stats);
2648                         /* reset old mac stats */
2649                         memset(&(pstats->mac_stx[0]), 0,
2650                                sizeof(struct mac_stx));
2651                 }
2652                 if (bp->state == BNX2X_STATE_OPEN)
2653                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2654         }
2655
2656         if (bp->link_vars.link_up && bp->link_vars.line_speed)
2657                 bnx2x_set_local_cmng(bp);
2658
2659         __bnx2x_link_report(bp);
2660
2661         if (IS_MF(bp))
2662                 bnx2x_link_sync_notify(bp);
2663 }
2664
2665 void bnx2x__link_status_update(struct bnx2x *bp)
2666 {
2667         if (bp->state != BNX2X_STATE_OPEN)
2668                 return;
2669
2670         /* read updated dcb configuration */
2671         if (IS_PF(bp)) {
2672                 bnx2x_dcbx_pmf_update(bp);
2673                 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2674                 if (bp->link_vars.link_up)
2675                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2676                 else
2677                         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2678                 /* indicate link status */
2679                 bnx2x_link_report(bp);
2680
2681         } else { /* VF */
2682                 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2683                                           SUPPORTED_10baseT_Full |
2684                                           SUPPORTED_100baseT_Half |
2685                                           SUPPORTED_100baseT_Full |
2686                                           SUPPORTED_1000baseT_Full |
2687                                           SUPPORTED_2500baseX_Full |
2688                                           SUPPORTED_10000baseT_Full |
2689                                           SUPPORTED_TP |
2690                                           SUPPORTED_FIBRE |
2691                                           SUPPORTED_Autoneg |
2692                                           SUPPORTED_Pause |
2693                                           SUPPORTED_Asym_Pause);
2694                 bp->port.advertising[0] = bp->port.supported[0];
2695
2696                 bp->link_params.bp = bp;
2697                 bp->link_params.port = BP_PORT(bp);
2698                 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2699                 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2700                 bp->link_params.req_line_speed[0] = SPEED_10000;
2701                 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2702                 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2703                 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2704                 bp->link_vars.line_speed = SPEED_10000;
2705                 bp->link_vars.link_status =
2706                         (LINK_STATUS_LINK_UP |
2707                          LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2708                 bp->link_vars.link_up = 1;
2709                 bp->link_vars.duplex = DUPLEX_FULL;
2710                 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2711                 __bnx2x_link_report(bp);
2712
2713                 bnx2x_sample_bulletin(bp);
2714
2715                 /* if the bulletin board did not have an update for link
2716                  * status, __bnx2x_link_report will report the current
2717                  * status, but it will NOT duplicate the report if it was
2718                  * already issued while sampling the bulletin board.
2719                  */
2720                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2721         }
2722 }
2723
2724 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2725                                   u16 vlan_val, u8 allowed_prio)
2726 {
2727         struct bnx2x_func_state_params func_params = {NULL};
2728         struct bnx2x_func_afex_update_params *f_update_params =
2729                 &func_params.params.afex_update;
2730
2731         func_params.f_obj = &bp->func_obj;
2732         func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2733
2734         /* no need to wait for RAMROD completion, so don't
2735          * set RAMROD_COMP_WAIT flag
2736          */
2737
2738         f_update_params->vif_id = vifid;
2739         f_update_params->afex_default_vlan = vlan_val;
2740         f_update_params->allowed_priorities = allowed_prio;
2741
2742         /* if ramrod can not be sent, respond to MCP immediately */
2743         if (bnx2x_func_state_change(bp, &func_params) < 0)
2744                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2745
2746         return 0;
2747 }
2748
2749 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2750                                           u16 vif_index, u8 func_bit_map)
2751 {
2752         struct bnx2x_func_state_params func_params = {NULL};
2753         struct bnx2x_func_afex_viflists_params *update_params =
2754                 &func_params.params.afex_viflists;
2755         int rc;
2756         u32 drv_msg_code;
2757
2758         /* validate only LIST_SET and LIST_GET are received from switch */
2759         if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2760                 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2761                           cmd_type);
2762
2763         func_params.f_obj = &bp->func_obj;
2764         func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2765
2766         /* set parameters according to cmd_type */
2767         update_params->afex_vif_list_command = cmd_type;
2768         update_params->vif_list_index = vif_index;
2769         update_params->func_bit_map =
2770                 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2771         update_params->func_to_clear = 0;
2772         drv_msg_code =
2773                 (cmd_type == VIF_LIST_RULE_GET) ?
2774                 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2775                 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2776
2777         /* if the ramrod cannot be sent, respond to the MCP immediately for
2778          * SET and GET requests (others are not triggered from the MCP)
2779          */
2780         rc = bnx2x_func_state_change(bp, &func_params);
2781         if (rc < 0)
2782                 bnx2x_fw_command(bp, drv_msg_code, 0);
2783
2784         return 0;
2785 }
2786
2787 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2788 {
2789         struct afex_stats afex_stats;
2790         u32 func = BP_ABS_FUNC(bp);
2791         u32 mf_config;
2792         u16 vlan_val;
2793         u32 vlan_prio;
2794         u16 vif_id;
2795         u8 allowed_prio;
2796         u8 vlan_mode;
2797         u32 addr_to_write, vifid, addrs, stats_type, i;
2798
2799         if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2800                 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2801                 DP(BNX2X_MSG_MCP,
2802                    "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2803                 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2804         }
2805
2806         if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2807                 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2808                 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2809                 DP(BNX2X_MSG_MCP,
2810                    "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2811                    vifid, addrs);
2812                 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2813                                                addrs);
2814         }
2815
2816         if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2817                 addr_to_write = SHMEM2_RD(bp,
2818                         afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2819                 stats_type = SHMEM2_RD(bp,
2820                         afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2821
2822                 DP(BNX2X_MSG_MCP,
2823                    "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2824                    addr_to_write);
2825
2826                 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2827
2828                 /* write response to scratchpad, for MCP */
2829                 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2830                         REG_WR(bp, addr_to_write + i*sizeof(u32),
2831                                *(((u32 *)(&afex_stats))+i));
2832
2833                 /* send ack message to MCP */
2834                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2835         }
2836
2837         if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2838                 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2839                 bp->mf_config[BP_VN(bp)] = mf_config;
2840                 DP(BNX2X_MSG_MCP,
2841                    "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2842                    mf_config);
2843
2844                 /* if VIF_SET is "enabled" */
2845                 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2846                         /* set rate limit directly to internal RAM */
2847                         struct cmng_init_input cmng_input;
2848                         struct rate_shaping_vars_per_vn m_rs_vn;
2849                         size_t size = sizeof(struct rate_shaping_vars_per_vn);
2850                         u32 addr = BAR_XSTRORM_INTMEM +
2851                             XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2852
2853                         bp->mf_config[BP_VN(bp)] = mf_config;
2854
2855                         bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2856                         m_rs_vn.vn_counter.rate =
2857                                 cmng_input.vnic_max_rate[BP_VN(bp)];
2858                         m_rs_vn.vn_counter.quota =
2859                                 (m_rs_vn.vn_counter.rate *
2860                                  RS_PERIODIC_TIMEOUT_USEC) / 8;
2861
2862                         __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2863
2864                         /* read relevant values from mf_cfg struct in shmem */
2865                         vif_id =
2866                                 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2867                                  FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2868                                 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2869                         vlan_val =
2870                                 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2871                                  FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2872                                 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2873                         vlan_prio = (mf_config &
2874                                      FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2875                                     FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2876                         vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2877                         vlan_mode =
2878                                 (MF_CFG_RD(bp,
2879                                            func_mf_config[func].afex_config) &
2880                                  FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2881                                 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2882                         allowed_prio =
2883                                 (MF_CFG_RD(bp,
2884                                            func_mf_config[func].afex_config) &
2885                                  FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2886                                 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2887
2888                         /* send ramrod to FW, return in case of failure */
2889                         if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2890                                                    allowed_prio))
2891                                 return;
2892
2893                         bp->afex_def_vlan_tag = vlan_val;
2894                         bp->afex_vlan_mode = vlan_mode;
2895                 } else {
2896                         /* notify link down because the function is disabled */
2897                         bnx2x_link_report(bp);
2898
2899                         /* send INVALID VIF ramrod to FW */
2900                         bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2901
2902                         /* Reset the default afex VLAN */
2903                         bp->afex_def_vlan_tag = -1;
2904                 }
2905         }
2906 }
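
/* The VIFSET handling above repeatedly extracts bit-fields from shmem
 * words with the (value & MASK) >> SHIFT idiom. A worked example with
 * made-up mask/shift values (the names below are hypothetical):
 *
 *	#define EX_FIELD_MASK	0x00000ff0
 *	#define EX_FIELD_SHIFT	4
 *
 *	u32 word = 0x00000a50;
 *	u32 field = (word & EX_FIELD_MASK) >> EX_FIELD_SHIFT; // field == 0xa5
 */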
2907
2908 static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2909 {
2910         struct bnx2x_func_switch_update_params *switch_update_params;
2911         struct bnx2x_func_state_params func_params;
2912
2913         memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
2914         switch_update_params = &func_params.params.switch_update;
2915         func_params.f_obj = &bp->func_obj;
2916         func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
2917
2918         if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
2919                 int func = BP_ABS_FUNC(bp);
2920                 u32 val;
2921
2922                 /* Re-learn the S-tag from shmem */
2923                 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2924                                 FUNC_MF_CFG_E1HOV_TAG_MASK;
2925                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2926                         bp->mf_ov = val;
2927                 } else {
2928                         BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
2929                         goto fail;
2930                 }
2931
2932                 /* Configure new S-tag in LLH */
2933                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
2934                        bp->mf_ov);
2935
2936                 /* Send ramrod to notify the FW of the change */
2937                 __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
2938                           &switch_update_params->changes);
2939                 switch_update_params->vlan = bp->mf_ov;
2940
2941                 if (bnx2x_func_state_change(bp, &func_params) < 0) {
2942                         BNX2X_ERR("Failed to configure FW with S-tag change to %02x\n",
2943                                   bp->mf_ov);
2944                         goto fail;
2945                 } else {
2946                         DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
2947                            bp->mf_ov);
2948                 }
2949         } else {
2950                 goto fail;
2951         }
2952
2953         bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
2954         return;
2955 fail:
2956         bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
2957 }
2958
2959 static void bnx2x_pmf_update(struct bnx2x *bp)
2960 {
2961         int port = BP_PORT(bp);
2962         u32 val;
2963
2964         bp->port.pmf = 1;
2965         DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2966
2967         /*
2968          * We need the mb() to ensure the ordering between the writing to
2969          * bp->port.pmf here and reading it from the bnx2x_periodic_task().
2970          */
2971         smp_mb();
2972
2973         /* queue a periodic task */
2974         queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2975
2976         bnx2x_dcbx_pmf_update(bp);
2977
2978         /* enable nig attention */
2979         val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2980         if (bp->common.int_block == INT_BLOCK_HC) {
2981                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2982                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2983         } else if (!CHIP_IS_E1x(bp)) {
2984                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2985                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2986         }
2987
2988         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2989 }
2990
2991 /* end of Link */
2992
2993 /* slow path */
2994
2995 /*
2996  * General service functions
2997  */
2998
2999 /* send the MCP a request, block until there is a reply */
3000 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
3001 {
3002         int mb_idx = BP_FW_MB_IDX(bp);
3003         u32 seq;
3004         u32 rc = 0;
3005         u32 cnt = 1;
3006         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
3007
3008         mutex_lock(&bp->fw_mb_mutex);
3009         seq = ++bp->fw_seq;
3010         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
3011         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
3012
3013         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
3014                         (command | seq), param);
3015
3016         do {
3017                 /* let the FW do its magic ... */
3018                 msleep(delay);
3019
3020                 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
3021
3022                 /* Give the FW up to 5 seconds (500*10ms) */
3023         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
3024
3025         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
3026            cnt*delay, rc, seq);
3027
3028         /* is this a reply to our command? */
3029         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
3030                 rc &= FW_MSG_CODE_MASK;
3031         else {
3032                 /* FW BUG! */
3033                 BNX2X_ERR("FW failed to respond!\n");
3034                 bnx2x_fw_dump(bp);
3035                 rc = 0;
3036         }
3037         mutex_unlock(&bp->fw_mb_mutex);
3038
3039         return rc;
3040 }
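
/* A typical caller treats a zero return from bnx2x_fw_command() as "the MCP
 * did not reply" and a non-zero value as the FW_MSG_CODE_MASK-masked
 * response. A minimal sketch (illustrative only; LOAD_REQ is just an
 * example command):
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
 *	if (!rc) {
 *		BNX2X_ERR("MCP response failure\n");
 *		return -EBUSY;
 *	}
 */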
3041
3042 static void storm_memset_func_cfg(struct bnx2x *bp,
3043                                  struct tstorm_eth_function_common_config *tcfg,
3044                                  u16 abs_fid)
3045 {
3046         size_t size = sizeof(struct tstorm_eth_function_common_config);
3047
3048         u32 addr = BAR_TSTRORM_INTMEM +
3049                         TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
3050
3051         __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
3052 }
3053
3054 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
3055 {
3056         if (CHIP_IS_E1x(bp)) {
3057                 struct tstorm_eth_function_common_config tcfg = {0};
3058
3059                 storm_memset_func_cfg(bp, &tcfg, p->func_id);
3060         }
3061
3062         /* Enable the function in the FW */
3063         storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
3064         storm_memset_func_en(bp, p->func_id, 1);
3065
3066         /* spq */
3067         if (p->spq_active) {
3068                 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
3069                 REG_WR(bp, XSEM_REG_FAST_MEMORY +
3070                        XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
3071         }
3072 }
3073
3074 /**
3075  * bnx2x_get_common_flags - Return common flags
3076  *
3077  * @bp:         device handle
3078  * @fp:         queue handle
3079  * @zero_stats: TRUE if statistics zeroing is needed
3080  *
3081  * Return the flags that are common to both Tx-only and normal connections.
3082  */
3083 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
3084                                             struct bnx2x_fastpath *fp,
3085                                             bool zero_stats)
3086 {
3087         unsigned long flags = 0;
3088
3089         /* PF driver will always initialize the Queue to an ACTIVE state */
3090         __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3091
3092         /* tx only connections collect statistics (on the same index as the
3093          * parent connection). The statistics are zeroed when the parent
3094          * connection is initialized.
3095          */
3096
3097         __set_bit(BNX2X_Q_FLG_STATS, &flags);
3098         if (zero_stats)
3099                 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3100
3101         if (bp->flags & TX_SWITCHING)
3102                 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3103
3104         __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3105         __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3106
3107 #ifdef BNX2X_STOP_ON_ERROR
3108         __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3109 #endif
3110
3111         return flags;
3112 }
3113
3114 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3115                                        struct bnx2x_fastpath *fp,
3116                                        bool leading)
3117 {
3118         unsigned long flags = 0;
3119
3120         /* calculate other queue flags */
3121         if (IS_MF_SD(bp))
3122                 __set_bit(BNX2X_Q_FLG_OV, &flags);
3123
3124         if (IS_FCOE_FP(fp)) {
3125                 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
3126                 /* For FCoE - force usage of default priority (for afex) */
3127                 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3128         }
3129
3130         if (fp->mode != TPA_MODE_DISABLED) {
3131                 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3132                 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3133                 if (fp->mode == TPA_MODE_GRO)
3134                         __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3135         }
3136
3137         if (leading) {
3138                 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3139                 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3140         }
3141
3142         /* Always set HW VLAN stripping */
3143         __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3144
3145         /* configure silent vlan removal */
3146         if (IS_MF_AFEX(bp))
3147                 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3148
3149         return flags | bnx2x_get_common_flags(bp, fp, true);
3150 }
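
/* The queue flags built above are a plain `unsigned long' bitmap handled
 * with the non-atomic bitops. A minimal sketch of producing and consuming
 * such a bitmap (illustrative only):
 *
 *	unsigned long flags = 0;
 *
 *	__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
 *	if (test_bit(BNX2X_Q_FLG_ACTIVE, &flags))
 *		; // queue setup code keys off bits like this one
 */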
3151
3152 static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3153         struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3154         u8 cos)
3155 {
3156         gen_init->stat_id = bnx2x_stats_id(fp);
3157         gen_init->spcl_id = fp->cl_id;
3158
3159         /* Always use mini-jumbo MTU for FCoE L2 ring */
3160         if (IS_FCOE_FP(fp))
3161                 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3162         else
3163                 gen_init->mtu = bp->dev->mtu;
3164
3165         gen_init->cos = cos;
3166
3167         gen_init->fp_hsi = ETH_FP_HSI_VERSION;
3168 }
3169
3170 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3171         struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3172         struct bnx2x_rxq_setup_params *rxq_init)
3173 {
3174         u8 max_sge = 0;
3175         u16 sge_sz = 0;
3176         u16 tpa_agg_size = 0;
3177
3178         if (fp->mode != TPA_MODE_DISABLED) {
3179                 pause->sge_th_lo = SGE_TH_LO(bp);
3180                 pause->sge_th_hi = SGE_TH_HI(bp);
3181
3182                 /* validate SGE ring has enough to cross high threshold */
3183                 WARN_ON(bp->dropless_fc &&
3184                                 pause->sge_th_hi + FW_PREFETCH_CNT >
3185                                 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3186
3187                 tpa_agg_size = TPA_AGG_SIZE;
3188                 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3189                         SGE_PAGE_SHIFT;
3190                 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3191                           (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3192                 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3193         }
3194
3195         /* pause - not for e1 */
3196         if (!CHIP_IS_E1(bp)) {
3197                 pause->bd_th_lo = BD_TH_LO(bp);
3198                 pause->bd_th_hi = BD_TH_HI(bp);
3199
3200                 pause->rcq_th_lo = RCQ_TH_LO(bp);
3201                 pause->rcq_th_hi = RCQ_TH_HI(bp);
3202                 /*
3203                  * validate that rings have enough entries to cross
3204                  * high thresholds
3205                  */
3206                 WARN_ON(bp->dropless_fc &&
3207                                 pause->bd_th_hi + FW_PREFETCH_CNT >
3208                                 bp->rx_ring_size);
3209                 WARN_ON(bp->dropless_fc &&
3210                                 pause->rcq_th_hi + FW_PREFETCH_CNT >
3211                                 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3212
3213                 pause->pri_map = 1;
3214         }
3215
3216         /* rxq setup */
3217         rxq_init->dscr_map = fp->rx_desc_mapping;
3218         rxq_init->sge_map = fp->rx_sge_mapping;
3219         rxq_init->rcq_map = fp->rx_comp_mapping;
3220         rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3221
3222         /* This should be the maximum number of data bytes that may be
3223          * placed on the BD (not including padding).
3224          */
3225         rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3226                            BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3227
3228         rxq_init->cl_qzone_id = fp->cl_qzone_id;
3229         rxq_init->tpa_agg_sz = tpa_agg_size;
3230         rxq_init->sge_buf_sz = sge_sz;
3231         rxq_init->max_sges_pkt = max_sge;
3232         rxq_init->rss_engine_id = BP_FUNC(bp);
3233         rxq_init->mcast_engine_id = BP_FUNC(bp);
3234
3235         /* Maximum number of simultaneous TPA aggregations for this Queue.
3236          *
3237          * For PF Clients it should be the maximum available number.
3238          * VF driver(s) may want to set it to a smaller value.
3239          */
3240         rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3241
3242         rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3243         rxq_init->fw_sb_id = fp->fw_sb_id;
3244
3245         if (IS_FCOE_FP(fp))
3246                 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3247         else
3248                 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3249         /* configure silent vlan removal
3250          * if multi function mode is afex, then mask default vlan
3251          */
3252         if (IS_MF_AFEX(bp)) {
3253                 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3254                 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3255         }
3256 }
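
/* A worked example of the max_sge arithmetic above, assuming 4 KiB pages
 * and PAGES_PER_SGE == 1 (both assumptions, not guaranteed build-time
 * values), for an MTU of 9000:
 *
 *	SGE_PAGE_ALIGN(9000)			-> 12288
 *	12288 >> SGE_PAGE_SHIFT			-> 3 pages
 *	round up to a PAGES_PER_SGE multiple	-> max_sge == 3
 */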
3257
3258 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3259         struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3260         u8 cos)
3261 {
3262         txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3263         txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3264         txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3265         txq_init->fw_sb_id = fp->fw_sb_id;
3266
3267         /*
3268          * set the tss leading client id for TX classification ==
3269          * leading RSS client id
3270          */
3271         txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3272
3273         if (IS_FCOE_FP(fp)) {
3274                 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3275                 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3276         }
3277 }
3278
3279 static void bnx2x_pf_init(struct bnx2x *bp)
3280 {
3281         struct bnx2x_func_init_params func_init = {0};
3282         struct event_ring_data eq_data = { {0} };
3283
3284         if (!CHIP_IS_E1x(bp)) {
3285                 /* reset IGU PF statistics: MSIX + ATTN */
3286                 /* PF */
3287                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3288                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3289                            (CHIP_MODE_IS_4_PORT(bp) ?
3290                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3291                 /* ATTN */
3292                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3293                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3294                            BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3295                            (CHIP_MODE_IS_4_PORT(bp) ?
3296                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3297         }
3298
3299         func_init.spq_active = true;
3300         func_init.pf_id = BP_FUNC(bp);
3301         func_init.func_id = BP_FUNC(bp);
3302         func_init.spq_map = bp->spq_mapping;
3303         func_init.spq_prod = bp->spq_prod_idx;
3304
3305         bnx2x_func_init(bp, &func_init);
3306
3307         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3308
3309         /*
3310          * Congestion management values depend on the link rate.
3311          * There is no active link, so the initial link rate is set to 10 Gbps.
3312          * When the link comes up, the congestion management values are
3313          * re-calculated according to the actual link rate.
3314          */
3315         bp->link_vars.line_speed = SPEED_10000;
3316         bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3317
3318         /* Only the PMF sets the HW */
3319         if (bp->port.pmf)
3320                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3321
3322         /* init Event Queue - PCI bus guarantees correct endianness */
3323         eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3324         eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3325         eq_data.producer = bp->eq_prod;
3326         eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3327         eq_data.sb_id = DEF_SB_ID;
3328         storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3329 }
3330
3331 static void bnx2x_e1h_disable(struct bnx2x *bp)
3332 {
3333         int port = BP_PORT(bp);
3334
3335         bnx2x_tx_disable(bp);
3336
3337         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3338 }
3339
3340 static void bnx2x_e1h_enable(struct bnx2x *bp)
3341 {
3342         int port = BP_PORT(bp);
3343
3344         if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
3345                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
3346
3347         /* Only the Tx queues need to be re-enabled */
3348         netif_tx_wake_all_queues(bp->dev);
3349
3350         /*
3351          * Do not call netif_carrier_on() here; it will be called when the
3352          * link state is checked and the link is found to be up.
3353          */
3354 }
3355
3356 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3357
3358 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3359 {
3360         struct eth_stats_info *ether_stat =
3361                 &bp->slowpath->drv_info_to_mcp.ether_stat;
3362         struct bnx2x_vlan_mac_obj *mac_obj =
3363                 &bp->sp_objs->mac_obj;
3364         int i;
3365
3366         strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3367                 ETH_STAT_INFO_VERSION_LEN);
3368
3369         /* Get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED MACs and place them in
3370          * the mac_local field of the ether_stat struct. Each entry in that
3371          * field is 8 bytes wide, while a MAC address is only 6 bytes, so
3372          * the base address is offset by 2 bytes. Likewise, the stride for
3373          * get_n_elements() is padded by 2 bytes, compensating for the 6-byte
3374          * MAC within the 8 bytes allocated by the ether_stat struct, so the
3375          * MACs land in their proper positions.
3376          */
3377         for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3378                 memset(ether_stat->mac_local + i, 0,
3379                        sizeof(ether_stat->mac_local[0]));
3380         mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3381                                 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3382                                 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3383                                 ETH_ALEN);
3384         ether_stat->mtu_size = bp->dev->mtu;
3385         if (bp->dev->features & NETIF_F_RXCSUM)
3386                 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3387         if (bp->dev->features & NETIF_F_TSO)
3388                 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3389         ether_stat->feature_flags |= bp->common.boot_mode;
3390
3391         ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3392
3393         ether_stat->txq_size = bp->tx_ring_size;
3394         ether_stat->rxq_size = bp->rx_ring_size;
3395
3396 #ifdef CONFIG_BNX2X_SRIOV
3397         ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3398 #endif
3399 }
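
/* The MAC_PAD offsetting above yields the following layout inside
 * ether_stat->mac_local, assuming MAC_PAD == 2 and 8-byte entries (which
 * is what the comment in the function describes):
 *
 *	entry 0: [pad pad m0 m1 m2 m3 m4 m5]
 *	entry 1: [pad pad m0 m1 m2 m3 m4 m5]
 *	entry 2: [pad pad m0 m1 m2 m3 m4 m5]
 */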
3400
3401 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3402 {
3403         struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3404         struct fcoe_stats_info *fcoe_stat =
3405                 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3406
3407         if (!CNIC_LOADED(bp))
3408                 return;
3409
3410         memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3411
3412         fcoe_stat->qos_priority =
3413                 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3414
3415         /* insert FCoE stats from ramrod response */
3416         if (!NO_FCOE(bp)) {
3417                 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3418                         &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3419                         tstorm_queue_statistics;
3420
3421                 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3422                         &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3423                         xstorm_queue_statistics;
3424
3425                 struct fcoe_statistics_params *fw_fcoe_stat =
3426                         &bp->fw_stats_data->fcoe;
3427
3428                 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3429                           fcoe_stat->rx_bytes_lo,
3430                           fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3431
3432                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3433                           fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3434                           fcoe_stat->rx_bytes_lo,
3435                           fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3436
3437                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3438                           fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3439                           fcoe_stat->rx_bytes_lo,
3440                           fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3441
3442                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3443                           fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3444                           fcoe_stat->rx_bytes_lo,
3445                           fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3446
3447                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3448                           fcoe_stat->rx_frames_lo,
3449                           fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3450
3451                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3452                           fcoe_stat->rx_frames_lo,
3453                           fcoe_q_tstorm_stats->rcv_ucast_pkts);
3454
3455                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3456                           fcoe_stat->rx_frames_lo,
3457                           fcoe_q_tstorm_stats->rcv_bcast_pkts);
3458
3459                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3460                           fcoe_stat->rx_frames_lo,
3461                           fcoe_q_tstorm_stats->rcv_mcast_pkts);
3462
3463                 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3464                           fcoe_stat->tx_bytes_lo,
3465                           fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3466
3467                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3468                           fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3469                           fcoe_stat->tx_bytes_lo,
3470                           fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3471
3472                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3473                           fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3474                           fcoe_stat->tx_bytes_lo,
3475                           fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3476
3477                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3478                           fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3479                           fcoe_stat->tx_bytes_lo,
3480                           fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3481
3482                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3483                           fcoe_stat->tx_frames_lo,
3484                           fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3485
3486                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3487                           fcoe_stat->tx_frames_lo,
3488                           fcoe_q_xstorm_stats->ucast_pkts_sent);
3489
3490                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3491                           fcoe_stat->tx_frames_lo,
3492                           fcoe_q_xstorm_stats->bcast_pkts_sent);
3493
3494                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3495                           fcoe_stat->tx_frames_lo,
3496                           fcoe_q_xstorm_stats->mcast_pkts_sent);
3497         }
3498
3499         /* ask L5 driver to add data to the struct */
3500         bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3501 }
3502
3503 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3504 {
3505         struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3506         struct iscsi_stats_info *iscsi_stat =
3507                 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3508
3509         if (!CNIC_LOADED(bp))
3510                 return;
3511
3512         memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3513                ETH_ALEN);
3514
3515         iscsi_stat->qos_priority =
3516                 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3517
3518         /* ask L5 driver to add data to the struct */
3519         bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3520 }
3521
3522 /* called due to MCP event (on pmf):
3523  *      reread the new bandwidth configuration
3524  *      configure the FW
3525  *      notify other functions about the change
3526  */
3527 static void bnx2x_config_mf_bw(struct bnx2x *bp)
3528 {
3529         if (bp->link_vars.link_up) {
3530                 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3531                 bnx2x_link_sync_notify(bp);
3532         }
3533         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3534 }
3535
3536 static void bnx2x_set_mf_bw(struct bnx2x *bp)
3537 {
3538         bnx2x_config_mf_bw(bp);
3539         bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3540 }
3541
3542 static void bnx2x_handle_eee_event(struct bnx2x *bp)
3543 {
3544         DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3545         bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3546 }
3547
3548 #define BNX2X_UPDATE_DRV_INFO_IND_LENGTH        (20)
3549 #define BNX2X_UPDATE_DRV_INFO_IND_COUNT         (25)
3550
3551 static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3552 {
3553         enum drv_info_opcode op_code;
3554         u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3555         bool release = false;
3556         int wait;
3557
3558         /* if drv_info version supported by MFW doesn't match - send NACK */
3559         if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3560                 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3561                 return;
3562         }
3563
3564         op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3565                   DRV_INFO_CONTROL_OP_CODE_SHIFT;
3566
3567         /* Must prevent other flows from accessing drv_info_to_mcp */
3568         mutex_lock(&bp->drv_info_mutex);
3569
3570         memset(&bp->slowpath->drv_info_to_mcp, 0,
3571                sizeof(union drv_info_to_mcp));
3572
3573         switch (op_code) {
3574         case ETH_STATS_OPCODE:
3575                 bnx2x_drv_info_ether_stat(bp);
3576                 break;
3577         case FCOE_STATS_OPCODE:
3578                 bnx2x_drv_info_fcoe_stat(bp);
3579                 break;
3580         case ISCSI_STATS_OPCODE:
3581                 bnx2x_drv_info_iscsi_stat(bp);
3582                 break;
3583         default:
3584                 /* if op code isn't supported - send NACK */
3585                 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3586                 goto out;
3587         }
3588
3589         /* if we got drv_info attn from MFW then these fields are defined in
3590          * shmem2 for sure
3591          */
3592         SHMEM2_WR(bp, drv_info_host_addr_lo,
3593                 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3594         SHMEM2_WR(bp, drv_info_host_addr_hi,
3595                 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3596
3597         bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3598
3599         /* Since management may want both this buffer and the one used by
3600          * get_driver_version, wait until management notifies us that it
3601          * has finished using the buffer.
3602          */
3603         if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3604                 DP(BNX2X_MSG_MCP, "Management does not support indication\n");
3605         } else if (!bp->drv_info_mng_owner) {
3606                 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3607
3608                 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
3609                         u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
3610
3611                         /* Management is done; need to clear indication */
3612                         if (indication & bit) {
3613                                 SHMEM2_WR(bp, mfw_drv_indication,
3614                                           indication & ~bit);
3615                                 release = true;
3616                                 break;
3617                         }
3618
3619                         msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
3620                 }
3621         }
3622         if (!release) {
3623                 DP(BNX2X_MSG_MCP, "Management did not release indication\n");
3624                 bp->drv_info_mng_owner = true;
3625         }
3626
3627 out:
3628         mutex_unlock(&bp->drv_info_mutex);
3629 }
3630
3631 static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
3632 {
3633         u8 vals[4];
3634         int i = 0;
3635
3636         if (bnx2x_format) {
3637                 i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
3638                            &vals[0], &vals[1], &vals[2], &vals[3]);
3639                 if (i > 0)
3640                         vals[0] -= '0';
3641         } else {
3642                 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
3643                            &vals[0], &vals[1], &vals[2], &vals[3]);
3644         }
3645
3646         while (i < 4)
3647                 vals[i++] = 0;
3648
3649         return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
3650 }
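
/* A worked example of the encoding above, for a hypothetical bnx2x-format
 * version string "1.712.30": sscanf() yields '7', 12 and 30; subtracting
 * '0' turns '7' into 7; the missing fourth field is zeroed; the result is
 *
 *	(7 << 24) | (12 << 16) | (30 << 8) | 0 == 0x070c1e00
 */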
3651
3652 void bnx2x_update_mng_version(struct bnx2x *bp)
3653 {
3654         u32 iscsiver = DRV_VER_NOT_LOADED;
3655         u32 fcoever = DRV_VER_NOT_LOADED;
3656         u32 ethver = DRV_VER_NOT_LOADED;
3657         int idx = BP_FW_MB_IDX(bp);
3658         u8 *version;
3659
3660         if (!SHMEM2_HAS(bp, func_os_drv_ver))
3661                 return;
3662
3663         mutex_lock(&bp->drv_info_mutex);
3664         /* Must not proceed while a `bnx2x_handle_drv_info_req' may still be in progress */
3665         if (bp->drv_info_mng_owner)
3666                 goto out;
3667
3668         if (bp->state != BNX2X_STATE_OPEN)
3669                 goto out;
3670
3671         /* Parse ethernet driver version */
3672         ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3673         if (!CNIC_LOADED(bp))
3674                 goto out;
3675
3676         /* Try getting storage driver version via cnic */
3677         memset(&bp->slowpath->drv_info_to_mcp, 0,
3678                sizeof(union drv_info_to_mcp));
3679         bnx2x_drv_info_iscsi_stat(bp);
3680         version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3681         iscsiver = bnx2x_update_mng_version_utility(version, false);
3682
3683         memset(&bp->slowpath->drv_info_to_mcp, 0,
3684                sizeof(union drv_info_to_mcp));
3685         bnx2x_drv_info_fcoe_stat(bp);
3686         version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3687         fcoever = bnx2x_update_mng_version_utility(version, false);
3688
3689 out:
3690         SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3691         SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3692         SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3693
3694         mutex_unlock(&bp->drv_info_mutex);
3695
3696         DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
3697            ethver, iscsiver, fcoever);
3698 }
3699
3700 void bnx2x_update_mfw_dump(struct bnx2x *bp)
3701 {
3702         u32 drv_ver;
3703         u32 valid_dump;
3704
3705         if (!SHMEM2_HAS(bp, drv_info))
3706                 return;
3707
3708         /* Update Driver load time, possibly broken in y2038 */
3709         SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
3710
3711         drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3712         SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
3713
3714         SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
3715
3716         /* Check & notify On-Chip dump. */
3717         valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
3718
3719         if (valid_dump & FIRST_DUMP_VALID)
3720                 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
3721
3722         if (valid_dump & SECOND_DUMP_VALID)
3723                 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
3724 }
3725
3726 static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
3727 {
3728         u32 cmd_ok, cmd_fail;
3729
3730         /* sanity */
3731         if (event & DRV_STATUS_DCC_EVENT_MASK &&
3732             event & DRV_STATUS_OEM_EVENT_MASK) {
3733                 BNX2X_ERR("Received simultaneous events %08x\n", event);
3734                 return;
3735         }
3736
3737         if (event & DRV_STATUS_DCC_EVENT_MASK) {
3738                 cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
3739                 cmd_ok = DRV_MSG_CODE_DCC_OK;
3740         } else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ {
3741                 cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
3742                 cmd_ok = DRV_MSG_CODE_OEM_OK;
3743         }
3744
3745         DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
3746
3747         if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3748                      DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
3749                 /* This is the only place besides the function initialization
3750                  * where bp->flags can change, so it is done without any
3751                  * locks
3752                  */
3753                 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3754                         DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3755                         bp->flags |= MF_FUNC_DIS;
3756
3757                         bnx2x_e1h_disable(bp);
3758                 } else {
3759                         DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3760                         bp->flags &= ~MF_FUNC_DIS;
3761
3762                         bnx2x_e1h_enable(bp);
3763                 }
3764                 event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3765                            DRV_STATUS_OEM_DISABLE_ENABLE_PF);
3766         }
3767
3768         if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3769                      DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
3770                 bnx2x_config_mf_bw(bp);
3771                 event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3772                            DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
3773         }
3774
3775         /* Report results to MCP */
3776         if (event)
3777                 bnx2x_fw_command(bp, cmd_fail, 0);
3778         else
3779                 bnx2x_fw_command(bp, cmd_ok, 0);
3780 }
3781
3782 /* must be called under the spq lock */
3783 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3784 {
3785         struct eth_spe *next_spe = bp->spq_prod_bd;
3786
3787         if (bp->spq_prod_bd == bp->spq_last_bd) {
3788                 bp->spq_prod_bd = bp->spq;
3789                 bp->spq_prod_idx = 0;
3790                 DP(BNX2X_MSG_SP, "end of spq\n");
3791         } else {
3792                 bp->spq_prod_bd++;
3793                 bp->spq_prod_idx++;
3794         }
3795         return next_spe;
3796 }
3797
3798 /* must be called under the spq lock */
3799 static void bnx2x_sp_prod_update(struct bnx2x *bp)
3800 {
3801         int func = BP_FUNC(bp);
3802
3803         /*
3804          * Make sure that BD data is updated before writing the producer:
3805          * BD data is written to the memory, the producer is read from the
3806          * memory, thus we need a full memory barrier to ensure the ordering.
3807          */
3808         mb();
3809
3810         REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3811                  bp->spq_prod_idx);
3812         mmiowb();
3813 }
3814
3815 /**
3816  * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3817  *
3818  * @cmd:        command to check
3819  * @cmd_type:   command type
3820  */
3821 static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3822 {
3823         if ((cmd_type == NONE_CONNECTION_TYPE) ||
3824             (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3825             (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3826             (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3827             (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3828             (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3829             (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3830                 return true;
3831         else
3832                 return false;
3833 }
3834
3835 /**
3836  * bnx2x_sp_post - place a single command on an SP ring
3837  *
3838  * @bp:         driver handle
3839  * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
3840  * @cid:        SW CID the command is related to
3841  * @data_hi:    command private data address (high 32 bits)
3842  * @data_lo:    command private data address (low 32 bits)
3843  * @cmd_type:   command type (e.g. NONE, ETH)
3844  *
3845  * SP data is handled as if it's always an address pair, thus data fields are
3846  * not swapped to little endian in upper functions. Instead this function swaps
3847  * data as if it's two u32 fields.
3848  */
3849 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3850                   u32 data_hi, u32 data_lo, int cmd_type)
3851 {
3852         struct eth_spe *spe;
3853         u16 type;
3854         bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3855
3856 #ifdef BNX2X_STOP_ON_ERROR
3857         if (unlikely(bp->panic)) {
3858                 BNX2X_ERR("Can't post SP when there is panic\n");
3859                 return -EIO;
3860         }
3861 #endif
3862
3863         spin_lock_bh(&bp->spq_lock);
3864
3865         if (common) {
3866                 if (!atomic_read(&bp->eq_spq_left)) {
3867                         BNX2X_ERR("BUG! EQ ring full!\n");
3868                         spin_unlock_bh(&bp->spq_lock);
3869                         bnx2x_panic();
3870                         return -EBUSY;
3871                 }
3872         } else if (!atomic_read(&bp->cq_spq_left)) {
3873                 BNX2X_ERR("BUG! SPQ ring full!\n");
3874                 spin_unlock_bh(&bp->spq_lock);
3875                 bnx2x_panic();
3876                 return -EBUSY;
3877         }
3878
3879         spe = bnx2x_sp_get_next(bp);
3880
3881         /* CID needs the port number to be encoded in it */
3882         spe->hdr.conn_and_cmd_data =
3883                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3884                                     HW_CID(bp, cid));
3885
3886         /* In some cases, type may already contain the func-id,
3887          * mainly in SRIOV-related use cases, so we add it here only
3888          * if it's not already set.
3889          */
3890         if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
3891                 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
3892                         SPE_HDR_CONN_TYPE;
3893                 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3894                          SPE_HDR_FUNCTION_ID);
3895         } else {
3896                 type = cmd_type;
3897         }
3898
3899         spe->hdr.type = cpu_to_le16(type);
3900
3901         spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3902         spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3903
3904         /*
3905          * It's ok if the actual decrement is issued towards the memory
3906          * somewhere between the spin_lock and spin_unlock. Thus no
3907          * more explicit memory barrier is needed.
3908          */
3909         if (common)
3910                 atomic_dec(&bp->eq_spq_left);
3911         else
3912                 atomic_dec(&bp->cq_spq_left);
3913
3914         DP(BNX2X_MSG_SP,
3915            "SPQE[%x] (%x:%x)  (cmd, common?) (%d,%d)  hw_cid %x  data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3916            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3917            (u32)(U64_LO(bp->spq_mapping) +
3918            (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3919            HW_CID(bp, cid), data_hi, data_lo, type,
3920            atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3921
3922         bnx2x_sp_prod_update(bp);
3923         spin_unlock_bh(&bp->spq_lock);
3924         return 0;
3925 }
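
/* A minimal, illustrative bnx2x_sp_post() call; the command, CID and DMA
 * mapping below are placeholders rather than a real slow-path flow from
 * this driver:
 *
 *	if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, cid,
 *			  U64_HI(mapping), U64_LO(mapping),
 *			  NONE_CONNECTION_TYPE))
 *		BNX2X_ERR("failed to post SP ramrod\n");
 */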
3926
3927 /* acquire split MCP access lock register */
3928 static int bnx2x_acquire_alr(struct bnx2x *bp)
3929 {
3930         u32 j, val;
3931         int rc = 0;
3932
3933         might_sleep();
3934         for (j = 0; j < 1000; j++) {
3935                 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3936                 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3937                 if (val & MCPR_ACCESS_LOCK_LOCK)
3938                         break;
3939
3940                 usleep_range(5000, 10000);
3941         }
3942         if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3943                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3944                 rc = -EBUSY;
3945         }
3946
3947         return rc;
3948 }
3949
3950 /* release split MCP access lock register */
3951 static void bnx2x_release_alr(struct bnx2x *bp)
3952 {
3953         REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3954 }
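
/* bnx2x_acquire_alr()/bnx2x_release_alr() bracket split MCP register
 * access. A sketch of the expected pairing (illustrative only):
 *
 *	if (bnx2x_acquire_alr(bp))
 *		return;		// could not get the lock
 *	// ... access split MCP registers ...
 *	bnx2x_release_alr(bp);
 */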
3955
3956 #define BNX2X_DEF_SB_ATT_IDX    0x0001
3957 #define BNX2X_DEF_SB_IDX        0x0002
3958
3959 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3960 {
3961         struct host_sp_status_block *def_sb = bp->def_status_blk;
3962         u16 rc = 0;
3963
3964         barrier(); /* status block is written to by the chip */
3965         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3966                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3967                 rc |= BNX2X_DEF_SB_ATT_IDX;
3968         }
3969
3970         if (bp->def_idx != def_sb->sp_sb.running_index) {
3971                 bp->def_idx = def_sb->sp_sb.running_index;
3972                 rc |= BNX2X_DEF_SB_IDX;
3973         }
3974
3975         /* Do not reorder: indices reading should complete before handling */
3976         barrier();
3977         return rc;
3978 }
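
/* Callers test the bitmask returned by bnx2x_update_dsb_idx(); a sketch of
 * the expected consumption (illustrative only):
 *
 *	u16 status = bnx2x_update_dsb_idx(bp);
 *
 *	if (status & BNX2X_DEF_SB_ATT_IDX)
 *		; // handle the attention indices
 *	if (status & BNX2X_DEF_SB_IDX)
 *		; // handle slow-path events
 */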
3979
3980 /*
3981  * slow path service functions
3982  */
3983
3984 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3985 {
3986         int port = BP_PORT(bp);
3987         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3988                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
3989         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3990                                        NIG_REG_MASK_INTERRUPT_PORT0;
3991         u32 aeu_mask;
3992         u32 nig_mask = 0;
3993         u32 reg_addr;
3994
3995         if (bp->attn_state & asserted)
3996                 BNX2X_ERR("IGU ERROR\n");
3997
3998         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3999         aeu_mask = REG_RD(bp, aeu_addr);
4000
4001         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
4002            aeu_mask, asserted);
4003         aeu_mask &= ~(asserted & 0x3ff);
4004         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4005
4006         REG_WR(bp, aeu_addr, aeu_mask);
4007         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4008
4009         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4010         bp->attn_state |= asserted;
4011         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4012
4013         if (asserted & ATTN_HARD_WIRED_MASK) {
4014                 if (asserted & ATTN_NIG_FOR_FUNC) {
4015
4016                         bnx2x_acquire_phy_lock(bp);
4017
4018                         /* save nig interrupt mask */
4019                         nig_mask = REG_RD(bp, nig_int_mask_addr);
4020
4021                         /* If nig_mask is not set, no need to call the update
4022                          * function.
4023                          */
4024                         if (nig_mask) {
4025                                 REG_WR(bp, nig_int_mask_addr, 0);
4026
4027                                 bnx2x_link_attn(bp);
4028                         }
4029
4030                         /* handle unicore attn? */
4031                 }
4032                 if (asserted & ATTN_SW_TIMER_4_FUNC)
4033                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4034
4035                 if (asserted & GPIO_2_FUNC)
4036                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4037
4038                 if (asserted & GPIO_3_FUNC)
4039                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4040
4041                 if (asserted & GPIO_4_FUNC)
4042                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4043
4044                 if (port == 0) {
4045                         if (asserted & ATTN_GENERAL_ATTN_1) {
4046                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4047                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4048                         }
4049                         if (asserted & ATTN_GENERAL_ATTN_2) {
4050                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4051                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4052                         }
4053                         if (asserted & ATTN_GENERAL_ATTN_3) {
4054                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4055                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4056                         }
4057                 } else {
4058                         if (asserted & ATTN_GENERAL_ATTN_4) {
4059                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4060                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4061                         }
4062                         if (asserted & ATTN_GENERAL_ATTN_5) {
4063                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4064                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4065                         }
4066                         if (asserted & ATTN_GENERAL_ATTN_6) {
4067                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4068                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4069                         }
4070                 }
4071
4072         } /* if hardwired */
4073
4074         if (bp->common.int_block == INT_BLOCK_HC)
4075                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4076                             COMMAND_REG_ATTN_BITS_SET);
4077         else
4078                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
4079
4080         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
4081            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4082         REG_WR(bp, reg_addr, asserted);
4083
4084         /* now set back the mask */
4085         if (asserted & ATTN_NIG_FOR_FUNC) {
4086                 /* Verify that IGU ack through BAR was written before restoring
4087                  * NIG mask. This loop should exit after 2-3 iterations max.
4088                  */
4089                 if (bp->common.int_block != INT_BLOCK_HC) {
4090                         u32 cnt = 0, igu_acked;
4091                         do {
4092                                 igu_acked = REG_RD(bp,
4093                                                    IGU_REG_ATTENTION_ACK_BITS);
4094                         } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
4095                                  (++cnt < MAX_IGU_ATTN_ACK_TO));
4096                         if (!igu_acked)
4097                                 DP(NETIF_MSG_HW,
4098                                    "Failed to verify IGU ack on time\n");
4099                         barrier();
4100                 }
4101                 REG_WR(bp, nig_int_mask_addr, nig_mask);
4102                 bnx2x_release_phy_lock(bp);
4103         }
4104 }
4105
4106 static void bnx2x_fan_failure(struct bnx2x *bp)
4107 {
4108         int port = BP_PORT(bp);
4109         u32 ext_phy_config;
4110         /* mark the failure */
4111         ext_phy_config =
4112                 SHMEM_RD(bp,
4113                          dev_info.port_hw_config[port].external_phy_config);
4114
4115         ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4116         ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4117         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
4118                  ext_phy_config);
4119
4120         /* log the failure */
4121         netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n"
4122                             "Please contact OEM Support for assistance\n");
4123
4124         /* Schedule device reset (unload).
4125          * Some boards consume enough power while the driver is up to
4126          * overheat if the fan fails.
4127          */
4128         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
4129 }
4130
4131 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4132 {
4133         int port = BP_PORT(bp);
4134         int reg_offset;
4135         u32 val;
4136
4137         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4138                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4139
4140         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4141
4142                 val = REG_RD(bp, reg_offset);
4143                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4144                 REG_WR(bp, reg_offset, val);
4145
4146                 BNX2X_ERR("SPIO5 hw attention\n");
4147
4148                 /* Fan failure attention */
4149                 bnx2x_hw_reset_phy(&bp->link_params);
4150                 bnx2x_fan_failure(bp);
4151         }
4152
4153         if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
4154                 bnx2x_acquire_phy_lock(bp);
4155                 bnx2x_handle_module_detect_int(&bp->link_params);
4156                 bnx2x_release_phy_lock(bp);
4157         }
4158
4159         if (attn & HW_INTERRUT_ASSERT_SET_0) {
4160
4161                 val = REG_RD(bp, reg_offset);
4162                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
4163                 REG_WR(bp, reg_offset, val);
4164
4165                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4166                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
4167                 bnx2x_panic();
4168         }
4169 }
4170
4171 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4172 {
4173         u32 val;
4174
4175         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
4176
4177                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4178                 BNX2X_ERR("DB hw attention 0x%x\n", val);
4179                 /* DORQ discard attention */
4180                 if (val & 0x2)
4181                         BNX2X_ERR("FATAL error from DORQ\n");
4182         }
4183
4184         if (attn & HW_INTERRUT_ASSERT_SET_1) {
4185
4186                 int port = BP_PORT(bp);
4187                 int reg_offset;
4188
4189                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
4190                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4191
4192                 val = REG_RD(bp, reg_offset);
4193                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
4194                 REG_WR(bp, reg_offset, val);
4195
4196                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4197                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
4198                 bnx2x_panic();
4199         }
4200 }
4201
4202 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4203 {
4204         u32 val;
4205
4206         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4207
4208                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4209                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4210                 /* CFC error attention */
4211                 if (val & 0x2)
4212                         BNX2X_ERR("FATAL error from CFC\n");
4213         }
4214
4215         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4216                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4217                 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
4218                 /* RQ_USDMDP_FIFO_OVERFLOW */
4219                 if (val & 0x18000)
4220                         BNX2X_ERR("FATAL error from PXP\n");
4221
4222                 if (!CHIP_IS_E1x(bp)) {
4223                         val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
4224                         BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
4225                 }
4226         }
4227
4228         if (attn & HW_INTERRUT_ASSERT_SET_2) {
4229
4230                 int port = BP_PORT(bp);
4231                 int reg_offset;
4232
4233                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
4234                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4235
4236                 val = REG_RD(bp, reg_offset);
4237                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
4238                 REG_WR(bp, reg_offset, val);
4239
4240                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4241                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
4242                 bnx2x_panic();
4243         }
4244 }
4245
4246 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4247 {
4248         u32 val;
4249
4250         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4251
4252                 if (attn & BNX2X_PMF_LINK_ASSERT) {
4253                         int func = BP_FUNC(bp);
4254
4255                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4256                         bnx2x_read_mf_cfg(bp);
4257                         bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4258                                         func_mf_config[BP_ABS_FUNC(bp)].config);
4259                         val = SHMEM_RD(bp,
4260                                        func_mb[BP_FW_MB_IDX(bp)].drv_status);
4261
4262                         if (val & (DRV_STATUS_DCC_EVENT_MASK |
4263                                    DRV_STATUS_OEM_EVENT_MASK))
4264                                 bnx2x_oem_event(bp,
4265                                         (val & (DRV_STATUS_DCC_EVENT_MASK |
4266                                                 DRV_STATUS_OEM_EVENT_MASK)));
4267
4268                         if (val & DRV_STATUS_SET_MF_BW)
4269                                 bnx2x_set_mf_bw(bp);
4270
4271                         if (val & DRV_STATUS_DRV_INFO_REQ)
4272                                 bnx2x_handle_drv_info_req(bp);
4273
4274                         if (val & DRV_STATUS_VF_DISABLED)
4275                                 bnx2x_schedule_iov_task(bp,
4276                                                         BNX2X_IOV_HANDLE_FLR);
4277
4278                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4279                                 bnx2x_pmf_update(bp);
4280
4281                         if (bp->port.pmf &&
4282                             (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
4283                                 bp->dcbx_enabled > 0)
4284                                 /* start dcbx state machine */
4285                                 bnx2x_dcbx_set_params(bp,
4286                                         BNX2X_DCBX_STATE_NEG_RECEIVED);
4287                         if (val & DRV_STATUS_AFEX_EVENT_MASK)
4288                                 bnx2x_handle_afex_cmd(bp,
4289                                         val & DRV_STATUS_AFEX_EVENT_MASK);
4290                         if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4291                                 bnx2x_handle_eee_event(bp);
4292
4293                         if (val & DRV_STATUS_OEM_UPDATE_SVID)
4294                                 bnx2x_handle_update_svid_cmd(bp);
4295
4296                         if (bp->link_vars.periodic_flags &
4297                             PERIODIC_FLAGS_LINK_EVENT) {
4298                                 /* sync with link */
4299                                 bnx2x_acquire_phy_lock(bp);
4300                                 bp->link_vars.periodic_flags &=
4301                                         ~PERIODIC_FLAGS_LINK_EVENT;
4302                                 bnx2x_release_phy_lock(bp);
4303                                 if (IS_MF(bp))
4304                                         bnx2x_link_sync_notify(bp);
4305                                 bnx2x_link_report(bp);
4306                         }
4307                         /* Always call it here: bnx2x_link_report() will
4308                          * prevent duplicate link indications.
4309                          */
4310                         bnx2x__link_status_update(bp);
4311                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4312
4313                         BNX2X_ERR("MC assert!\n");
4314                         bnx2x_mc_assert(bp);
4315                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4316                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4317                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4318                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4319                         bnx2x_panic();
4320
4321                 } else if (attn & BNX2X_MCP_ASSERT) {
4322
4323                         BNX2X_ERR("MCP assert!\n");
4324                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4325                         bnx2x_fw_dump(bp);
4326
4327                 } else
4328                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4329         }
4330
4331         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4332                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4333                 if (attn & BNX2X_GRC_TIMEOUT) {
4334                         val = CHIP_IS_E1(bp) ? 0 :
4335                                         REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4336                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
4337                 }
4338                 if (attn & BNX2X_GRC_RSV) {
4339                         val = CHIP_IS_E1(bp) ? 0 :
4340                                         REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4341                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
4342                 }
4343                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4344         }
4345 }
4346
4347 /*
4348  * Bits map:
4349  * 0-7   - Engine0 load counter.
4350  * 8-15  - Engine1 load counter.
4351  * 16    - Engine0 RESET_IN_PROGRESS bit.
4352  * 17    - Engine1 RESET_IN_PROGRESS bit.
4353  * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active function
4354  *         on the engine
4355  * 19    - Engine1 ONE_IS_LOADED.
4356  * 20    - Chip reset flow bit. When set, a non-leader must wait for both
4357  *         engines' leaders to complete (check both RESET_IN_PROGRESS bits,
4358  *         not just the one belonging to its own engine).
4359  *
4360  */
4361 #define BNX2X_RECOVERY_GLOB_REG         MISC_REG_GENERIC_POR_1
4362
4363 #define BNX2X_PATH0_LOAD_CNT_MASK       0x000000ff
4364 #define BNX2X_PATH0_LOAD_CNT_SHIFT      0
4365 #define BNX2X_PATH1_LOAD_CNT_MASK       0x0000ff00
4366 #define BNX2X_PATH1_LOAD_CNT_SHIFT      8
4367 #define BNX2X_PATH0_RST_IN_PROG_BIT     0x00010000
4368 #define BNX2X_PATH1_RST_IN_PROG_BIT     0x00020000
4369 #define BNX2X_GLOBAL_RESET_BIT          0x00040000
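
/* Illustrative sketch (not upstream driver code): decoding a snapshot of
 * BNX2X_RECOVERY_GLOB_REG with the masks above. For example, a value of
 * 0x00050003 means PFs 0 and 1 are loaded on engine 0, no PF is loaded on
 * engine 1, engine 0 has RESET_IN_PROGRESS set, and the global reset bit
 * is set.
 */
static inline u32 example_engine_load_mask(u32 glob_reg, int engine)
{
        u32 mask = engine ? BNX2X_PATH1_LOAD_CNT_MASK :
                            BNX2X_PATH0_LOAD_CNT_MASK;
        u32 shift = engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
                             BNX2X_PATH0_LOAD_CNT_SHIFT;

        return (glob_reg & mask) >> shift;      /* one bit per loaded PF */
}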
4370
4371 /*
4372  * Set the GLOBAL_RESET bit.
4373  *
4374  * Should be run under rtnl lock
4375  */
4376 void bnx2x_set_reset_global(struct bnx2x *bp)
4377 {
4378         u32 val;
4379         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4380         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4381         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4382         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4383 }
4384
4385 /*
4386  * Clear the GLOBAL_RESET bit.
4387  *
4388  * Should be run under rtnl lock
4389  */
4390 static void bnx2x_clear_reset_global(struct bnx2x *bp)
4391 {
4392         u32 val;
4393         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4394         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4395         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4396         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4397 }
4398
4399 /*
4400  * Checks the GLOBAL_RESET bit.
4401  *
4402  * Should be run under rtnl lock
4403  */
4404 static bool bnx2x_reset_is_global(struct bnx2x *bp)
4405 {
4406         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4407
4408         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4409         return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4410 }
4411
4412 /*
4413  * Clear RESET_IN_PROGRESS bit for the current engine.
4414  *
4415  * Should be run under rtnl lock
4416  */
4417 static void bnx2x_set_reset_done(struct bnx2x *bp)
4418 {
4419         u32 val;
4420         u32 bit = BP_PATH(bp) ?
4421                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4422         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4423         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4424
4425         /* Clear the bit */
4426         val &= ~bit;
4427         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4428
4429         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4430 }
4431
4432 /*
4433  * Set RESET_IN_PROGRESS for the current engine.
4434  *
4435  * Should be run under rtnl lock
4436  */
4437 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4438 {
4439         u32 val;
4440         u32 bit = BP_PATH(bp) ?
4441                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4442         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4443         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4444
4445         /* Set the bit */
4446         val |= bit;
4447         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4448         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4449 }
4450
4451 /*
4452  * Checks the RESET_IN_PROGRESS bit for the given engine.
4453  * Should be run under rtnl lock
4454  */
4455 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4456 {
4457         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4458         u32 bit = engine ?
4459                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4460
4461         /* return false if bit is set */
4462         return (val & bit) ? false : true;
4463 }
4464
4465 /*
4466  * Set the pf load mark for the current pf.
4467  *
4468  * Should be run under rtnl lock
4469  */
4470 void bnx2x_set_pf_load(struct bnx2x *bp)
4471 {
4472         u32 val1, val;
4473         u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4474                              BNX2X_PATH0_LOAD_CNT_MASK;
4475         u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4476                              BNX2X_PATH0_LOAD_CNT_SHIFT;
4477
4478         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4479         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4480
4481         DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
4482
4483         /* get the current counter value */
4484         val1 = (val & mask) >> shift;
4485
4486         /* set bit of that PF */
4487         val1 |= (1 << bp->pf_num);
4488
4489         /* clear the old value */
4490         val &= ~mask;
4491
4492         /* set the new one */
4493         val |= ((val1 << shift) & mask);
4494
4495         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4496         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4497 }
4498
4499 /**
4500  * bnx2x_clear_pf_load - clear pf load mark
4501  *
4502  * @bp:         driver handle
4503  *
4504  * Should be run under rtnl lock.
4505  * Decrements the load counter for the current engine. Returns
4506  * whether other functions are still loaded
4507  */
4508 bool bnx2x_clear_pf_load(struct bnx2x *bp)
4509 {
4510         u32 val1, val;
4511         u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4512                              BNX2X_PATH0_LOAD_CNT_MASK;
4513         u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4514                              BNX2X_PATH0_LOAD_CNT_SHIFT;
4515
4516         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4517         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4518         DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4519
4520         /* get the current counter value */
4521         val1 = (val & mask) >> shift;
4522
4523         /* clear bit of that PF */
4524         val1 &= ~(1 << bp->pf_num);
4525
4526         /* clear the old value */
4527         val &= ~mask;
4528
4529         /* set the new one */
4530         val |= ((val1 << shift) & mask);
4531
4532         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4533         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4534         return val1 != 0;
4535 }
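
/* Worked example (hypothetical values): on path 0 with pf_num 2, a register
 * value of 0x0000000c gives val1 = 0x0c (PFs 2 and 3 loaded). Clearing PF 2
 * leaves val1 = 0x08, which is written back into the counter field, and the
 * function returns true because PF 3 is still loaded.
 */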
4536
4537 /*
4538  * Read the load status of the given engine.
4539  *
4540  * Should be run under rtnl lock
4541  */
4542 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4543 {
4544         u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4545                              BNX2X_PATH0_LOAD_CNT_MASK);
4546         u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4547                              BNX2X_PATH0_LOAD_CNT_SHIFT);
4548         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4549
4550         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4551
4552         val = (val & mask) >> shift;
4553
4554         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4555            engine, val);
4556
4557         return val != 0;
4558 }
4559
4560 static void _print_parity(struct bnx2x *bp, u32 reg)
4561 {
4562         pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4563 }
4564
4565 static void _print_next_block(int idx, const char *blk)
4566 {
4567         pr_cont("%s%s", idx ? ", " : "", blk);
4568 }
4569
4570 static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4571                                             int *par_num, bool print)
4572 {
4573         u32 cur_bit;
4574         bool res;
4575         int i;
4576
4577         res = false;
4578
4579         for (i = 0; sig; i++) {
4580                 cur_bit = (0x1UL << i);
4581                 if (sig & cur_bit) {
4582                         res |= true; /* Each set bit is a real error! */
4583
4584                         if (print) {
4585                                 switch (cur_bit) {
4586                                 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4587                                         _print_next_block((*par_num)++, "BRB");
4588                                         _print_parity(bp,
4589                                                       BRB1_REG_BRB1_PRTY_STS);
4590                                         break;
4591                                 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4592                                         _print_next_block((*par_num)++,
4593                                                           "PARSER");
4594                                         _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4595                                         break;
4596                                 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4597                                         _print_next_block((*par_num)++, "TSDM");
4598                                         _print_parity(bp,
4599                                                       TSDM_REG_TSDM_PRTY_STS);
4600                                         break;
4601                                 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4602                                         _print_next_block((*par_num)++,
4603                                                           "SEARCHER");
4604                                         _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4605                                         break;
4606                                 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4607                                         _print_next_block((*par_num)++, "TCM");
4608                                         _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4609                                         break;
4610                                 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4611                                         _print_next_block((*par_num)++,
4612                                                           "TSEMI");
4613                                         _print_parity(bp,
4614                                                       TSEM_REG_TSEM_PRTY_STS_0);
4615                                         _print_parity(bp,
4616                                                       TSEM_REG_TSEM_PRTY_STS_1);
4617                                         break;
4618                                 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4619                                         _print_next_block((*par_num)++, "XPB");
4620                                         _print_parity(bp, GRCBASE_XPB +
4621                                                           PB_REG_PB_PRTY_STS);
4622                                         break;
4623                                 }
4624                         }
4625
4626                         /* Clear the bit */
4627                         sig &= ~cur_bit;
4628                 }
4629         }
4630
4631         return res;
4632 }
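
/* Illustrative sketch (not upstream driver code): the scan pattern used by
 * all the bnx2x_check_blocks_with_parity*() helpers - walk the signal word
 * bit by bit, handle each set bit, then clear it so the loop terminates as
 * soon as no set bits remain.
 */
static inline int example_for_each_set_bit(u32 sig)
{
        u32 cur_bit;
        int i, handled = 0;

        for (i = 0; sig; i++) {
                cur_bit = 0x1UL << i;
                if (sig & cur_bit) {
                        handled++;        /* a real handler decodes cur_bit */
                        sig &= ~cur_bit;  /* clear so the loop can end early */
                }
        }
        return handled;
}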
4633
4634 static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4635                                             int *par_num, bool *global,
4636                                             bool print)
4637 {
4638         u32 cur_bit;
4639         bool res;
4640         int i;
4641
4642         res = false;
4643
4644         for (i = 0; sig; i++) {
4645                 cur_bit = (0x1UL << i);
4646                 if (sig & cur_bit) {
4647                         res |= true; /* Each set bit is a real error! */
4648                         switch (cur_bit) {
4649                         case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4650                                 if (print) {
4651                                         _print_next_block((*par_num)++, "PBF");
4652                                         _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4653                                 }
4654                                 break;
4655                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4656                                 if (print) {
4657                                         _print_next_block((*par_num)++, "QM");
4658                                         _print_parity(bp, QM_REG_QM_PRTY_STS);
4659                                 }
4660                                 break;
4661                         case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4662                                 if (print) {
4663                                         _print_next_block((*par_num)++, "TM");
4664                                         _print_parity(bp, TM_REG_TM_PRTY_STS);
4665                                 }
4666                                 break;
4667                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4668                                 if (print) {
4669                                         _print_next_block((*par_num)++, "XSDM");
4670                                         _print_parity(bp,
4671                                                       XSDM_REG_XSDM_PRTY_STS);
4672                                 }
4673                                 break;
4674                         case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4675                                 if (print) {
4676                                         _print_next_block((*par_num)++, "XCM");
4677                                         _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4678                                 }
4679                                 break;
4680                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4681                                 if (print) {
4682                                         _print_next_block((*par_num)++,
4683                                                           "XSEMI");
4684                                         _print_parity(bp,
4685                                                       XSEM_REG_XSEM_PRTY_STS_0);
4686                                         _print_parity(bp,
4687                                                       XSEM_REG_XSEM_PRTY_STS_1);
4688                                 }
4689                                 break;
4690                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4691                                 if (print) {
4692                                         _print_next_block((*par_num)++,
4693                                                           "DOORBELLQ");
4694                                         _print_parity(bp,
4695                                                       DORQ_REG_DORQ_PRTY_STS);
4696                                 }
4697                                 break;
4698                         case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4699                                 if (print) {
4700                                         _print_next_block((*par_num)++, "NIG");
4701                                         if (CHIP_IS_E1x(bp)) {
4702                                                 _print_parity(bp,
4703                                                         NIG_REG_NIG_PRTY_STS);
4704                                         } else {
4705                                                 _print_parity(bp,
4706                                                         NIG_REG_NIG_PRTY_STS_0);
4707                                                 _print_parity(bp,
4708                                                         NIG_REG_NIG_PRTY_STS_1);
4709                                         }
4710                                 }
4711                                 break;
4712                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4713                                 if (print)
4714                                         _print_next_block((*par_num)++,
4715                                                           "VAUX PCI CORE");
4716                                 *global = true;
4717                                 break;
4718                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4719                                 if (print) {
4720                                         _print_next_block((*par_num)++,
4721                                                           "DEBUG");
4722                                         _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4723                                 }
4724                                 break;
4725                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4726                                 if (print) {
4727                                         _print_next_block((*par_num)++, "USDM");
4728                                         _print_parity(bp,
4729                                                       USDM_REG_USDM_PRTY_STS);
4730                                 }
4731                                 break;
4732                         case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4733                                 if (print) {
4734                                         _print_next_block((*par_num)++, "UCM");
4735                                         _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4736                                 }
4737                                 break;
4738                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4739                                 if (print) {
4740                                         _print_next_block((*par_num)++,
4741                                                           "USEMI");
4742                                         _print_parity(bp,
4743                                                       USEM_REG_USEM_PRTY_STS_0);
4744                                         _print_parity(bp,
4745                                                       USEM_REG_USEM_PRTY_STS_1);
4746                                 }
4747                                 break;
4748                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4749                                 if (print) {
4750                                         _print_next_block((*par_num)++, "UPB");
4751                                         _print_parity(bp, GRCBASE_UPB +
4752                                                           PB_REG_PB_PRTY_STS);
4753                                 }
4754                                 break;
4755                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4756                                 if (print) {
4757                                         _print_next_block((*par_num)++, "CSDM");
4758                                         _print_parity(bp,
4759                                                       CSDM_REG_CSDM_PRTY_STS);
4760                                 }
4761                                 break;
4762                         case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4763                                 if (print) {
4764                                         _print_next_block((*par_num)++, "CCM");
4765                                         _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4766                                 }
4767                                 break;
4768                         }
4769
4770                         /* Clear the bit */
4771                         sig &= ~cur_bit;
4772                 }
4773         }
4774
4775         return res;
4776 }
4777
4778 static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4779                                             int *par_num, bool print)
4780 {
4781         u32 cur_bit;
4782         bool res;
4783         int i;
4784
4785         res = false;
4786
4787         for (i = 0; sig; i++) {
4788                 cur_bit = (0x1UL << i);
4789                 if (sig & cur_bit) {
4790                         res = true; /* Each set bit is a real error! */
4791                         if (print) {
4792                                 switch (cur_bit) {
4793                                 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4794                                         _print_next_block((*par_num)++,
4795                                                           "CSEMI");
4796                                         _print_parity(bp,
4797                                                       CSEM_REG_CSEM_PRTY_STS_0);
4798                                         _print_parity(bp,
4799                                                       CSEM_REG_CSEM_PRTY_STS_1);
4800                                         break;
4801                                 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4802                                         _print_next_block((*par_num)++, "PXP");
4803                                         _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4804                                         _print_parity(bp,
4805                                                       PXP2_REG_PXP2_PRTY_STS_0);
4806                                         _print_parity(bp,
4807                                                       PXP2_REG_PXP2_PRTY_STS_1);
4808                                         break;
4809                                 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4810                                         _print_next_block((*par_num)++,
4811                                                           "PXPPCICLOCKCLIENT");
4812                                         break;
4813                                 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4814                                         _print_next_block((*par_num)++, "CFC");
4815                                         _print_parity(bp,
4816                                                       CFC_REG_CFC_PRTY_STS);
4817                                         break;
4818                                 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4819                                         _print_next_block((*par_num)++, "CDU");
4820                                         _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4821                                         break;
4822                                 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4823                                         _print_next_block((*par_num)++, "DMAE");
4824                                         _print_parity(bp,
4825                                                       DMAE_REG_DMAE_PRTY_STS);
4826                                         break;
4827                                 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4828                                         _print_next_block((*par_num)++, "IGU");
4829                                         if (CHIP_IS_E1x(bp))
4830                                                 _print_parity(bp,
4831                                                         HC_REG_HC_PRTY_STS);
4832                                         else
4833                                                 _print_parity(bp,
4834                                                         IGU_REG_IGU_PRTY_STS);
4835                                         break;
4836                                 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4837                                         _print_next_block((*par_num)++, "MISC");
4838                                         _print_parity(bp,
4839                                                       MISC_REG_MISC_PRTY_STS);
4840                                         break;
4841                                 }
4842                         }
4843
4844                         /* Clear the bit */
4845                         sig &= ~cur_bit;
4846                 }
4847         }
4848
4849         return res;
4850 }
4851
4852 static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4853                                             int *par_num, bool *global,
4854                                             bool print)
4855 {
4856         bool res = false;
4857         u32 cur_bit;
4858         int i;
4859
4860         for (i = 0; sig; i++) {
4861                 cur_bit = (0x1UL << i);
4862                 if (sig & cur_bit) {
4863                         switch (cur_bit) {
4864                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4865                                 if (print)
4866                                         _print_next_block((*par_num)++,
4867                                                           "MCP ROM");
4868                                 *global = true;
4869                                 res = true;
4870                                 break;
4871                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4872                                 if (print)
4873                                         _print_next_block((*par_num)++,
4874                                                           "MCP UMP RX");
4875                                 *global = true;
4876                                 res = true;
4877                                 break;
4878                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4879                                 if (print)
4880                                         _print_next_block((*par_num)++,
4881                                                           "MCP UMP TX");
4882                                 *global = true;
4883                                 res = true;
4884                                 break;
4885                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4886                                 (*par_num)++;
4887                                 /* clear latched SCPAD PARITY from MCP */
4888                                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4889                                        1UL << 10);
4890                                 break;
4891                         }
4892
4893                         /* Clear the bit */
4894                         sig &= ~cur_bit;
4895                 }
4896         }
4897
4898         return res;
4899 }
4900
4901 static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4902                                             int *par_num, bool print)
4903 {
4904         u32 cur_bit;
4905         bool res;
4906         int i;
4907
4908         res = false;
4909
4910         for (i = 0; sig; i++) {
4911                 cur_bit = (0x1UL << i);
4912                 if (sig & cur_bit) {
4913                         res = true; /* Each set bit is a real error! */
4914                         if (print) {
4915                                 switch (cur_bit) {
4916                                 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4917                                         _print_next_block((*par_num)++,
4918                                                           "PGLUE_B");
4919                                         _print_parity(bp,
4920                                                       PGLUE_B_REG_PGLUE_B_PRTY_STS);
4921                                         break;
4922                                 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4923                                         _print_next_block((*par_num)++, "ATC");
4924                                         _print_parity(bp,
4925                                                       ATC_REG_ATC_PRTY_STS);
4926                                         break;
4927                                 }
4928                         }
4929                         /* Clear the bit */
4930                         sig &= ~cur_bit;
4931                 }
4932         }
4933
4934         return res;
4935 }
4936
4937 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4938                               u32 *sig)
4939 {
4940         bool res = false;
4941
4942         if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4943             (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4944             (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4945             (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4946             (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4947                 int par_num = 0;
4948
4949                 DP(NETIF_MSG_HW, "Parity error detected: HW block parity attention:\n"
4950                                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4951                           sig[0] & HW_PRTY_ASSERT_SET_0,
4952                           sig[1] & HW_PRTY_ASSERT_SET_1,
4953                           sig[2] & HW_PRTY_ASSERT_SET_2,
4954                           sig[3] & HW_PRTY_ASSERT_SET_3,
4955                           sig[4] & HW_PRTY_ASSERT_SET_4);
4956                 if (print) {
4957                         if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4958                              (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4959                              (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4960                              (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
4961                              (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
4962                                 netdev_err(bp->dev,
4963                                            "Parity errors detected in blocks: ");
4964                         } else {
4965                                 print = false;
4966                         }
4967                 }
4968                 res |= bnx2x_check_blocks_with_parity0(bp,
4969                         sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4970                 res |= bnx2x_check_blocks_with_parity1(bp,
4971                         sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4972                 res |= bnx2x_check_blocks_with_parity2(bp,
4973                         sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4974                 res |= bnx2x_check_blocks_with_parity3(bp,
4975                         sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
4976                 res |= bnx2x_check_blocks_with_parity4(bp,
4977                         sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
4978
4979                 if (print)
4980                         pr_cont("\n");
4981         }
4982
4983         return res;
4984 }
4985
4986 /**
4987  * bnx2x_chk_parity_attn - checks for parity attentions.
4988  *
4989  * @bp:         driver handle
4990  * @global:     true if there was a global attention
4991  * @print:      show parity attention in syslog
4992  */
4993 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4994 {
4995         struct attn_route attn = { {0} };
4996         int port = BP_PORT(bp);
4997
4998         attn.sig[0] = REG_RD(bp,
4999                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
5000                              port*4);
5001         attn.sig[1] = REG_RD(bp,
5002                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
5003                              port*4);
5004         attn.sig[2] = REG_RD(bp,
5005                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
5006                              port*4);
5007         attn.sig[3] = REG_RD(bp,
5008                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
5009                              port*4);
5010         /* Since MCP attentions can't be disabled inside the block, we need to
5011          * read AEU registers to see whether they're currently disabled
5012          */
5013         attn.sig[3] &= ((REG_RD(bp,
5014                                 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
5015                                       : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
5016                          MISC_AEU_ENABLE_MCP_PRTY_BITS) |
5017                         ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
5018
5019         if (!CHIP_IS_E1x(bp))
5020                 attn.sig[4] = REG_RD(bp,
5021                         MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
5022                                      port*4);
5023
5024         return bnx2x_parity_attn(bp, global, print, attn.sig);
5025 }
5026
5027 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
5028 {
5029         u32 val;
5030         if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
5031
5032                 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
5033                 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
5034                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
5035                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
5036                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
5037                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
5038                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
5039                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
5040                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
5041                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
5042                 if (val &
5043                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
5044                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
5045                 if (val &
5046                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
5047                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
5048                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
5049                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
5050                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
5051                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
5052                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
5053                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
5054         }
5055         if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
5056                 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
5057                 BNX2X_ERR("ATC hw attention 0x%x\n", val);
5058                 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
5059                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
5060                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
5061                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
5062                 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
5063                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
5064                 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
5065                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
5066                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
5067                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
5068                 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
5069                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
5070         }
5071
5072         if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5073                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
5074                 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
5075                 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5076                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
5077         }
5078 }
5079
5080 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
5081 {
5082         struct attn_route attn, *group_mask;
5083         int port = BP_PORT(bp);
5084         int index;
5085         u32 reg_addr;
5086         u32 val;
5087         u32 aeu_mask;
5088         bool global = false;
5089
5090         /* Need to take HW lock because MCP or other port might also
5091            try to handle this event */
5092         bnx2x_acquire_alr(bp);
5093
5094         if (bnx2x_chk_parity_attn(bp, &global, true)) {
5095 #ifndef BNX2X_STOP_ON_ERROR
5096                 bp->recovery_state = BNX2X_RECOVERY_INIT;
5097                 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5098                 /* Disable HW interrupts */
5099                 bnx2x_int_disable(bp);
5100                 /* In case of parity errors, don't handle the attentions so
5101                  * that other functions can still "see" the parity errors.
5102                  */
5103 #else
5104                 bnx2x_panic();
5105 #endif
5106                 bnx2x_release_alr(bp);
5107                 return;
5108         }
5109
5110         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
5111         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
5112         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
5113         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
5114         if (!CHIP_IS_E1x(bp))
5115                 attn.sig[4] =
5116                       REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
5117         else
5118                 attn.sig[4] = 0;
5119
5120         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
5121            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
5122
5123         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5124                 if (deasserted & (1 << index)) {
5125                         group_mask = &bp->attn_group[index];
5126
5127                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
5128                            index,
5129                            group_mask->sig[0], group_mask->sig[1],
5130                            group_mask->sig[2], group_mask->sig[3],
5131                            group_mask->sig[4]);
5132
5133                         bnx2x_attn_int_deasserted4(bp,
5134                                         attn.sig[4] & group_mask->sig[4]);
5135                         bnx2x_attn_int_deasserted3(bp,
5136                                         attn.sig[3] & group_mask->sig[3]);
5137                         bnx2x_attn_int_deasserted1(bp,
5138                                         attn.sig[1] & group_mask->sig[1]);
5139                         bnx2x_attn_int_deasserted2(bp,
5140                                         attn.sig[2] & group_mask->sig[2]);
5141                         bnx2x_attn_int_deasserted0(bp,
5142                                         attn.sig[0] & group_mask->sig[0]);
5143                 }
5144         }
5145
5146         bnx2x_release_alr(bp);
5147
5148         if (bp->common.int_block == INT_BLOCK_HC)
5149                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
5150                             COMMAND_REG_ATTN_BITS_CLR);
5151         else
5152                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
5153
5154         val = ~deasserted;
5155         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
5156            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5157         REG_WR(bp, reg_addr, val);
5158
5159         if (~bp->attn_state & deasserted)
5160                 BNX2X_ERR("IGU ERROR\n");
5161
5162         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5163                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
5164
5165         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5166         aeu_mask = REG_RD(bp, reg_addr);
5167
5168         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
5169            aeu_mask, deasserted);
5170         aeu_mask |= (deasserted & 0x3ff);
5171         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
5172
5173         REG_WR(bp, reg_addr, aeu_mask);
5174         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5175
5176         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
5177         bp->attn_state &= ~deasserted;
5178         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
5179 }
5180
5181 static void bnx2x_attn_int(struct bnx2x *bp)
5182 {
5183         /* read local copy of bits */
5184         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
5185                                                                 attn_bits);
5186         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
5187                                                                 attn_bits_ack);
5188         u32 attn_state = bp->attn_state;
5189
5190         /* look for changed bits */
5191         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
5192         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
5193
5194         DP(NETIF_MSG_HW,
5195            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
5196            attn_bits, attn_ack, asserted, deasserted);
5197
5198         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
5199                 BNX2X_ERR("BAD attention state\n");
5200
5201         /* handle bits that were raised */
5202         if (asserted)
5203                 bnx2x_attn_int_asserted(bp, asserted);
5204
5205         if (deasserted)
5206                 bnx2x_attn_int_deasserted(bp, deasserted);
5207 }
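
/* Illustrative sketch (not upstream driver code): the asserted/deasserted
 * derivation above, with hypothetical values. With attn_bits = 0x5,
 * attn_ack = 0x1 and attn_state = 0x1: asserted = 0x5 & ~0x1 & ~0x1 = 0x4
 * (bit 2 newly raised), and deasserted = ~0x5 & 0x1 & 0x1 = 0x0 (nothing
 * newly cleared).
 */
static inline void example_attn_delta(u32 attn_bits, u32 attn_ack,
                                      u32 attn_state, u32 *asserted,
                                      u32 *deasserted)
{
        /* newly raised: set in attn_bits but not yet acked or tracked */
        *asserted = attn_bits & ~attn_ack & ~attn_state;
        /* newly cleared: gone from attn_bits but still acked and tracked */
        *deasserted = ~attn_bits & attn_ack & attn_state;
}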
5208
5209 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
5210                       u16 index, u8 op, u8 update)
5211 {
5212         u32 igu_addr = bp->igu_base_addr;
5213         igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
5214         bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
5215                              igu_addr);
5216 }
5217
5218 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
5219 {
5220         /* No memory barriers */
5221         storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
5222         mmiowb(); /* keep prod updates ordered */
5223 }
5224
5225 static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
5226                                       union event_ring_elem *elem)
5227 {
5228         u8 err = elem->message.error;
5229
5230         if (!bp->cnic_eth_dev.starting_cid  ||
5231             (cid < bp->cnic_eth_dev.starting_cid &&
5232             cid != bp->cnic_eth_dev.iscsi_l2_cid))
5233                 return 1;
5234
5235         DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
5236
5237         if (unlikely(err)) {
5238
5239                 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
5240                           cid);
5241                 bnx2x_panic_dump(bp, false);
5242         }
5243         bnx2x_cnic_cfc_comp(bp, cid, err);
5244         return 0;
5245 }
5246
5247 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
5248 {
5249         struct bnx2x_mcast_ramrod_params rparam;
5250         int rc;
5251
5252         memset(&rparam, 0, sizeof(rparam));
5253
5254         rparam.mcast_obj = &bp->mcast_obj;
5255
5256         netif_addr_lock_bh(bp->dev);
5257
5258         /* Clear pending state for the last command */
5259         bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
5260
5261         /* If there are pending mcast commands - send them */
5262         if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
5263                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
5264                 if (rc < 0)
5265                         BNX2X_ERR("Failed to send pending mcast commands: %d\n",
5266                                   rc);
5267         }
5268
5269         netif_addr_unlock_bh(bp->dev);
5270 }
5271
5272 static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5273                                             union event_ring_elem *elem)
5274 {
5275         unsigned long ramrod_flags = 0;
5276         int rc = 0;
5277         u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
5278         struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5279
5280         /* Always push next commands out, don't wait here */
5281         __set_bit(RAMROD_CONT, &ramrod_flags);
5282
5283         switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
5284                             >> BNX2X_SWCID_SHIFT) {
5285         case BNX2X_FILTER_MAC_PENDING:
5286                 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
5287                 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
5288                         vlan_mac_obj = &bp->iscsi_l2_mac_obj;
5289                 else
5290                         vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
5291
5292                 break;
5293         case BNX2X_FILTER_VLAN_PENDING:
5294                 DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
5295                 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
5296                 break;
5297         case BNX2X_FILTER_MCAST_PENDING:
5298                 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
5299                 /* This is only relevant for 57710 where multicast MACs are
5300                  * configured as unicast MACs using the same ramrod.
5301                  */
5302                 bnx2x_handle_mcast_eqe(bp);
5303                 return;
5304         default:
5305                 BNX2X_ERR("Unsupported classification command: %d\n",
5306                           elem->message.data.eth_event.echo);
5307                 return;
5308         }
5309
5310         rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
5311
5312         if (rc < 0)
5313                 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
5314         else if (rc > 0)
5315                 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
5316 }
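
/* Illustrative sketch (not upstream driver code): the echo field decoded
 * above packs the pending classification opcode above BNX2X_SWCID_SHIFT
 * and the connection id (CID) in the low BNX2X_SWCID_MASK bits. The
 * "example_" name is hypothetical.
 */
static inline void example_decode_echo(u32 echo, u32 *cid, u32 *opcode)
{
        *cid = echo & BNX2X_SWCID_MASK;         /* low bits: connection id */
        *opcode = echo >> BNX2X_SWCID_SHIFT;    /* high bits: filter opcode */
}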
5317
5318 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5319
5320 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
5321 {
5322         netif_addr_lock_bh(bp->dev);
5323
5324         clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5325
5326         /* Send rx_mode command again if it was requested */
5327         if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5328                 bnx2x_set_storm_rx_mode(bp);
5329         else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
5330                                     &bp->sp_state))
5331                 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5332         else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5333                                     &bp->sp_state))
5334                 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5335
5336         netif_addr_unlock_bh(bp->dev);
5337 }
5338
5339 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5340                                               union event_ring_elem *elem)
5341 {
5342         if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5343                 DP(BNX2X_MSG_SP,
5344                    "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5345                    elem->message.data.vif_list_event.func_bit_map);
5346                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5347                         elem->message.data.vif_list_event.func_bit_map);
5348         } else if (elem->message.data.vif_list_event.echo ==
5349                    VIF_LIST_RULE_SET) {
5350                 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5351                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5352         }
5353 }
5354
5355 /* called with rtnl_lock */
5356 static void bnx2x_after_function_update(struct bnx2x *bp)
5357 {
5358         int q, rc;
5359         struct bnx2x_fastpath *fp;
5360         struct bnx2x_queue_state_params queue_params = {NULL};
5361         struct bnx2x_queue_update_params *q_update_params =
5362                 &queue_params.params.update;
5363
5364         /* Send Q update command with afex vlan removal values for all Qs */
5365         queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5366
5367         /* set silent vlan removal values according to vlan mode */
5368         __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5369                   &q_update_params->update_flags);
5370         __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5371                   &q_update_params->update_flags);
5372         __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5373
5374         /* in access mode, mask and value are 0 so that all vlans are stripped */
5375         if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5376                 q_update_params->silent_removal_value = 0;
5377                 q_update_params->silent_removal_mask = 0;
5378         } else {
5379                 q_update_params->silent_removal_value =
5380                         (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5381                 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5382         }
5383
5384         for_each_eth_queue(bp, q) {
5385                 /* Set the appropriate Queue object */
5386                 fp = &bp->fp[q];
5387                 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5388
5389                 /* send the ramrod */
5390                 rc = bnx2x_queue_state_change(bp, &queue_params);
5391                 if (rc < 0)
5392                         BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5393                                   q);
5394         }
5395
5396         if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5397                 fp = &bp->fp[FCOE_IDX(bp)];
5398                 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5399
5400                 /* clear pending completion bit */
5401                 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5402
5403                 /* mark the FCoE Q update as pending */
5404                 smp_mb__before_atomic();
5405                 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5406                 smp_mb__after_atomic();
5407
5408                 /* send Q update ramrod for FCoE Q */
5409                 rc = bnx2x_queue_state_change(bp, &queue_params);
5410                 if (rc < 0)
5411                         BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5412                                   FCOE_IDX(bp));
5413         } else {
5414                 /* If no FCoE ring - ACK MCP now */
5415                 bnx2x_link_report(bp);
5416                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5417         }
5418 }
5419
5420 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
5421         struct bnx2x *bp, u32 cid)
5422 {
5423         DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
5424
5425         if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5426                 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5427         else
5428                 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
5429 }
5430
5431 static void bnx2x_eq_int(struct bnx2x *bp)
5432 {
5433         u16 hw_cons, sw_cons, sw_prod;
5434         union event_ring_elem *elem;
5435         u8 echo;
5436         u32 cid;
5437         u8 opcode;
5438         int rc, spqe_cnt = 0;
5439         struct bnx2x_queue_sp_obj *q_obj;
5440         struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5441         struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5442
5443         hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5444
5445         /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
5446          * When we get the next-page we need to adjust so the loop
5447          * condition below will be met. The next element is the size of a
5448          * regular element and hence we increment by 1.
5449          */
5450         if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5451                 hw_cons++;
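             /* Illustration (assuming the 256-entry page implied by the
              * comment above, with the last slot used as a next-page
              * pointer): NEXT_EQ_IDX() makes sw_cons jump 254 -> 256, so a
              * reported hw_cons of 255 is bumped to 256 here, keeping the
              * "sw_cons != hw_cons" loop condition below reachable.
              */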
5452
5453         /* This function may never run in parallel with itself for a
5454          * specific bp, thus there is no need in "paired" read memory
5455          * barrier here.
5456          */
5457         sw_cons = bp->eq_cons;
5458         sw_prod = bp->eq_prod;
5459
5460         DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->eq_spq_left %x\n",
5461                         hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5462
5463         for (; sw_cons != hw_cons;
5464               sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5465
5466                 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5467
5468                 rc = bnx2x_iov_eq_sp_event(bp, elem);
5469                 if (!rc) {
5470                         DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5471                            rc);
5472                         goto next_spqe;
5473                 }
5474
5475                 /* elem CID originates from FW; actually LE */
5476                 cid = SW_CID((__force __le32)
5477                              elem->message.data.cfc_del_event.cid);
5478                 opcode = elem->message.opcode;
5479
5480                 /* handle eq element */
5481                 switch (opcode) {
5482                 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5483                         bnx2x_vf_mbx_schedule(bp,
5484                                               &elem->message.data.vf_pf_event);
5485                         continue;
5486
5487                 case EVENT_RING_OPCODE_STAT_QUERY:
5488                         DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
5489                                "got statistics comp event %d\n",
5490                                bp->stats_comp++);
5491                         /* nothing to do with stats comp */
5492                         goto next_spqe;
5493
5494                 case EVENT_RING_OPCODE_CFC_DEL:
5495                         /* handle according to cid range */
5496                         /* we may want to verify here that the bp state is
5497                          * HALTING
5498                          */
5500                         DP(BNX2X_MSG_SP,
5501                            "got delete ramrod for MULTI[%d]\n", cid);
5502
5503                         if (CNIC_LOADED(bp) &&
5504                             !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
5505                                 goto next_spqe;
5506
5507                         q_obj = bnx2x_cid_to_q_obj(bp, cid);
5508
5509                         if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5510                                 break;
5511
5512                         goto next_spqe;
5513
5514                 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5515                         DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5516                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5517                         if (f_obj->complete_cmd(bp, f_obj,
5518                                                 BNX2X_F_CMD_TX_STOP))
5519                                 break;
5520                         goto next_spqe;
5521
5522                 case EVENT_RING_OPCODE_START_TRAFFIC:
5523                         DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5524                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5525                         if (f_obj->complete_cmd(bp, f_obj,
5526                                                 BNX2X_F_CMD_TX_START))
5527                                 break;
5528                         goto next_spqe;
5529
5530                 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
5531                         echo = elem->message.data.function_update_event.echo;
5532                         if (echo == SWITCH_UPDATE) {
5533                                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5534                                    "got FUNC_SWITCH_UPDATE ramrod\n");
5535                                 if (f_obj->complete_cmd(
5536                                         bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5537                                         break;
5538
5539                         } else {
5540                                 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
5541
5542                                 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5543                                    "AFEX: ramrod completed FUNCTION_UPDATE\n");
5544                                 f_obj->complete_cmd(bp, f_obj,
5545                                                     BNX2X_F_CMD_AFEX_UPDATE);
5546
5547                                 /* We will perform the Queues update from
5548                                  * sp_rtnl task as all Queue SP operations
5549                                  * should run under rtnl_lock.
5550                                  */
5551                                 bnx2x_schedule_sp_rtnl(bp, cmd, 0);
5552                         }
5553
5554                         goto next_spqe;
5555
5556                 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5557                         f_obj->complete_cmd(bp, f_obj,
5558                                             BNX2X_F_CMD_AFEX_VIFLISTS);
5559                         bnx2x_after_afex_vif_lists(bp, elem);
5560                         goto next_spqe;
5561                 case EVENT_RING_OPCODE_FUNCTION_START:
5562                         DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5563                            "got FUNC_START ramrod\n");
5564                         if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5565                                 break;
5566
5567                         goto next_spqe;
5568
5569                 case EVENT_RING_OPCODE_FUNCTION_STOP:
5570                         DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5571                            "got FUNC_STOP ramrod\n");
5572                         if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5573                                 break;
5574
5575                         goto next_spqe;
5576
5577                 case EVENT_RING_OPCODE_SET_TIMESYNC:
5578                         DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
5579                            "got set_timesync ramrod completion\n");
5580                         if (f_obj->complete_cmd(bp, f_obj,
5581                                                 BNX2X_F_CMD_SET_TIMESYNC))
5582                                 break;
5583                         goto next_spqe;
5584                 }
5585
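                     /* NOTE: opcode and bp->state are combined with a plain OR.
                      * This relies on the EVENT_RING_OPCODE_* values living in
                      * low bits that are disjoint from the BNX2X_STATE_* values,
                      * so each case below matches one (opcode, state) pair.
                      */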
5586                 switch (opcode | bp->state) {
5587                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5588                       BNX2X_STATE_OPEN):
5589                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5590                       BNX2X_STATE_OPENING_WAIT4_PORT):
5591                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5592                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5593                         cid = elem->message.data.eth_event.echo &
5594                                 BNX2X_SWCID_MASK;
5595                         DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5596                            cid);
5597                         rss_raw->clear_pending(rss_raw);
5598                         break;
5599
5600                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5601                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5602                 case (EVENT_RING_OPCODE_SET_MAC |
5603                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5604                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5605                       BNX2X_STATE_OPEN):
5606                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5607                       BNX2X_STATE_DIAG):
5608                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5609                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5610                         DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
5611                         bnx2x_handle_classification_eqe(bp, elem);
5612                         break;
5613
5614                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5615                       BNX2X_STATE_OPEN):
5616                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5617                       BNX2X_STATE_DIAG):
5618                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5619                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5620                         DP(BNX2X_MSG_SP, "got mcast ramrod\n");
5621                         bnx2x_handle_mcast_eqe(bp);
5622                         break;
5623
5624                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5625                       BNX2X_STATE_OPEN):
5626                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5627                       BNX2X_STATE_DIAG):
5628                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5629                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5630                         DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
5631                         bnx2x_handle_rx_mode_eqe(bp);
5632                         break;
5633                 default:
5634                         /* unknown event: log the error and continue */
5635                         BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5636                                   elem->message.opcode, bp->state);
5637                 }
5638 next_spqe:
5639                 spqe_cnt++;
5640         } /* for */
5641
5642         smp_mb__before_atomic();
5643         atomic_add(spqe_cnt, &bp->eq_spq_left);
5644
5645         bp->eq_cons = sw_cons;
5646         bp->eq_prod = sw_prod;
5647         /* Ensure the memory writes above complete before the producer update */
5648         smp_wmb();
5649
5650         /* update producer */
5651         bnx2x_update_eq_prod(bp, bp->eq_prod);
5652 }
5653
5654 static void bnx2x_sp_task(struct work_struct *work)
5655 {
5656         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5657
5658         DP(BNX2X_MSG_SP, "sp task invoked\n");
5659
5660         /* make sure the atomic interrupt_occurred has been written */
5661         smp_rmb();
5662         if (atomic_read(&bp->interrupt_occurred)) {
5663
5664                 /* what work needs to be performed? */
5665                 u16 status = bnx2x_update_dsb_idx(bp);
5666
5667                 DP(BNX2X_MSG_SP, "status %x\n", status);
5668                 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5669                 atomic_set(&bp->interrupt_occurred, 0);
5670
5671                 /* HW attentions */
5672                 if (status & BNX2X_DEF_SB_ATT_IDX) {
5673                         bnx2x_attn_int(bp);
5674                         status &= ~BNX2X_DEF_SB_ATT_IDX;
5675                 }
5676
5677                 /* SP events: STAT_QUERY and others */
5678                 if (status & BNX2X_DEF_SB_IDX) {
5679                         struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5680
5681                         if (FCOE_INIT(bp) &&
5682                             (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5683                                 /* Prevent local bottom-halves from running as
5684                                  * we are going to change the local NAPI list.
5685                                  */
5686                                 local_bh_disable();
5687                                 napi_schedule(&bnx2x_fcoe(bp, napi));
5688                                 local_bh_enable();
5689                         }
5690
5691                         /* Handle EQ completions */
5692                         bnx2x_eq_int(bp);
5693                         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5694                                      le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5695
5696                         status &= ~BNX2X_DEF_SB_IDX;
5697                 }
5698
5699                 /* if status is non-zero then perhaps something went wrong */
5700                 if (unlikely(status))
5701                         DP(BNX2X_MSG_SP,
5702                            "got an unknown interrupt! (status 0x%x)\n", status);
5703
5704                 /* ack status block only if something was actually handled */
5705                 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5706                              le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5707         }
5708
5709         /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5710         if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5711                                &bp->sp_state)) {
5712                 bnx2x_link_report(bp);
5713                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5714         }
5715 }
5716
5717 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5718 {
5719         struct net_device *dev = dev_instance;
5720         struct bnx2x *bp = netdev_priv(dev);
5721
5722         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5723                      IGU_INT_DISABLE, 0);
5724
5725 #ifdef BNX2X_STOP_ON_ERROR
5726         if (unlikely(bp->panic))
5727                 return IRQ_HANDLED;
5728 #endif
5729
5730         if (CNIC_LOADED(bp)) {
5731                 struct cnic_ops *c_ops;
5732
5733                 rcu_read_lock();
5734                 c_ops = rcu_dereference(bp->cnic_ops);
5735                 if (c_ops)
5736                         c_ops->cnic_handler(bp->cnic_data, NULL);
5737                 rcu_read_unlock();
5738         }
5739
5740         /* schedule sp task to perform default status block work, ack
5741          * attentions and enable interrupts.
5742          */
5743         bnx2x_schedule_sp_task(bp);
5744
5745         return IRQ_HANDLED;
5746 }
5747
5748 /* end of slow path */
5749
5750 void bnx2x_drv_pulse(struct bnx2x *bp)
5751 {
5752         SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5753                  bp->fw_drv_pulse_wr_seq);
5754 }
5755
5756 static void bnx2x_timer(unsigned long data)
5757 {
5758         struct bnx2x *bp = (struct bnx2x *) data;
5759
5760         if (!netif_running(bp->dev))
5761                 return;
5762
5763         if (IS_PF(bp) && !BP_NOMCP(bp)) {
5765                 int mb_idx = BP_FW_MB_IDX(bp);
5766                 u16 drv_pulse;
5767                 u16 mcp_pulse;
5768
5769                 ++bp->fw_drv_pulse_wr_seq;
5770                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5771                 drv_pulse = bp->fw_drv_pulse_wr_seq;
5772                 bnx2x_drv_pulse(bp);
5773
5774                 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5775                              MCP_PULSE_SEQ_MASK);
5776                 /* The delta between driver pulse and mcp response should
5777                  * not get too big (the masked subtraction is wrap-safe).
5778                  * If the MFW is more than 5 pulses behind, we should worry
5779                  * enough to generate an error log.
5780                  */
5781                 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5782                         BNX2X_ERR("MFW seems hung: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5783                                   drv_pulse, mcp_pulse);
5784         }
5785
5786         if (bp->state == BNX2X_STATE_OPEN)
5787                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5788
5789         /* sample pf vf bulletin board for new posts from pf */
5790         if (IS_VF(bp))
5791                 bnx2x_timer_sriov(bp);
5792
5793         mod_timer(&bp->timer, jiffies + bp->current_interval);
5794 }
5795
5796 /* end of Statistics */
5797
5798 /* nic init */
5799
5800 /*
5801  * nic init service functions
5802  */
5803
5804 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5805 {
5806         u32 i;
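             /* Use dword-wide writes when both the address and length are
              * dword-aligned; otherwise fall back to byte-wide writes.
              */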
5807         if (!(len % 4) && !(addr % 4))
5808                 for (i = 0; i < len; i += 4)
5809                         REG_WR(bp, addr + i, fill);
5810         else
5811                 for (i = 0; i < len; i++)
5812                         REG_WR8(bp, addr + i, fill);
5813 }
5814
5815 /* helper: writes FP SP data to FW - data_size in dwords */
5816 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5817                                 int fw_sb_id,
5818                                 u32 *sb_data_p,
5819                                 u32 data_size)
5820 {
5821         int index;
5822         for (index = 0; index < data_size; index++)
5823                 REG_WR(bp, BAR_CSTRORM_INTMEM +
5824                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5825                         sizeof(u32)*index,
5826                         *(sb_data_p + index));
5827 }
5828
5829 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5830 {
5831         u32 *sb_data_p;
5832         u32 data_size = 0;
5833         struct hc_status_block_data_e2 sb_data_e2;
5834         struct hc_status_block_data_e1x sb_data_e1x;
5835
5836         /* disable the function first */
5837         if (!CHIP_IS_E1x(bp)) {
5838                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5839                 sb_data_e2.common.state = SB_DISABLED;
5840                 sb_data_e2.common.p_func.vf_valid = false;
5841                 sb_data_p = (u32 *)&sb_data_e2;
5842                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5843         } else {
5844                 memset(&sb_data_e1x, 0,
5845                        sizeof(struct hc_status_block_data_e1x));
5846                 sb_data_e1x.common.state = SB_DISABLED;
5847                 sb_data_e1x.common.p_func.vf_valid = false;
5848                 sb_data_p = (u32 *)&sb_data_e1x;
5849                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5850         }
5851         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5852
5853         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5854                         CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5855                         CSTORM_STATUS_BLOCK_SIZE);
5856         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5857                         CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5858                         CSTORM_SYNC_BLOCK_SIZE);
5859 }
5860
5861 /* helper:  writes SP SB data to FW */
5862 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5863                 struct hc_sp_status_block_data *sp_sb_data)
5864 {
5865         int func = BP_FUNC(bp);
5866         int i;
5867         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5868                 REG_WR(bp, BAR_CSTRORM_INTMEM +
5869                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5870                         i*sizeof(u32),
5871                         *((u32 *)sp_sb_data + i));
5872 }
5873
5874 static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5875 {
5876         int func = BP_FUNC(bp);
5877         struct hc_sp_status_block_data sp_sb_data;
5878         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5879
5880         sp_sb_data.state = SB_DISABLED;
5881         sp_sb_data.p_func.vf_valid = false;
5882
5883         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5884
5885         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5886                         CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5887                         CSTORM_SP_STATUS_BLOCK_SIZE);
5888         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5889                         CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5890                         CSTORM_SP_SYNC_BLOCK_SIZE);
5891 }
5892
5893 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5894                                            int igu_sb_id, int igu_seg_id)
5895 {
5896         hc_sm->igu_sb_id = igu_sb_id;
5897         hc_sm->igu_seg_id = igu_seg_id;
5898         hc_sm->timer_value = 0xFF;
5899         hc_sm->time_to_expire = 0xFFFFFFFF;
5900 }
5901
5902 /* map each status block index to its RX/TX state machine id */
5903 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5904 {
5905         /* zero out state machine indices */
5906         /* rx indices */
5907         index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5908
5909         /* tx indices */
5910         index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5911         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5912         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5913         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5914
5915         /* map indices */
5916         /* rx indices */
5917         index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5918                 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5919
5920         /* tx indices */
5921         index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5922                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5923         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5924                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5925         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5926                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5927         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5928                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5929 }
5930
5931 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5932                           u8 vf_valid, int fw_sb_id, int igu_sb_id)
5933 {
5934         int igu_seg_id;
5935
5936         struct hc_status_block_data_e2 sb_data_e2;
5937         struct hc_status_block_data_e1x sb_data_e1x;
5938         struct hc_status_block_sm  *hc_sm_p;
5939         int data_size;
5940         u32 *sb_data_p;
5941
5942         if (CHIP_INT_MODE_IS_BC(bp))
5943                 igu_seg_id = HC_SEG_ACCESS_NORM;
5944         else
5945                 igu_seg_id = IGU_SEG_ACCESS_NORM;
5946
5947         bnx2x_zero_fp_sb(bp, fw_sb_id);
5948
5949         if (!CHIP_IS_E1x(bp)) {
5950                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5951                 sb_data_e2.common.state = SB_ENABLED;
5952                 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5953                 sb_data_e2.common.p_func.vf_id = vfid;
5954                 sb_data_e2.common.p_func.vf_valid = vf_valid;
5955                 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5956                 sb_data_e2.common.same_igu_sb_1b = true;
5957                 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5958                 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5959                 hc_sm_p = sb_data_e2.common.state_machine;
5960                 sb_data_p = (u32 *)&sb_data_e2;
5961                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5962                 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5963         } else {
5964                 memset(&sb_data_e1x, 0,
5965                        sizeof(struct hc_status_block_data_e1x));
5966                 sb_data_e1x.common.state = SB_ENABLED;
5967                 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5968                 sb_data_e1x.common.p_func.vf_id = 0xff;
5969                 sb_data_e1x.common.p_func.vf_valid = false;
5970                 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5971                 sb_data_e1x.common.same_igu_sb_1b = true;
5972                 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5973                 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5974                 hc_sm_p = sb_data_e1x.common.state_machine;
5975                 sb_data_p = (u32 *)&sb_data_e1x;
5976                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5977                 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5978         }
5979
5980         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5981                                        igu_sb_id, igu_seg_id);
5982         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5983                                        igu_sb_id, igu_seg_id);
5984
5985         DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
5986
5987         /* write indices to HW - PCI guarantees endianness of regpairs */
5988         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5989 }
5990
5991 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
5992                                      u16 tx_usec, u16 rx_usec)
5993 {
5994         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
5995                                     false, rx_usec);
5996         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5997                                        HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
5998                                        tx_usec);
5999         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6000                                        HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
6001                                        tx_usec);
6002         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6003                                        HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
6004                                        tx_usec);
6005 }
6006
6007 static void bnx2x_init_def_sb(struct bnx2x *bp)
6008 {
6009         struct host_sp_status_block *def_sb = bp->def_status_blk;
6010         dma_addr_t mapping = bp->def_status_blk_mapping;
6011         int igu_sp_sb_index;
6012         int igu_seg_id;
6013         int port = BP_PORT(bp);
6014         int func = BP_FUNC(bp);
6015         int reg_offset, reg_offset_en5;
6016         u64 section;
6017         int index;
6018         struct hc_sp_status_block_data sp_sb_data;
6019         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
6020
6021         if (CHIP_INT_MODE_IS_BC(bp)) {
6022                 igu_sp_sb_index = DEF_SB_IGU_ID;
6023                 igu_seg_id = HC_SEG_ACCESS_DEF;
6024         } else {
6025                 igu_sp_sb_index = bp->igu_dsb_id;
6026                 igu_seg_id = IGU_SEG_ACCESS_DEF;
6027         }
6028
6029         /* ATTN */
6030         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6031                                             atten_status_block);
6032         def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
6033
6034         bp->attn_state = 0;
6035
6036         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6037                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6038         reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
6039                                  MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
6040         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
6041                 int sindex;
6042                 /* take care of sig[0]..sig[3]; sig[4] is handled below */
6043                 for (sindex = 0; sindex < 4; sindex++)
6044                         bp->attn_group[index].sig[sindex] =
6045                            REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
6046
6047                 if (!CHIP_IS_E1x(bp))
6048                         /*
6049                          * enable5 is separate from the rest of the registers,
6050                          * and therefore the address skip is 4
6051                          * and not 16 between the different groups
6052                          */
6053                         bp->attn_group[index].sig[4] = REG_RD(bp,
6054                                         reg_offset_en5 + 0x4*index);
6055                 else
6056                         bp->attn_group[index].sig[4] = 0;
6057         }
6058
6059         if (bp->common.int_block == INT_BLOCK_HC) {
6060                 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
6061                                      HC_REG_ATTN_MSG0_ADDR_L);
6062
6063                 REG_WR(bp, reg_offset, U64_LO(section));
6064                 REG_WR(bp, reg_offset + 4, U64_HI(section));
6065         } else if (!CHIP_IS_E1x(bp)) {
6066                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
6067                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
6068         }
6069
6070         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6071                                             sp_sb);
6072
6073         bnx2x_zero_sp_sb(bp);
6074
6075         /* PCI guarantees endianness of regpairs */
6076         sp_sb_data.state                = SB_ENABLED;
6077         sp_sb_data.host_sb_addr.lo      = U64_LO(section);
6078         sp_sb_data.host_sb_addr.hi      = U64_HI(section);
6079         sp_sb_data.igu_sb_id            = igu_sp_sb_index;
6080         sp_sb_data.igu_seg_id           = igu_seg_id;
6081         sp_sb_data.p_func.pf_id         = func;
6082         sp_sb_data.p_func.vnic_id       = BP_VN(bp);
6083         sp_sb_data.p_func.vf_id         = 0xff;
6084
6085         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
6086
6087         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
6088 }
6089
6090 void bnx2x_update_coalesce(struct bnx2x *bp)
6091 {
6092         int i;
6093
6094         for_each_eth_queue(bp, i)
6095                 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
6096                                          bp->tx_ticks, bp->rx_ticks);
6097 }
6098
6099 static void bnx2x_init_sp_ring(struct bnx2x *bp)
6100 {
6101         spin_lock_init(&bp->spq_lock);
6102         atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
6103
6104         bp->spq_prod_idx = 0;
6105         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
6106         bp->spq_prod_bd = bp->spq;
6107         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
6108 }
6109
6110 static void bnx2x_init_eq_ring(struct bnx2x *bp)
6111 {
6112         int i;
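             /* Chain the EQ pages: the last element of each page is a
              * next-page pointer, and (i % NUM_EQ_PAGES) wraps the last
              * page back to the first so the ring is circular.
              */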
6113         for (i = 1; i <= NUM_EQ_PAGES; i++) {
6114                 union event_ring_elem *elem =
6115                         &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
6116
6117                 elem->next_page.addr.hi =
6118                         cpu_to_le32(U64_HI(bp->eq_mapping +
6119                                    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
6120                 elem->next_page.addr.lo =
6121                         cpu_to_le32(U64_LO(bp->eq_mapping +
6122                                    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
6123         }
6124         bp->eq_cons = 0;
6125         bp->eq_prod = NUM_EQ_DESC;
6126         bp->eq_cons_sb = BNX2X_EQ_INDEX;
6127         /* leave headroom so we get a warning message before the EQ overflows */
6128         atomic_set(&bp->eq_spq_left,
6129                 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
6130 }
6131
6132 /* called with netif_addr_lock_bh() */
6133 static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
6134                                unsigned long rx_mode_flags,
6135                                unsigned long rx_accept_flags,
6136                                unsigned long tx_accept_flags,
6137                                unsigned long ramrod_flags)
6138 {
6139         struct bnx2x_rx_mode_ramrod_params ramrod_param;
6140         int rc;
6141
6142         memset(&ramrod_param, 0, sizeof(ramrod_param));
6143
6144         /* Prepare ramrod parameters */
6145         ramrod_param.cid = 0;
6146         ramrod_param.cl_id = cl_id;
6147         ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
6148         ramrod_param.func_id = BP_FUNC(bp);
6149
6150         ramrod_param.pstate = &bp->sp_state;
6151         ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
6152
6153         ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
6154         ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
6155
6156         set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
6157
6158         ramrod_param.ramrod_flags = ramrod_flags;
6159         ramrod_param.rx_mode_flags = rx_mode_flags;
6160
6161         ramrod_param.rx_accept_flags = rx_accept_flags;
6162         ramrod_param.tx_accept_flags = tx_accept_flags;
6163
6164         rc = bnx2x_config_rx_mode(bp, &ramrod_param);
6165         if (rc < 0) {
6166                 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
6167                 return rc;
6168         }
6169
6170         return 0;
6171 }
6172
6173 static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
6174                                    unsigned long *rx_accept_flags,
6175                                    unsigned long *tx_accept_flags)
6176 {
6177         /* Clear the flags first */
6178         *rx_accept_flags = 0;
6179         *tx_accept_flags = 0;
6180
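             /* Summary of the cases below: NONE accepts nothing, NORMAL
              * accepts UC/MC/BC, ALLMULTI widens MC to all-multicast, and
              * PROMISC also accepts unmatched unicast and any VLAN. The tx
              * flags largely mirror the rx flags for the internal
              * switching path.
              */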
6181         switch (rx_mode) {
6182         case BNX2X_RX_MODE_NONE:
6183                 /*
6184                  * 'drop all' supersedes any accept flags that may have been
6185                  * passed to the function.
6186                  */
6187                 break;
6188         case BNX2X_RX_MODE_NORMAL:
6189                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6190                 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
6191                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6192
6193                 /* internal switching mode */
6194                 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6195                 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
6196                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6197
6198                 if (bp->accept_any_vlan) {
6199                         __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6200                         __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6201                 }
6202
6203                 break;
6204         case BNX2X_RX_MODE_ALLMULTI:
6205                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6206                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6207                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6208
6209                 /* internal switching mode */
6210                 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6211                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6212                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6213
6214                 if (bp->accept_any_vlan) {
6215                         __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6216                         __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6217                 }
6218
6219                 break;
6220         case BNX2X_RX_MODE_PROMISC:
6221                 /* According to the definition of SI mode, an interface in
6222                  * promisc mode should receive matched and unmatched
6223                  * (resolved at port level) unicast packets.
6224                  */
6225                 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
6226                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6227                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6228                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6229
6230                 /* internal switching mode */
6231                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6232                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6233
6234                 if (IS_MF_SI(bp))
6235                         __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
6236                 else
6237                         __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6238
6239                 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6240                 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6241
6242                 break;
6243         default:
6244                 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
6245                 return -EINVAL;
6246         }
6247
6248         return 0;
6249 }
6250
6251 /* called with netif_addr_lock_bh() */
6252 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
6253 {
6254         unsigned long rx_mode_flags = 0, ramrod_flags = 0;
6255         unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
6256         int rc;
6257
6258         if (!NO_FCOE(bp))
6259                 /* Configure rx_mode of FCoE Queue */
6260                 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
6261
6262         rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
6263                                      &tx_accept_flags);
6264         if (rc)
6265                 return rc;
6266
6267         __set_bit(RAMROD_RX, &ramrod_flags);
6268         __set_bit(RAMROD_TX, &ramrod_flags);
6269
6270         return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
6271                                    rx_accept_flags, tx_accept_flags,
6272                                    ramrod_flags);
6273 }
6274
6275 static void bnx2x_init_internal_common(struct bnx2x *bp)
6276 {
6277         int i;
6278
6279         /* Zero this manually as its initialization is
6280            currently missing in the initTool */
6281         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
6282                 REG_WR(bp, BAR_USTRORM_INTMEM +
6283                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
6284         if (!CHIP_IS_E1x(bp)) {
6285                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
6286                         CHIP_INT_MODE_IS_BC(bp) ?
6287                         HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
6288         }
6289 }
6290
6291 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
6292 {
6293         switch (load_code) {
6294         case FW_MSG_CODE_DRV_LOAD_COMMON:
6295         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
6296                 bnx2x_init_internal_common(bp);
6297                 /* no break */
6298
6299         case FW_MSG_CODE_DRV_LOAD_PORT:
6300                 /* nothing to do */
6301                 /* no break */
6302
6303         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6304                 /* internal memory per function is
6305                    initialized inside bnx2x_pf_init */
6306                 break;
6307
6308         default:
6309                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6310                 break;
6311         }
6312 }
6313
6314 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
6315 {
6316         return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
6317 }
6318
6319 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
6320 {
6321         return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
6322 }
6323
6324 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
6325 {
6326         if (CHIP_IS_E1x(fp->bp))
6327                 return BP_L_ID(fp->bp) + fp->index;
6328         else    /* We want Client ID to be the same as IGU SB ID for 57712 */
6329                 return bnx2x_fp_igu_sb_id(fp);
6330 }
6331
6332 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
6333 {
6334         struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
6335         u8 cos;
6336         unsigned long q_type = 0;
6337         u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
6338         fp->rx_queue = fp_idx;
6339         fp->cid = fp_idx;
6340         fp->cl_id = bnx2x_fp_cl_id(fp);
6341         fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
6342         fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
6343         /* qZone id equals the FW (per path) client id */
6344         fp->cl_qzone_id  = bnx2x_fp_qzone_id(fp);
6345
6346         /* init shortcut */
6347         fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
6348
6349         /* Setup SB indices */
6350         fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
6351
6352         /* Configure Queue State object */
6353         __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6354         __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6355
6356         BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
6357
6358         /* init tx data */
6359         for_each_cos_in_tx_queue(fp, cos) {
6360                 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
6361                                   CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
6362                                   FP_COS_TO_TXQ(fp, cos, bp),
6363                                   BNX2X_TX_SB_INDEX_BASE + cos, fp);
6364                 cids[cos] = fp->txdata_ptr[cos]->cid;
6365         }
6366
6367         /* nothing more for vf to do here */
6368         if (IS_VF(bp))
6369                 return;
6370
6371         bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6372                       fp->fw_sb_id, fp->igu_sb_id);
6373         bnx2x_update_fpsb_idx(fp);
6374         bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6375                              fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6376                              bnx2x_sp_mapping(bp, q_rdata), q_type);
6377
6378         /* Configure classification DBs: always enable Tx switching */
6381         bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
6382
6383         DP(NETIF_MSG_IFUP,
6384            "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  fw_sb %d  igu_sb %d\n",
6385            fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6386            fp->igu_sb_id);
6387 }
6388
6389 static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
6390 {
6391         int i;
6392
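             /* Same page-chaining scheme as the EQ ring: the last BD of
              * each page points at the next page, wrapping to the first.
              */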
6393         for (i = 1; i <= NUM_TX_RINGS; i++) {
6394                 struct eth_tx_next_bd *tx_next_bd =
6395                         &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
6396
6397                 tx_next_bd->addr_hi =
6398                         cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
6399                                     BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6400                 tx_next_bd->addr_lo =
6401                         cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
6402                                     BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6403         }
6404
6405         *txdata->tx_cons_sb = cpu_to_le16(0);
6406
6407         SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
6408         txdata->tx_db.data.zero_fill1 = 0;
6409         txdata->tx_db.data.prod = 0;
6410
6411         txdata->tx_pkt_prod = 0;
6412         txdata->tx_pkt_cons = 0;
6413         txdata->tx_bd_prod = 0;
6414         txdata->tx_bd_cons = 0;
6415         txdata->tx_pkt = 0;
6416 }
6417
6418 static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
6419 {
6420         int i;
6421
6422         for_each_tx_queue_cnic(bp, i)
6423                 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6424 }
6425
6426 static void bnx2x_init_tx_rings(struct bnx2x *bp)
6427 {
6428         int i;
6429         u8 cos;
6430
6431         for_each_eth_queue(bp, i)
6432                 for_each_cos_in_tx_queue(&bp->fp[i], cos)
6433                         bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
6434 }
6435
6436 static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
6437 {
6438         struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
6439         unsigned long q_type = 0;
6440
6441         bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
6442         bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
6443                                                      BNX2X_FCOE_ETH_CL_ID_IDX);
6444         bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
6445         bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
6446         bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
6447         bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
6448         bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
6449                           fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
6450                           fp);
6451
6452         DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
6453
6454         /* qZone id equals the FW (per path) client id */
6455         bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
6456         /* init shortcut */
6457         bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
6458                 bnx2x_rx_ustorm_prods_offset(fp);
6459
6460         /* Configure Queue State object */
6461         __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6462         __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6463
6464         /* No multi-CoS for FCoE L2 client */
6465         BUG_ON(fp->max_cos != 1);
6466
6467         bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
6468                              &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6469                              bnx2x_sp_mapping(bp, q_rdata), q_type);
6470
6471         DP(NETIF_MSG_IFUP,
6472            "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6473            fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6474            fp->igu_sb_id);
6475 }
6476
6477 void bnx2x_nic_init_cnic(struct bnx2x *bp)
6478 {
6479         if (!NO_FCOE(bp))
6480                 bnx2x_init_fcoe_fp(bp);
6481
6482         bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6483                       BNX2X_VF_ID_INVALID, false,
6484                       bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
6485
6486         /* ensure status block indices were read */
6487         rmb();
6488         bnx2x_init_rx_rings_cnic(bp);
6489         bnx2x_init_tx_rings_cnic(bp);
6490
6491         /* flush all */
6492         mb();
6493         mmiowb();
6494 }
6495
6496 void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6497 {
6498         int i;
6499
6500         /* Setup NIC internals and enable interrupts */
6501         for_each_eth_queue(bp, i)
6502                 bnx2x_init_eth_fp(bp, i);
6503
6504         /* ensure status block indices were read */
6505         rmb();
6506         bnx2x_init_rx_rings(bp);
6507         bnx2x_init_tx_rings(bp);
6508
6509         if (IS_PF(bp)) {
6510                 /* Initialize MOD_ABS interrupts */
6511                 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6512                                        bp->common.shmem_base,
6513                                        bp->common.shmem2_base, BP_PORT(bp));
6514
6515                 /* initialize the default status block and sp ring */
6516                 bnx2x_init_def_sb(bp);
6517                 bnx2x_update_dsb_idx(bp);
6518                 bnx2x_init_sp_ring(bp);
6519         } else {
6520                 bnx2x_memset_stats(bp);
6521         }
6522 }
6523
6524 void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6525 {
6526         bnx2x_init_eq_ring(bp);
6527         bnx2x_init_internal(bp, load_code);
6528         bnx2x_pf_init(bp);
6529         bnx2x_stats_init(bp);
6530
6531         /* flush all before enabling interrupts */
6532         mb();
6533         mmiowb();
6534
6535         bnx2x_int_enable(bp);
6536
6537         /* Check for SPIO5 */
6538         bnx2x_attn_int_deasserted0(bp,
6539                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6540                                    AEU_INPUTS_ATTN_BITS_SPIO5);
6541 }
6542
6543 /* gzip service functions */
6544 static int bnx2x_gunzip_init(struct bnx2x *bp)
6545 {
6546         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6547                                             &bp->gunzip_mapping, GFP_KERNEL);
6548         if (bp->gunzip_buf == NULL)
6549                 goto gunzip_nomem1;
6550
6551         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6552         if (bp->strm == NULL)
6553                 goto gunzip_nomem2;
6554
6555         bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
6556         if (bp->strm->workspace == NULL)
6557                 goto gunzip_nomem3;
6558
6559         return 0;
6560
6561 gunzip_nomem3:
6562         kfree(bp->strm);
6563         bp->strm = NULL;
6564
6565 gunzip_nomem2:
6566         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6567                           bp->gunzip_mapping);
6568         bp->gunzip_buf = NULL;
6569
6570 gunzip_nomem1:
6571         BNX2X_ERR("Cannot allocate firmware buffer for decompression\n");
6572         return -ENOMEM;
6573 }
6574
6575 static void bnx2x_gunzip_end(struct bnx2x *bp)
6576 {
6577         if (bp->strm) {
6578                 vfree(bp->strm->workspace);
6579                 kfree(bp->strm);
6580                 bp->strm = NULL;
6581         }
6582
6583         if (bp->gunzip_buf) {
6584                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6585                                   bp->gunzip_mapping);
6586                 bp->gunzip_buf = NULL;
6587         }
6588 }
6589
6590 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6591 {
6592         int n, rc;
6593
6594         /* check gzip header */
6595         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6596                 BNX2X_ERR("Bad gzip header\n");
6597                 return -EINVAL;
6598         }
6599
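             /* The fixed gzip header is 10 bytes (RFC 1952); if the FNAME
              * flag is set, a NUL-terminated file name follows and is
              * skipped below.
              */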
6600         n = 10;
6601
6602 #define FNAME                           0x8
6603
6604         if (zbuf[3] & FNAME)
6605                 while ((zbuf[n++] != 0) && (n < len));
6606
6607         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6608         bp->strm->avail_in = len - n;
6609         bp->strm->next_out = bp->gunzip_buf;
6610         bp->strm->avail_out = FW_BUF_SIZE;
6611
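             /* Negative window bits select raw inflate: the gzip header
              * was parsed manually above, so zlib is handed only the bare
              * deflate stream.
              */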
6612         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6613         if (rc != Z_OK)
6614                 return rc;
6615
6616         rc = zlib_inflate(bp->strm, Z_FINISH);
6617         if ((rc != Z_OK) && (rc != Z_STREAM_END))
6618                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6619                            bp->strm->msg);
6620
6621         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6622         if (bp->gunzip_outlen & 0x3)
6623                 netdev_err(bp->dev,
6624                            "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6625                                 bp->gunzip_outlen);
6626         bp->gunzip_outlen >>= 2;
6627
6628         zlib_inflateEnd(bp->strm);
6629
6630         if (rc == Z_STREAM_END)
6631                 return 0;
6632
6633         return rc;
6634 }
6635
6636 /* nic load/unload */
6637
6638 /*
6639  * General service functions
6640  */
6641
6642 /* send a NIG loopback debug packet */
6643 static void bnx2x_lb_pckt(struct bnx2x *bp)
6644 {
6645         u32 wb_write[3];
6646
6647         /* Ethernet source and destination addresses */
6648         wb_write[0] = 0x55555555;
6649         wb_write[1] = 0x55555555;
6650         wb_write[2] = 0x20;             /* SOP */
6651         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6652
6653         /* NON-IP protocol */
6654         wb_write[0] = 0x09000000;
6655         wb_write[1] = 0x55555555;
6656         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
6657         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6658 }
6659
6660 /* Some of the internal memories are not directly readable from the
6661  * driver; to test them we send debug packets.
6662  */
6664 static int bnx2x_int_mem_test(struct bnx2x *bp)
6665 {
6666         int factor;
6667         int count, i;
6668         u32 val = 0;
6669
6670         if (CHIP_REV_IS_FPGA(bp))
6671                 factor = 120;
6672         else if (CHIP_REV_IS_EMUL(bp))
6673                 factor = 200;
6674         else
6675                 factor = 1;
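             /* Emulation and FPGA platforms run slower than real silicon,
              * so the poll counts and delays below are scaled up by this
              * factor.
              */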
6676
6677         /* Disable inputs of parser neighbor blocks */
6678         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6679         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6680         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6681         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6682
6683         /*  Write 0 to parser credits for CFC search request */
6684         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6685
6686         /* send Ethernet packet */
6687         bnx2x_lb_pckt(bp);
6688
6689         /* TODO: should the NIG statistics be reset here? */
6690         /* Wait until NIG register shows 1 packet of size 0x10 */
6691         count = 1000 * factor;
6692         while (count) {
6693
6694                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6695                 val = *bnx2x_sp(bp, wb_data[0]);
6696                 if (val == 0x10)
6697                         break;
6698
6699                 usleep_range(10000, 20000);
6700                 count--;
6701         }
6702         if (val != 0x10) {
6703                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6704                 return -1;
6705         }
6706
6707         /* Wait until PRS register shows 1 packet */
6708         count = 1000 * factor;
6709         while (count) {
6710                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6711                 if (val == 1)
6712                         break;
6713
6714                 usleep_range(10000, 20000);
6715                 count--;
6716         }
6717         if (val != 0x1) {
6718                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6719                 return -2;
6720         }
6721
6722         /* Reset and init BRB, PRS */
6723         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6724         msleep(50);
6725         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6726         msleep(50);
6727         bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6728         bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6729
6730         DP(NETIF_MSG_HW, "part2\n");
6731
6732         /* Disable inputs of parser neighbor blocks */
6733         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6734         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6735         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6736         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6737
6738         /* Write 0 to parser credits for CFC search request */
6739         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6740
6741         /* send 10 Ethernet packets */
6742         for (i = 0; i < 10; i++)
6743                 bnx2x_lb_pckt(bp);
6744
6745         /* Wait until the NIG register shows 10 + 1 packets
6746          * of total size 11 * 0x10 = 0xb0 bytes */
6747         count = 1000 * factor;
6748         while (count) {
6749
6750                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6751                 val = *bnx2x_sp(bp, wb_data[0]);
6752                 if (val == 0xb0)
6753                         break;
6754
6755                 usleep_range(10000, 20000);
6756                 count--;
6757         }
6758         if (val != 0xb0) {
6759                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6760                 return -3;
6761         }
6762
6763         /* Wait until PRS register shows 2 packets */
6764         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6765         if (val != 2)
6766                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6767
6768         /* Write 1 to parser credits for CFC search request */
6769         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6770
6771         /* Wait until PRS register shows 3 packets */
6772         msleep(10 * factor);
6773         /* Wait until NIG register shows 1 packet of size 0x10 */
6774         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6775         if (val != 3)
6776                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6777
6778         /* clear NIG EOP FIFO */
6779         for (i = 0; i < 11; i++)
6780                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6781         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6782         if (val != 1) {
6783                 BNX2X_ERR("clear of NIG failed\n");
6784                 return -4;
6785         }
6786
6787         /* Reset and init BRB, PRS, NIG */
6788         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6789         msleep(50);
6790         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6791         msleep(50);
6792         bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6793         bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6794         if (!CNIC_SUPPORT(bp))
6795                 /* set NIC mode */
6796                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6797
6798         /* Enable inputs of parser neighbor blocks */
6799         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6800         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6801         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6802         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6803
6804         DP(NETIF_MSG_HW, "done\n");
6805
6806         return 0; /* OK */
6807 }
6808
6809 static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6810 {
6811         u32 val;
6812
6813         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6814         if (!CHIP_IS_E1x(bp))
6815                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6816         else
6817                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6818         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6819         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6820         /*
6821          * mask read length error interrupts in brb for parser
6822          * (parsing unit and 'checksum and crc' unit)
6823          * these errors are legal (PU reads fixed length and CAC can cause
6824          * read length error on truncated packets)
6825          */
6826         REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6827         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6828         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6829         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6830         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6831         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6832 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6833 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6834         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6835         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6836         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6837 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6838 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6839         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6840         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6841         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6842         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6843 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6844 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6845
6846         val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT  |
6847                 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6848                 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6849         if (!CHIP_IS_E1x(bp))
6850                 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6851                         PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6852         REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6853
6854         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6855         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6856         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6857 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6858
6859         if (!CHIP_IS_E1x(bp))
6860                 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
6861                 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6862
6863         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6864         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6865 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6866         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
6867 }
6868
6869 static void bnx2x_reset_common(struct bnx2x *bp)
6870 {
6871         u32 val = 0x1400;
6872
6873         /* reset_common */
6874         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6875                0xd3ffff7f);
6876
6877         if (CHIP_IS_E3(bp)) {
6878                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6879                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6880         }
6881
6882         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6883 }
6884
6885 static void bnx2x_setup_dmae(struct bnx2x *bp)
6886 {
6887         bp->dmae_ready = 0;
6888         spin_lock_init(&bp->dmae_lock);
6889 }
6890
6891 static void bnx2x_init_pxp(struct bnx2x *bp)
6892 {
6893         u16 devctl;
6894         int r_order, w_order;
6895
6896         pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6897         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6898         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6899         if (bp->mrrs == -1)
6900                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6901         else {
6902                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6903                 r_order = bp->mrrs;
6904         }
6905
6906         bnx2x_init_pxp_arb(bp, r_order, w_order);
6907 }
6908
6909 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6910 {
6911         int is_required;
6912         u32 val;
6913         int port;
6914
6915         if (BP_NOMCP(bp))
6916                 return;
6917
6918         is_required = 0;
6919         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6920               SHARED_HW_CFG_FAN_FAILURE_MASK;
6921
6922         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6923                 is_required = 1;
6924
6925         /*
6926          * The fan failure mechanism is usually related to the PHY type since
6927          * the power consumption of the board is affected by the PHY. Currently,
6928          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6929          */
6930         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6931                 for (port = PORT_0; port < PORT_MAX; port++) {
6932                         is_required |=
6933                                 bnx2x_fan_failure_det_req(
6934                                         bp,
6935                                         bp->common.shmem_base,
6936                                         bp->common.shmem2_base,
6937                                         port);
6938                 }
6939
6940         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6941
6942         if (is_required == 0)
6943                 return;
6944
6945         /* Fan failure is indicated by SPIO 5 */
6946         bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6947
6948         /* set to active low mode */
6949         val = REG_RD(bp, MISC_REG_SPIO_INT);
6950         val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6951         REG_WR(bp, MISC_REG_SPIO_INT, val);
6952
6953         /* enable interrupt to signal the IGU */
6954         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6955         val |= MISC_SPIO_SPIO5;
6956         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6957 }
6958
6959 void bnx2x_pf_disable(struct bnx2x *bp)
6960 {
6961         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6962         val &= ~IGU_PF_CONF_FUNC_EN;
6963
6964         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6965         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6966         REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6967 }
6968
6969 static void bnx2x__common_init_phy(struct bnx2x *bp)
6970 {
6971         u32 shmem_base[2], shmem2_base[2];
6972         /* Avoid common init in case MFW supports LFA */
6973         if (SHMEM2_RD(bp, size) >
6974             (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6975                 return;
6976         shmem_base[0] =  bp->common.shmem_base;
6977         shmem2_base[0] = bp->common.shmem2_base;
6978         if (!CHIP_IS_E1x(bp)) {
6979                 shmem_base[1] =
6980                         SHMEM2_RD(bp, other_shmem_base_addr);
6981                 shmem2_base[1] =
6982                         SHMEM2_RD(bp, other_shmem2_base_addr);
6983         }
6984         bnx2x_acquire_phy_lock(bp);
6985         bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
6986                               bp->common.chip_id);
6987         bnx2x_release_phy_lock(bp);
6988 }
6989
6990 static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
6991 {
6992         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
6993         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
6994         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
6995         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
6996         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
6997
6998         /* make sure this value is 0 */
6999         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
7000
7001         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
7002         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
7003         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
7004         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
7005 }
7006
7007 static void bnx2x_set_endianity(struct bnx2x *bp)
7008 {
7009 #ifdef __BIG_ENDIAN
7010         bnx2x_config_endianity(bp, 1);
7011 #else
7012         bnx2x_config_endianity(bp, 0);
7013 #endif
7014 }
7015
7016 static void bnx2x_reset_endianity(struct bnx2x *bp)
7017 {
7018         bnx2x_config_endianity(bp, 0);
7019 }
7020
7021 /**
7022  * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
7023  *
7024  * @bp:         driver handle
7025  */
7026 static int bnx2x_init_hw_common(struct bnx2x *bp)
7027 {
7028         u32 val;
7029
7030         DP(NETIF_MSG_HW, "starting common init  func %d\n", BP_ABS_FUNC(bp));
7031
7032         /*
7033          * take the RESET lock to protect undi_unload flow from accessing
7034          * registers while we're resetting the chip
7035          */
7036         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7037
7038         bnx2x_reset_common(bp);
7039         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
7040
7041         val = 0xfffc;
7042         if (CHIP_IS_E3(bp)) {
7043                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
7044                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
7045         }
7046         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
7047
7048         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7049
7050         bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
7051
7052         if (!CHIP_IS_E1x(bp)) {
7053                 u8 abs_func_id;
7054
7055                 /* In 4-port or 2-port mode we need to turn off
7056                  * master-enable for everyone; after that, turn it
7057                  * back on for self. So we disregard multi-function
7058                  * or not, and always disable for all functions on
7059                  * the given path: 0,2,4,6 for path 0 and 1,3,5,7
7060                  * for path 1.
7061                  */
7062                 for (abs_func_id = BP_PATH(bp);
7063                      abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
7064                         if (abs_func_id == BP_ABS_FUNC(bp)) {
7065                                 REG_WR(bp,
7066                                     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
7067                                     1);
7068                                 continue;
7069                         }
7070
7071                         bnx2x_pretend_func(bp, abs_func_id);
7072                         /* clear pf enable */
7073                         bnx2x_pf_disable(bp);
7074                         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7075                 }
7076         }
7077
7078         bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
7079         if (CHIP_IS_E1(bp)) {
7080                 /* enable HW interrupt from PXP on USDM overflow
7081                    bit 16 on INT_MASK_0 */
7082                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
7083         }
7084
7085         bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
7086         bnx2x_init_pxp(bp);
7087         bnx2x_set_endianity(bp);
7088         bnx2x_ilt_init_page_size(bp, INITOP_SET);
7089
7090         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
7091                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
7092
7093         /* let the HW do its magic ... */
7094         msleep(100);
7095         /* finish PXP init */
7096         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
7097         if (val != 1) {
7098                 BNX2X_ERR("PXP2 CFG failed\n");
7099                 return -EBUSY;
7100         }
7101         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
7102         if (val != 1) {
7103                 BNX2X_ERR("PXP2 RD_INIT failed\n");
7104                 return -EBUSY;
7105         }
7106
7107         /* Timers bug workaround E2 only. We need to set the entire ILT to
7108          * have entries with value "0" and valid bit on.
7109          * This needs to be done by the first PF that is loaded in a path
7110          * (i.e. common phase)
7111          */
7112         if (!CHIP_IS_E1x(bp)) {
7113 /* In E2 there is a bug in the timers block that can cause function 6 / 7
7114  * (i.e. vnic3) to start even if it is marked as "scan-off".
7115  * This occurs when a different function (func2,3) is being marked
7116  * as "scan-off". Real-life scenario for example: if a driver is being
7117  * load-unloaded while func6,7 are down. This will cause the timer to access
7118  * the ilt, translate to a logical address and send a request to read/write.
7119  * Since the ilt for the function that is down is not valid, this will cause
7120  * a translation error which is unrecoverable.
7121  * The Workaround is intended to make sure that when this happens nothing fatal
7122  * will occur. The workaround:
7123  *      1.  First PF driver which loads on a path will:
7124  *              a.  After taking the chip out of reset, by using pretend,
7125  *                  it will write "0" to the following registers of
7126  *                  the other vnics.
7127  *                  REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
7128  *                  REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
7129  *                  REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
7130  *                  And for itself it will write '1' to
7131  *                  PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
7132  *                  dmae-operations (writing to pram for example.)
7133  *                  note: can be done for only function 6,7 but cleaner this
7134  *                        way.
7135  *              b.  Write zero+valid to the entire ILT.
7136  *              c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
7137  *                  VNIC3 (of that port). The range allocated will be the
7138  *                  entire ILT. This is needed to prevent an ILT range error.
7139  *      2.  Any PF driver load flow:
7140  *              a.  ILT update with the physical addresses of the allocated
7141  *                  logical pages.
7142  *              b.  Wait 20msec. - note that this timeout is needed to make
7143  *                  sure there are no requests in one of the PXP internal
7144  *                  queues with "old" ILT addresses.
7145  *              c.  PF enable in the PGLC.
7146  *              d.  Clear the was_error of the PF in the PGLC. (could have
7147  *                  occurred while driver was down)
7148  *              e.  PF enable in the CFC (WEAK + STRONG)
7149  *              f.  Timers scan enable
7150  *      3.  PF driver unload flow:
7151  *              a.  Clear the Timers scan_en.
7152  *              b.  Polling for scan_on=0 for that PF.
7153  *              c.  Clear the PF enable bit in the PXP.
7154  *              d.  Clear the PF enable in the CFC (WEAK + STRONG)
7155  *              e.  Write zero+valid to all ILT entries (The valid bit must
7156  *                  stay set)
7157  *              f.  If this is VNIC 3 of a port then also init
7158  *                  first_timers_ilt_entry to zero and last_timers_ilt_entry
7159  *                  to the last entry in the ILT.
7160  *
7161  *      Notes:
7162  *      Currently the PF error in the PGLC is non-recoverable.
7163  *      In the future there will be a recovery routine for this error.
7164  *      Currently attention is masked.
7165  *      Having an MCP lock on the load/unload process does not guarantee that
7166  *      there is no Timer disable during Func6/7 enable. This is because the
7167  *      Timers scan is currently being cleared by the MCP on FLR.
7168  *      Step 2.d can be done only for PF6/7 and the driver can also check if
7169  *      there is error before clearing it. But the flow above is simpler and
7170  *      more general.
7171  *      All ILT entries are written by zero+valid and not just PF6/7
7172  *      ILT entries since in the future the ILT entries allocation for
7173  *      PF-s might be dynamic.
7174  */
7175                 struct ilt_client_info ilt_cli;
7176                 struct bnx2x_ilt ilt;
7177                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7178                 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
7179
7180                 /* initialize dummy TM client */
7181                 ilt_cli.start = 0;
7182                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7183                 ilt_cli.client_num = ILT_CLIENT_TM;
7184
7185                 /* Step 1: set zeroes to all ilt page entries with valid bit on
7186                  * Step 2: set the timers first/last ilt entry to point
7187                  * to the entire range to prevent ILT range error for 3rd/4th
7188                  * vnic (this code assumes existence of the vnic)
7189                  *
7190                  * both steps performed by call to bnx2x_ilt_client_init_op()
7191                  * with dummy TM client
7192                  *
7193                  * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
7194                  * and its counterpart are split registers
7195                  */
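                /* pretend to be absolute function 6 (path 0) or 7 (path 1),
                 * i.e. VNIC3 of the port, so the ILT writes below land in
                 * that function's registers
                 */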
7196                 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
7197                 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
7198                 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7199
7200                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
7201                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
7202                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
7203         }
7204
7205         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
7206         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
7207
7208         if (!CHIP_IS_E1x(bp)) {
7209                 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
7210                                 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
7211                 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
7212
7213                 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
7214
7215                 /* let the HW do its magic ... */
7216                 do {
7217                         msleep(200);
7218                         val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
7219                 } while (factor-- && (val != 1));
7220
7221                 if (val != 1) {
7222                         BNX2X_ERR("ATC_INIT failed\n");
7223                         return -EBUSY;
7224                 }
7225         }
7226
7227         bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
7228
7229         bnx2x_iov_init_dmae(bp);
7230
7231         /* clean the DMAE memory */
7232         bp->dmae_ready = 1;
7233         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
7234
7235         bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
7236
7237         bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
7238
7239         bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
7240
7241         bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
7242
7243         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
7244         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
7245         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
7246         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
7247
7248         bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
7249
7250         /* QM queues pointers table */
7251         bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
7252
7253         /* soft reset pulse */
7254         REG_WR(bp, QM_REG_SOFT_RESET, 1);
7255         REG_WR(bp, QM_REG_SOFT_RESET, 0);
7256
7257         if (CNIC_SUPPORT(bp))
7258                 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
7259
7260         bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
7261
7262         if (!CHIP_REV_IS_SLOW(bp))
7263                 /* enable hw interrupt from doorbell Q */
7264                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
7265
7266         bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
7267
7268         bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
7269         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
7270
7271         if (!CHIP_IS_E1(bp))
7272                 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
7273
7274         if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
7275                 if (IS_MF_AFEX(bp)) {
7276                         /* configure that VNTag and VLAN headers must be
7277                          * received in afex mode
7278                          */
7279                         REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
7280                         REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
7281                         REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
7282                         REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
7283                         REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
7284                 } else {
7285                         /* Bit-map indicating which L2 hdrs may appear
7286                          * after the basic Ethernet header
7287                          */
7288                         REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
7289                                bp->path_has_ovlan ? 7 : 6);
7290                 }
7291         }
7292
7293         bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
7294         bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
7295         bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
7296         bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
7297
7298         if (!CHIP_IS_E1x(bp)) {
7299                 /* reset VFC memories */
7300                 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7301                            VFC_MEMORIES_RST_REG_CAM_RST |
7302                            VFC_MEMORIES_RST_REG_RAM_RST);
7303                 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7304                            VFC_MEMORIES_RST_REG_CAM_RST |
7305                            VFC_MEMORIES_RST_REG_RAM_RST);
7306
7307                 msleep(20);
7308         }
7309
7310         bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
7311         bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
7312         bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
7313         bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
7314
7315         /* sync semi rtc */
7316         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7317                0x80000000);
7318         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7319                0x80000000);
7320
7321         bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
7322         bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
7323         bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
7324
7325         if (!CHIP_IS_E1x(bp)) {
7326                 if (IS_MF_AFEX(bp)) {
7327                         /* configure that VNTag and VLAN headers must be
7328                          * sent in afex mode
7329                          */
7330                         REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
7331                         REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
7332                         REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
7333                         REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
7334                         REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
7335                 } else {
7336                         REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
7337                                bp->path_has_ovlan ? 7 : 6);
7338                 }
7339         }
7340
7341         REG_WR(bp, SRC_REG_SOFT_RST, 1);
7342
7343         bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
7344
7345         if (CNIC_SUPPORT(bp)) {
7346                 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
7347                 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
7348                 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
7349                 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
7350                 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
7351                 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
7352                 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
7353                 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
7354                 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
7355                 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
7356         }
7357         REG_WR(bp, SRC_REG_SOFT_RST, 0);
7358
7359         if (sizeof(union cdu_context) != 1024)
7360                 /* we currently assume that a context is 1024 bytes */
7361                 dev_alert(&bp->pdev->dev,
7362                           "please adjust the size of cdu_context(%ld)\n",
7363                           (long)sizeof(union cdu_context));
7364
7365         bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
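        /* presumably bits 0-11 hold the 1024-byte context size checked
         * just above; the remaining fields keep their original values
         */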
7366         val = (4 << 24) + (0 << 12) + 1024;
7367         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
7368
7369         bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
7370         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
7371         /* enable context validation interrupt from CFC */
7372         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
7373
7374         /* set the thresholds to prevent CFC/CDU race */
7375         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
7376
7377         bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
7378
7379         if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
7380                 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
7381
7382         bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
7383         bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
7384
7385         /* Reset PCIE errors for debug */
7386         REG_WR(bp, 0x2814, 0xffffffff);
7387         REG_WR(bp, 0x3820, 0xffffffff);
7388
7389         if (!CHIP_IS_E1x(bp)) {
7390                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
7391                            (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
7392                                 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
7393                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
7394                            (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
7395                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
7396                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
7397                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
7398                            (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
7399                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
7400                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
7401         }
7402
7403         bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
7404         if (!CHIP_IS_E1(bp)) {
7405                 /* in E3 this is done in the per-port section */
7406                 if (!CHIP_IS_E3(bp))
7407                         REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
7408         }
7409         if (CHIP_IS_E1H(bp))
7410                 /* not applicable for E2 (and above ...) */
7411                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
7412
7413         if (CHIP_REV_IS_SLOW(bp))
7414                 msleep(200);
7415
7416         /* finish CFC init */
7417         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
7418         if (val != 1) {
7419                 BNX2X_ERR("CFC LL_INIT failed\n");
7420                 return -EBUSY;
7421         }
7422         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
7423         if (val != 1) {
7424                 BNX2X_ERR("CFC AC_INIT failed\n");
7425                 return -EBUSY;
7426         }
7427         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
7428         if (val != 1) {
7429                 BNX2X_ERR("CFC CAM_INIT failed\n");
7430                 return -EBUSY;
7431         }
7432         REG_WR(bp, CFC_REG_DEBUG0, 0);
7433
7434         if (CHIP_IS_E1(bp)) {
7435                 /* read the NIG statistic to see if this is
7436                  * the first time up since power-up */
7437                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
7438                 val = *bnx2x_sp(bp, wb_data[0]);
7439
7440                 /* do internal memory self test */
7441                 if ((val == 0) && bnx2x_int_mem_test(bp)) {
7442                         BNX2X_ERR("internal mem self test failed\n");
7443                         return -EBUSY;
7444                 }
7445         }
7446
7447         bnx2x_setup_fan_failure_detection(bp);
7448
7449         /* clear PXP2 attentions */
7450         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
7451
7452         bnx2x_enable_blocks_attention(bp);
7453         bnx2x_enable_blocks_parity(bp);
7454
7455         if (!BP_NOMCP(bp)) {
7456                 if (CHIP_IS_E1x(bp))
7457                         bnx2x__common_init_phy(bp);
7458         } else
7459                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
7460
7461         if (SHMEM2_HAS(bp, netproc_fw_ver))
7462                 SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
7463
7464         return 0;
7465 }
7466
7467 /**
7468  * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
7469  *
7470  * @bp:         driver handle
7471  */
7472 static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
7473 {
7474         int rc = bnx2x_init_hw_common(bp);
7475
7476         if (rc)
7477                 return rc;
7478
7479         /* In E2 2-PORT mode, same ext phy is used for the two paths */
7480         if (!BP_NOMCP(bp))
7481                 bnx2x__common_init_phy(bp);
7482
7483         return 0;
7484 }
7485
7486 static int bnx2x_init_hw_port(struct bnx2x *bp)
7487 {
7488         int port = BP_PORT(bp);
7489         int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
7490         u32 low, high;
7491         u32 val, reg;
7492
7493         DP(NETIF_MSG_HW, "starting port init  port %d\n", port);
7494
7495         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7496
7497         bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7498         bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7499         bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7500
7501         /* Timers bug workaround: the common phase disables the pf_master
7502          * bit in pglue, so we need to enable it here before any dmae
7503          * access is attempted. Therefore we manually add the enable-master
7504          * to the port phase (it also happens in the function phase)
7505          */
7506         if (!CHIP_IS_E1x(bp))
7507                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7508
7509         bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7510         bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7511         bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7512         bnx2x_init_block(bp, BLOCK_QM, init_phase);
7513
7514         bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7515         bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7516         bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7517         bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7518
7519         /* QM cid (connection) count */
7520         bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
7521
7522         if (CNIC_SUPPORT(bp)) {
7523                 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7524                 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7525                 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7526         }
7527
7528         bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7529
7530         bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7531
7532         if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
7533
7534                 if (IS_MF(bp))
7535                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
7536                 else if (bp->dev->mtu > 4096) {
7537                         if (bp->flags & ONE_PORT_FLAG)
7538                                 low = 160;
7539                         else {
7540                                 val = bp->dev->mtu;
7541                                 /* (24*1024 + val*4)/256 */
7542                                 low = 96 + (val/64) +
7543                                                 ((val % 64) ? 1 : 0);
7544                         }
7545                 } else
7546                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
7547                 high = low + 56;        /* 14*1024/256 */
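                /* worked example for a two-port, non-MF setup with mtu 9000:
                 * low = 96 + 9000/64 + 1 = 237, high = 237 + 56 = 293
                 * (both in 256-byte units, per the formula above)
                 */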
7548                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
7549                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
7550         }
7551
7552         if (CHIP_MODE_IS_4_PORT(bp))
7553                 REG_WR(bp, (BP_PORT(bp) ?
7554                             BRB1_REG_MAC_GUARANTIED_1 :
7555                             BRB1_REG_MAC_GUARANTIED_0), 40);
7556
7557         bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7558         if (CHIP_IS_E3B0(bp)) {
7559                 if (IS_MF_AFEX(bp)) {
7560                         /* configure headers for AFEX mode */
7561                         REG_WR(bp, BP_PORT(bp) ?
7562                                PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7563                                PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
7564                         REG_WR(bp, BP_PORT(bp) ?
7565                                PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
7566                                PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
7567                         REG_WR(bp, BP_PORT(bp) ?
7568                                PRS_REG_MUST_HAVE_HDRS_PORT_1 :
7569                                PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
7570                 } else {
7571                         /* Ovlan headers exist only in multi-function +
7572                          * switch-dependent mode; in switch-independent
7573                          * mode there are no ovlan headers
7574                          */
7575                         REG_WR(bp, BP_PORT(bp) ?
7576                                PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7577                                PRS_REG_HDRS_AFTER_BASIC_PORT_0,
7578                                (bp->path_has_ovlan ? 7 : 6));
7579                 }
7580         }
7581
7582         bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7583         bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7584         bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7585         bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7586
7587         bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7588         bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7589         bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7590         bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7591
7592         bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7593         bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7594
7595         bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7596
7597         if (CHIP_IS_E1x(bp)) {
7598                 /* configure PBF to work without PAUSE, for MTU 9000 */
7599                 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
7600
7601                 /* update threshold */
7602                 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
7603                 /* update init credit */
7604                 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
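                /* i.e. 9040/16 = 565 credits, 565 + 553 - 22 = 1096 in total */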
7605
7606                 /* probe changes */
7607                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7608                 udelay(50);
7609                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7610         }
7611
7612         if (CNIC_SUPPORT(bp))
7613                 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7614
7615         bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7616         bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7617
7618         if (CHIP_IS_E1(bp)) {
7619                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7620                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7621         }
7622         bnx2x_init_block(bp, BLOCK_HC, init_phase);
7623
7624         bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7625
7626         bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7627         /* init aeu_mask_attn_func_0/1:
7628          *  - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
7629          *  - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF
7630          *             bits 4-7 are used for "per vn group attention" */
7631         val = IS_MF(bp) ? 0xF7 : 0x7;
7632         /* Enable DCBX attention for all but E1 */
7633         val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7634         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7635
7636         /* SCPAD_PARITY should NOT trigger close the gates */
7637         reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
7638         REG_WR(bp, reg,
7639                REG_RD(bp, reg) &
7640                ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7641
7642         reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
7643         REG_WR(bp, reg,
7644                REG_RD(bp, reg) &
7645                ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7646
7647         bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7648
7649         if (!CHIP_IS_E1x(bp)) {
7650                 /* Bit-map indicating which L2 hdrs may appear after the
7651                  * basic Ethernet header
7652                  */
7653                 if (IS_MF_AFEX(bp))
7654                         REG_WR(bp, BP_PORT(bp) ?
7655                                NIG_REG_P1_HDRS_AFTER_BASIC :
7656                                NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
7657                 else
7658                         REG_WR(bp, BP_PORT(bp) ?
7659                                NIG_REG_P1_HDRS_AFTER_BASIC :
7660                                NIG_REG_P0_HDRS_AFTER_BASIC,
7661                                IS_MF_SD(bp) ? 7 : 6);
7662
7663                 if (CHIP_IS_E3(bp))
7664                         REG_WR(bp, BP_PORT(bp) ?
7665                                    NIG_REG_LLH1_MF_MODE :
7666                                    NIG_REG_LLH_MF_MODE, IS_MF(bp));
7667         }
7668         if (!CHIP_IS_E3(bp))
7669                 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
7670
7671         if (!CHIP_IS_E1(bp)) {
7672                 /* 0x2 disable mf_ov, 0x1 enable */
7673                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
7674                        (IS_MF_SD(bp) ? 0x1 : 0x2));
7675
7676                 if (!CHIP_IS_E1x(bp)) {
7677                         val = 0;
7678                         switch (bp->mf_mode) {
7679                         case MULTI_FUNCTION_SD:
7680                                 val = 1;
7681                                 break;
7682                         case MULTI_FUNCTION_SI:
7683                         case MULTI_FUNCTION_AFEX:
7684                                 val = 2;
7685                                 break;
7686                         }
7687
7688                         REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7689                                                   NIG_REG_LLH0_CLS_TYPE), val);
7690                 }
7691                 {
7692                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7693                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7694                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7695                 }
7696         }
7697
7698         /* If SPIO5 is set to generate interrupts, enable it for this port */
7699         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
7700         if (val & MISC_SPIO_SPIO5) {
7701                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7702                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7703                 val = REG_RD(bp, reg_addr);
7704                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
7705                 REG_WR(bp, reg_addr, val);
7706         }
7707
7708         return 0;
7709 }
7710
7711 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7712 {
7713         int reg;
7714         u32 wb_write[2];
7715
7716         if (CHIP_IS_E1(bp))
7717                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
7718         else
7719                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
7720
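        /* ILT entries are wide-bus registers: the page address is written
         * as two 32-bit words in a single DMAE transaction
         */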
7721         wb_write[0] = ONCHIP_ADDR1(addr);
7722         wb_write[1] = ONCHIP_ADDR2(addr);
7723         REG_WR_DMAE(bp, reg, wb_write, 2);
7724 }
7725
7726 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
7727 {
7728         u32 data, ctl, cnt = 100;
7729         u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7730         u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
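        /* the cleanup-ack registers hold one bit per status block,
         * 32 status blocks per 32-bit register
         */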
7731         u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7732         u32 sb_bit =  1 << (idu_sb_id%32);
7733         u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7734         u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7735
7736         /* Not supported in BC mode */
7737         if (CHIP_INT_MODE_IS_BC(bp))
7738                 return;
7739
7740         data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7741                         << IGU_REGULAR_CLEANUP_TYPE_SHIFT)      |
7742                 IGU_REGULAR_CLEANUP_SET                         |
7743                 IGU_REGULAR_BCLEANUP;
7744
7745         ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT         |
7746               func_encode << IGU_CTRL_REG_FID_SHIFT             |
7747               IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7748
7749         DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7750                          data, igu_addr_data);
7751         REG_WR(bp, igu_addr_data, data);
7752         mmiowb();
7753         barrier();
7754         DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7755                           ctl, igu_addr_ctl);
7756         REG_WR(bp, igu_addr_ctl, ctl);
7757         mmiowb();
7758         barrier();
7759
7760         /* wait for the cleanup to finish: poll up to 100 times, 20 ms apart */
7761         while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7762                 msleep(20);
7763
7764         if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7765                 DP(NETIF_MSG_HW,
7766                    "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7767                           idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7768         }
7769 }
7770
7771 static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7772 {
7773         bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
7774 }
7775
7776 static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7777 {
7778         u32 i, base = FUNC_ILT_BASE(func);
7779         for (i = base; i < base + ILT_PER_FUNC; i++)
7780                 bnx2x_ilt_wr(bp, i, 0);
7781 }
7782
7783 static void bnx2x_init_searcher(struct bnx2x *bp)
7784 {
7785         int port = BP_PORT(bp);
7786         bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7787         /* T1 hash bits value determines the T1 number of entries */
7788         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7789 }
7790
7791 static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7792 {
7793         int rc;
7794         struct bnx2x_func_state_params func_params = {NULL};
7795         struct bnx2x_func_switch_update_params *switch_update_params =
7796                 &func_params.params.switch_update;
7797
7798         /* Prepare parameters for function state transitions */
7799         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7800         __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7801
7802         func_params.f_obj = &bp->func_obj;
7803         func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7804
7805         /* Function parameters */
7806         __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
7807                   &switch_update_params->changes);
7808         if (suspend)
7809                 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
7810                           &switch_update_params->changes);
7811
7812         rc = bnx2x_func_state_change(bp, &func_params);
7813
7814         return rc;
7815 }
7816
7817 static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7818 {
7819         int rc, i, port = BP_PORT(bp);
7820         int vlan_en = 0, mac_en[NUM_MACS];
7821
7822         /* Close input from network */
7823         if (bp->mf_mode == SINGLE_FUNCTION) {
7824                 bnx2x_set_rx_filter(&bp->link_params, 0);
7825         } else {
7826                 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7827                                    NIG_REG_LLH0_FUNC_EN);
7828                 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7829                           NIG_REG_LLH0_FUNC_EN, 0);
7830                 for (i = 0; i < NUM_MACS; i++) {
7831                         mac_en[i] = REG_RD(bp, port ?
7832                                              (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7833                                               4 * i) :
7834                                              (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7835                                               4 * i));
7836                         REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7837                                               4 * i) :
7838                                   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7839                 }
7840         }
7841
7842         /* Close BMC to host */
7843         REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7844                NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7845
7846         /* Suspend Tx switching to the PF. Completion of this ramrod
7847          * further guarantees that all the packets of that PF / child
7848          * VFs in BRB were processed by the Parser, so it is safe to
7849          * change the NIC_MODE register.
7850          */
7851         rc = bnx2x_func_switch_update(bp, 1);
7852         if (rc) {
7853                 BNX2X_ERR("Can't suspend tx-switching!\n");
7854                 return rc;
7855         }
7856
7857         /* Change NIC_MODE register */
7858         REG_WR(bp, PRS_REG_NIC_MODE, 0);
7859
7860         /* Open input from network */
7861         if (bp->mf_mode == SINGLE_FUNCTION) {
7862                 bnx2x_set_rx_filter(&bp->link_params, 1);
7863         } else {
7864                 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7865                           NIG_REG_LLH0_FUNC_EN, vlan_en);
7866                 for (i = 0; i < NUM_MACS; i++) {
7867                         REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7868                                               4 * i) :
7869                                   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7870                                   mac_en[i]);
7871                 }
7872         }
7873
7874         /* Enable BMC to host */
7875         REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7876                NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7877
7878         /* Resume Tx switching to the PF */
7879         rc = bnx2x_func_switch_update(bp, 0);
7880         if (rc) {
7881                 BNX2X_ERR("Can't resume tx-switching!\n");
7882                 return rc;
7883         }
7884
7885         DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7886         return 0;
7887 }
7888
7889 int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7890 {
7891         int rc;
7892
7893         bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7894
7895         if (CONFIGURE_NIC_MODE(bp)) {
7896                 /* Configure searcher as part of function hw init */
7897                 bnx2x_init_searcher(bp);
7898
7899                 /* Reset NIC mode */
7900                 rc = bnx2x_reset_nic_mode(bp);
7901                 if (rc)
7902                         BNX2X_ERR("Can't change NIC mode!\n");
7903                 return rc;
7904         }
7905
7906         return 0;
7907 }
7908
7909 /* A previous driver DMAE transaction may have occurred when the pre-boot
7910  * stage ended and boot began, or when a kdump kernel was loaded. Either
7911  * case would invalidate the addresses of the transaction, resulting in the
7912  * was-error bit being set in the pci and causing all hw-to-host pcie
7913  * transactions to time out. If this happened we want to clear both the
7914  * interrupt which detected this from the pglueb and the was-error bit.
7915  */
7916 static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
7917 {
7918         if (!CHIP_IS_E1x(bp))
7919                 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
7920                        1 << BP_ABS_FUNC(bp));
7921 }
7922
7923 static int bnx2x_init_hw_func(struct bnx2x *bp)
7924 {
7925         int port = BP_PORT(bp);
7926         int func = BP_FUNC(bp);
7927         int init_phase = PHASE_PF0 + func;
7928         struct bnx2x_ilt *ilt = BP_ILT(bp);
7929         u16 cdu_ilt_start;
7930         u32 addr, val;
7931         u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7932         int i, main_mem_width, rc;
7933
7934         DP(NETIF_MSG_HW, "starting func init  func %d\n", func);
7935
7936         /* FLR cleanup - not applicable on E1x chips */
7937         if (!CHIP_IS_E1x(bp)) {
7938                 rc = bnx2x_pf_flr_clnup(bp);
7939                 if (rc) {
7940                         bnx2x_fw_dump(bp);
7941                         return rc;
7942                 }
7943         }
7944
7945         /* set MSI reconfigure capability */
7946         if (bp->common.int_block == INT_BLOCK_HC) {
7947                 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7948                 val = REG_RD(bp, addr);
7949                 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7950                 REG_WR(bp, addr, val);
7951         }
7952
7953         bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7954         bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7955
7956         ilt = BP_ILT(bp);
7957         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7958
7959         if (IS_SRIOV(bp))
7960                 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7961         cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7962
7963         /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precede
7964          * those of the VFs, so the start line should be reset
7965          */
7966         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7967         for (i = 0; i < L2_ILT_LINES(bp); i++) {
7968                 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7969                 ilt->lines[cdu_ilt_start + i].page_mapping =
7970                         bp->context[i].cxt_mapping;
7971                 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7972         }
7973
7974         bnx2x_ilt_init_op(bp, INITOP_SET);
7975
7976         if (!CONFIGURE_NIC_MODE(bp)) {
7977                 bnx2x_init_searcher(bp);
7978                 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7979                 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7980         } else {
7981                 /* Set NIC mode */
7982                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7983                 DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
7984         }
7985
7986         if (!CHIP_IS_E1x(bp)) {
7987                 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
7988
7989                 /* Turn on a single ISR mode in IGU if driver is going to use
7990                  * INT#x or MSI
7991                  */
7992                 if (!(bp->flags & USING_MSIX_FLAG))
7993                         pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
7994                 /*
7995                  * Timers bug workaround: function init part.
7996                  * Wait 20 msec after initializing the ILT to make
7997                  * sure there are no requests left in any of the PXP
7998                  * internal queues with "old" ILT addresses.
7999                  */
8000                 msleep(20);
8001                 /*
8002                  * Master enable - needed for the WB DMAE writes performed
8003                  * before this register is re-initialized as part of the
8004                  * regular function init.
8005                  */
8006                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
8007                 /* Enable the function in IGU */
8008                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
8009         }
8010
8011         bp->dmae_ready = 1;
8012
8013         bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
8014
8015         bnx2x_clean_pglue_errors(bp);
8016
8017         bnx2x_init_block(bp, BLOCK_ATC, init_phase);
8018         bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
8019         bnx2x_init_block(bp, BLOCK_NIG, init_phase);
8020         bnx2x_init_block(bp, BLOCK_SRC, init_phase);
8021         bnx2x_init_block(bp, BLOCK_MISC, init_phase);
8022         bnx2x_init_block(bp, BLOCK_TCM, init_phase);
8023         bnx2x_init_block(bp, BLOCK_UCM, init_phase);
8024         bnx2x_init_block(bp, BLOCK_CCM, init_phase);
8025         bnx2x_init_block(bp, BLOCK_XCM, init_phase);
8026         bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
8027         bnx2x_init_block(bp, BLOCK_USEM, init_phase);
8028         bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
8029         bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
8030
8031         if (!CHIP_IS_E1x(bp))
8032                 REG_WR(bp, QM_REG_PF_EN, 1);
8033
8034         if (!CHIP_IS_E1x(bp)) {
8035                 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8036                 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8037                 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8038                 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8039         }
8040         bnx2x_init_block(bp, BLOCK_QM, init_phase);
8041
8042         bnx2x_init_block(bp, BLOCK_TM, init_phase);
8043         bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
8044         REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */
8045
8046         bnx2x_iov_init_dq(bp);
8047
8048         bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
8049         bnx2x_init_block(bp, BLOCK_PRS, init_phase);
8050         bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
8051         bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
8052         bnx2x_init_block(bp, BLOCK_USDM, init_phase);
8053         bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
8054         bnx2x_init_block(bp, BLOCK_UPB, init_phase);
8055         bnx2x_init_block(bp, BLOCK_XPB, init_phase);
8056         bnx2x_init_block(bp, BLOCK_PBF, init_phase);
8057         if (!CHIP_IS_E1x(bp))
8058                 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
8059
8060         bnx2x_init_block(bp, BLOCK_CDU, init_phase);
8061
8062         bnx2x_init_block(bp, BLOCK_CFC, init_phase);
8063
8064         if (!CHIP_IS_E1x(bp))
8065                 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
8066
8067         if (IS_MF(bp)) {
8068                 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
8069                         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
8070                         REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
8071                                bp->mf_ov);
8072                 }
8073         }
8074
8075         bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
8076
8077         /* HC init per function */
8078         if (bp->common.int_block == INT_BLOCK_HC) {
8079                 if (CHIP_IS_E1H(bp)) {
8080                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8081
8082                         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8083                         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8084                 }
8085                 bnx2x_init_block(bp, BLOCK_HC, init_phase);
8086
8087         } else {
8088                 int num_segs, sb_idx, prod_offset;
8089
8090                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8091
8092                 if (!CHIP_IS_E1x(bp)) {
8093                         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8094                         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8095                 }
8096
8097                 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
8098
8099                 if (!CHIP_IS_E1x(bp)) {
8100                         int dsb_idx = 0;
8101                         /*
8102                          * Producer memory:
8103                          * E2 mode: addresses 0-135 match the mapping memory;
8104                          * 136 - PF0 default prod; 137 - PF1 default prod;
8105                          * 138 - PF2 default prod; 139 - PF3 default prod;
8106                          * 140 - PF0 attn prod;    141 - PF1 attn prod;
8107                          * 142 - PF2 attn prod;    143 - PF3 attn prod;
8108                          * 144-147 reserved.
8109                          *
8110                          * E1.5 (backward-compatible) mode: for each
8111                          * non-default SB, each even line in the memory
8112                          * holds the U producer and each odd line holds
8113                          * the C producer. The first 128 producers are for
8114                          * NDSBs (PF0 - 0-31; PF1 - 32-63 and so on). The last
8115                          * 20 producers are for the DSBs of the PFs.
8116                          * Each PF has five segments (the order inside each
8117                          * segment is PF0, PF1, PF2, PF3): 128-131 U prods;
8118                          * 132-135 C prods; 136-139 X prods; 140-143 T prods;
8119                          * 144-147 attn prods.
8120                          */
8121                         /* non-default-status-blocks */
8122                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8123                                 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
8124                         for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
8125                                 prod_offset = (bp->igu_base_sb + sb_idx) *
8126                                         num_segs;
8127
8128                                 for (i = 0; i < num_segs; i++) {
8129                                         addr = IGU_REG_PROD_CONS_MEMORY +
8130                                                         (prod_offset + i) * 4;
8131                                         REG_WR(bp, addr, 0);
8132                                 }
8133                                 /* send consumer update with value 0 */
8134                                 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
8135                                              USTORM_ID, 0, IGU_INT_NOP, 1);
8136                                 bnx2x_igu_clear_sb(bp,
8137                                                    bp->igu_base_sb + sb_idx);
8138                         }
8139
8140                         /* default-status-blocks */
8141                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8142                                 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
8143
8144                         if (CHIP_MODE_IS_4_PORT(bp))
8145                                 dsb_idx = BP_FUNC(bp);
8146                         else
8147                                 dsb_idx = BP_VN(bp);
8148
8149                         prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
8150                                        IGU_BC_BASE_DSB_PROD + dsb_idx :
8151                                        IGU_NORM_BASE_DSB_PROD + dsb_idx);
8152
8153                         /*
8154                          * IGU prods come in chunks of E1HVN_MAX (4),
8155                          * regardless of the current chip mode.
8156                          */
8157                         for (i = 0; i < (num_segs * E1HVN_MAX);
8158                              i += E1HVN_MAX) {
8159                                 addr = IGU_REG_PROD_CONS_MEMORY +
8160                                                         (prod_offset + i)*4;
8161                                 REG_WR(bp, addr, 0);
8162                         }
8163                         /* send consumer update with 0 */
8164                         if (CHIP_INT_MODE_IS_BC(bp)) {
8165                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8166                                              USTORM_ID, 0, IGU_INT_NOP, 1);
8167                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8168                                              CSTORM_ID, 0, IGU_INT_NOP, 1);
8169                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8170                                              XSTORM_ID, 0, IGU_INT_NOP, 1);
8171                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8172                                              TSTORM_ID, 0, IGU_INT_NOP, 1);
8173                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8174                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
8175                         } else {
8176                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8177                                              USTORM_ID, 0, IGU_INT_NOP, 1);
8178                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8179                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
8180                         }
8181                         bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
8182
8183                         /* !!! These should become driver const once
8184                            rf-tool supports split-68 const */
8185                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
8186                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
8187                         REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
8188                         REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
8189                         REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
8190                         REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
8191                 }
8192         }
8193
8194         /* Reset PCIE errors for debug */
8195         REG_WR(bp, 0x2114, 0xffffffff);
8196         REG_WR(bp, 0x2120, 0xffffffff);
8197
8198         if (CHIP_IS_E1x(bp)) {
8199                 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
8200                 main_mem_base = HC_REG_MAIN_MEMORY +
8201                                 BP_PORT(bp) * (main_mem_size * 4);
8202                 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
8203                 main_mem_width = 8;
8204
8205                 val = REG_RD(bp, main_mem_prty_clr);
8206                 if (val)
8207                         DP(NETIF_MSG_HW,
8208                            "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
8209                            val);
8210
8211                 /* Clear "false" parity errors in MSI-X table */
8212                 for (i = main_mem_base;
8213                      i < main_mem_base + main_mem_size * 4;
8214                      i += main_mem_width) {
8215                         bnx2x_read_dmae(bp, i, main_mem_width / 4);
8216                         bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
8217                                          i, main_mem_width / 4);
8218                 }
8219                 /* Clear HC parity attention */
8220                 REG_RD(bp, main_mem_prty_clr);
8221         }
8222
8223 #ifdef BNX2X_STOP_ON_ERROR
8224         /* Enable STORMs SP logging */
8225         REG_WR8(bp, BAR_USTRORM_INTMEM +
8226                USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8227         REG_WR8(bp, BAR_TSTRORM_INTMEM +
8228                TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8229         REG_WR8(bp, BAR_CSTRORM_INTMEM +
8230                CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8231         REG_WR8(bp, BAR_XSTRORM_INTMEM +
8232                XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8233 #endif
8234
8235         bnx2x_phy_probe(&bp->link_params);
8236
8237         return 0;
8238 }
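
/* A minimal sketch (hypothetical helper, not called by the driver): how the
 * loops above derive the IGU producer/consumer register address for a given
 * status block and segment, assuming the producer-memory layout described
 * in the comment above. Purely arithmetic; nothing here touches hardware.
 */
static u32 __maybe_unused bnx2x_igu_prod_addr_sketch(u8 igu_sb_id,
						     int num_segs, int seg)
{
	/* each SB owns num_segs consecutive 4-byte producer entries */
	u32 prod_offset = igu_sb_id * num_segs;

	return IGU_REG_PROD_CONS_MEMORY + (prod_offset + seg) * 4;
}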
8239
8240 void bnx2x_free_mem_cnic(struct bnx2x *bp)
8241 {
8242         bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
8243
8244         if (!CHIP_IS_E1x(bp))
8245                 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
8246                                sizeof(struct host_hc_status_block_e2));
8247         else
8248                 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
8249                                sizeof(struct host_hc_status_block_e1x));
8250
8251         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8252 }
8253
8254 void bnx2x_free_mem(struct bnx2x *bp)
8255 {
8256         int i;
8257
8258         BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
8259                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
8260
8261         if (IS_VF(bp))
8262                 return;
8263
8264         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
8265                        sizeof(struct host_sp_status_block));
8266
8267         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
8268                        sizeof(struct bnx2x_slowpath));
8269
8270         for (i = 0; i < L2_ILT_LINES(bp); i++)
8271                 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
8272                                bp->context[i].size);
8273         bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
8274
8275         BNX2X_FREE(bp->ilt->lines);
8276
8277         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
8278
8279         BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
8280                        BCM_PAGE_SIZE * NUM_EQ_PAGES);
8281
8282         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8283
8284         bnx2x_iov_free_mem(bp);
8285 }
8286
8287 int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
8288 {
8289         if (!CHIP_IS_E1x(bp)) {
8290                 /* size = the status block + ramrod buffers */
8291                 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8292                                                     sizeof(struct host_hc_status_block_e2));
8293                 if (!bp->cnic_sb.e2_sb)
8294                         goto alloc_mem_err;
8295         } else {
8296                 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8297                                                      sizeof(struct host_hc_status_block_e1x));
8298                 if (!bp->cnic_sb.e1x_sb)
8299                         goto alloc_mem_err;
8300         }
8301
8302         if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8303                 /* allocate searcher T2 table, as it wasn't allocated before */
8304                 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8305                 if (!bp->t2)
8306                         goto alloc_mem_err;
8307         }
8308
8309         /* write address to which L5 should insert its values */
8310         bp->cnic_eth_dev.addr_drv_info_to_mcp =
8311                 &bp->slowpath->drv_info_to_mcp;
8312
8313         if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
8314                 goto alloc_mem_err;
8315
8316         return 0;
8317
8318 alloc_mem_err:
8319         bnx2x_free_mem_cnic(bp);
8320         BNX2X_ERR("Can't allocate memory\n");
8321         return -ENOMEM;
8322 }
8323
8324 int bnx2x_alloc_mem(struct bnx2x *bp)
8325 {
8326         int i, allocated, context_size;
8327
8328         if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8329                 /* allocate searcher T2 table */
8330                 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8331                 if (!bp->t2)
8332                         goto alloc_mem_err;
8333         }
8334
8335         bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
8336                                              sizeof(struct host_sp_status_block));
8337         if (!bp->def_status_blk)
8338                 goto alloc_mem_err;
8339
8340         bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
8341                                        sizeof(struct bnx2x_slowpath));
8342         if (!bp->slowpath)
8343                 goto alloc_mem_err;
8344
8345         /* Allocate memory for the CDU context:
8346          * This memory is allocated separately and not in the generic ILT
8347          * functions because the CDU differs in a few aspects:
8348          * 1. There are multiple entities allocating memory for context -
8349          * the 'regular' driver, CNIC and the SRIOV driver. Each separately
8350          * controls its own ILT lines.
8351          * 2. Since the CDU page size is not a single 4KB page (which is the
8352          * case for the other ILT clients), to be efficient we want to support
8353          * a sub-page-size allocation in the last entry.
8354          * 3. Context pointers are used by the driver to pass to FW / update
8355          * the context (for the other ILT clients the pointers are used just
8356          * to free the memory during unload).
8357          */
8358         context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
8359
8360         for (i = 0, allocated = 0; allocated < context_size; i++) {
8361                 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
8362                                           (context_size - allocated));
8363                 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
8364                                                       bp->context[i].size);
8365                 if (!bp->context[i].vcxt)
8366                         goto alloc_mem_err;
8367                 allocated += bp->context[i].size;
8368         }
8369         bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
8370                                  GFP_KERNEL);
8371         if (!bp->ilt->lines)
8372                 goto alloc_mem_err;
8373
8374         if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
8375                 goto alloc_mem_err;
8376
8377         if (bnx2x_iov_alloc_mem(bp))
8378                 goto alloc_mem_err;
8379
8380         /* Slow path ring */
8381         bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
8382         if (!bp->spq)
8383                 goto alloc_mem_err;
8384
8385         /* EQ */
8386         bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
8387                                       BCM_PAGE_SIZE * NUM_EQ_PAGES);
8388         if (!bp->eq_ring)
8389                 goto alloc_mem_err;
8390
8391         return 0;
8392
8393 alloc_mem_err:
8394         bnx2x_free_mem(bp);
8395         BNX2X_ERR("Can't allocate memory\n");
8396         return -ENOMEM;
8397 }
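
/* A minimal sketch (hypothetical helper, not called by the driver): the CDU
 * context chunking performed in bnx2x_alloc_mem() above. The total context
 * size is split into CDU_ILT_PAGE_SZ chunks, and the last chunk may be
 * smaller than a full ILT page (aspect 2 in the comment above).
 */
static int __maybe_unused bnx2x_cdu_chunks_sketch(int context_size)
{
	int i, allocated;

	for (i = 0, allocated = 0; allocated < context_size; i++)
		allocated += min(CDU_ILT_PAGE_SZ, context_size - allocated);

	return i;	/* number of bp->context[] entries that would be used */
}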
8398
8399 /*
8400  * Init service functions
8401  */
8402
8403 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
8404                       struct bnx2x_vlan_mac_obj *obj, bool set,
8405                       int mac_type, unsigned long *ramrod_flags)
8406 {
8407         int rc;
8408         struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8409
8410         memset(&ramrod_param, 0, sizeof(ramrod_param));
8411
8412         /* Fill general parameters */
8413         ramrod_param.vlan_mac_obj = obj;
8414         ramrod_param.ramrod_flags = *ramrod_flags;
8415
8416         /* Fill a user request section if needed */
8417         if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8418                 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
8419
8420                 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
8421
8422                 /* Set the command: ADD or DEL */
8423                 if (set)
8424                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8425                 else
8426                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8427         }
8428
8429         rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8430
8431         if (rc == -EEXIST) {
8432                 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8433                 /* do not treat adding same MAC as error */
8434                 rc = 0;
8435         } else if (rc < 0)
8436                 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
8437
8438         return rc;
8439 }
8440
8441 int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8442                        struct bnx2x_vlan_mac_obj *obj, bool set,
8443                        unsigned long *ramrod_flags)
8444 {
8445         int rc;
8446         struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8447
8448         memset(&ramrod_param, 0, sizeof(ramrod_param));
8449
8450         /* Fill general parameters */
8451         ramrod_param.vlan_mac_obj = obj;
8452         ramrod_param.ramrod_flags = *ramrod_flags;
8453
8454         /* Fill a user request section if needed */
8455         if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8456                 ramrod_param.user_req.u.vlan.vlan = vlan;
8457                 /* Set the command: ADD or DEL */
8458                 if (set)
8459                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8460                 else
8461                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8462         }
8463
8464         rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8465
8466         if (rc == -EEXIST) {
8467                 /* Do not treat adding same vlan as error. */
8468                 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8469                 rc = 0;
8470         } else if (rc < 0) {
8471                 BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
8472         }
8473
8474         return rc;
8475 }
8476
8477 int bnx2x_del_all_macs(struct bnx2x *bp,
8478                        struct bnx2x_vlan_mac_obj *mac_obj,
8479                        int mac_type, bool wait_for_comp)
8480 {
8481         int rc;
8482         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
8483
8484         /* Wait for completion of the requested operation */
8485         if (wait_for_comp)
8486                 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8487
8488         /* Set the mac type of addresses we want to clear */
8489         __set_bit(mac_type, &vlan_mac_flags);
8490
8491         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
8492         if (rc < 0)
8493                 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
8494
8495         return rc;
8496 }
8497
8498 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
8499 {
8500         if (IS_PF(bp)) {
8501                 unsigned long ramrod_flags = 0;
8502
8503                 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
8504                 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8505                 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
8506                                          &bp->sp_objs->mac_obj, set,
8507                                          BNX2X_ETH_MAC, &ramrod_flags);
8508         } else { /* vf */
8509                 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
8510                                              bp->fp->index, set);
8511         }
8512 }
8513
8514 int bnx2x_setup_leading(struct bnx2x *bp)
8515 {
8516         if (IS_PF(bp))
8517                 return bnx2x_setup_queue(bp, &bp->fp[0], true);
8518         else /* VF */
8519                 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
8520 }
8521
8522 /**
8523  * bnx2x_set_int_mode - configure interrupt mode
8524  *
8525  * @bp:         driver handle
8526  *
8527  * When MSI-X mode is requested, this also tries to enable MSI-X.
8528  */
8529 int bnx2x_set_int_mode(struct bnx2x *bp)
8530 {
8531         int rc = 0;
8532
8533         if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
8534                 BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
8535                 return -EINVAL;
8536         }
8537
8538         switch (int_mode) {
8539         case BNX2X_INT_MODE_MSIX:
8540                 /* attempt to enable msix */
8541                 rc = bnx2x_enable_msix(bp);
8542
8543                 /* msix attained */
8544                 if (!rc)
8545                         return 0;
8546
8547                 /* vfs use only msix */
8548                 if (rc && IS_VF(bp))
8549                         return rc;
8550
8551                 /* failed to enable multiple MSI-X */
8552                 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
8553                                bp->num_queues,
8554                                1 + bp->num_cnic_queues);
8555
8556                 /* falling through... */
8557         case BNX2X_INT_MODE_MSI:
8558                 bnx2x_enable_msi(bp);
8559
8560                 /* falling through... */
8561         case BNX2X_INT_MODE_INTX:
8562                 bp->num_ethernet_queues = 1;
8563                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
8564                 BNX2X_DEV_INFO("set number of queues to 1\n");
8565                 break;
8566         default:
8567                 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
8568                 return -EINVAL;
8569         }
8570         return 0;
8571 }
8572
8573 /* must be called prior to any HW initializations */
8574 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8575 {
8576         if (IS_SRIOV(bp))
8577                 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
8578         return L2_ILT_LINES(bp);
8579 }
8580
8581 void bnx2x_ilt_set_info(struct bnx2x *bp)
8582 {
8583         struct ilt_client_info *ilt_client;
8584         struct bnx2x_ilt *ilt = BP_ILT(bp);
8585         u16 line = 0;
8586
8587         ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8588         DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
8589
8590         /* CDU */
8591         ilt_client = &ilt->clients[ILT_CLIENT_CDU];
8592         ilt_client->client_num = ILT_CLIENT_CDU;
8593         ilt_client->page_size = CDU_ILT_PAGE_SZ;
8594         ilt_client->flags = ILT_CLIENT_SKIP_MEM;
8595         ilt_client->start = line;
8596         line += bnx2x_cid_ilt_lines(bp);
8597
8598         if (CNIC_SUPPORT(bp))
8599                 line += CNIC_ILT_LINES;
8600         ilt_client->end = line - 1;
8601
8602         DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8603            ilt_client->start,
8604            ilt_client->end,
8605            ilt_client->page_size,
8606            ilt_client->flags,
8607            ilog2(ilt_client->page_size >> 12));
8608
8609         /* QM */
8610         if (QM_INIT(bp->qm_cid_count)) {
8611                 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8612                 ilt_client->client_num = ILT_CLIENT_QM;
8613                 ilt_client->page_size = QM_ILT_PAGE_SZ;
8614                 ilt_client->flags = 0;
8615                 ilt_client->start = line;
8616
8617                 /* 4 bytes for each cid */
8618                 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8619                                                          QM_ILT_PAGE_SZ);
8620
8621                 ilt_client->end = line - 1;
8622
8623                 DP(NETIF_MSG_IFUP,
8624                    "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8625                    ilt_client->start,
8626                    ilt_client->end,
8627                    ilt_client->page_size,
8628                    ilt_client->flags,
8629                    ilog2(ilt_client->page_size >> 12));
8630         }
8631
8632         if (CNIC_SUPPORT(bp)) {
8633                 /* SRC */
8634                 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8635                 ilt_client->client_num = ILT_CLIENT_SRC;
8636                 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8637                 ilt_client->flags = 0;
8638                 ilt_client->start = line;
8639                 line += SRC_ILT_LINES;
8640                 ilt_client->end = line - 1;
8641
8642                 DP(NETIF_MSG_IFUP,
8643                    "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8644                    ilt_client->start,
8645                    ilt_client->end,
8646                    ilt_client->page_size,
8647                    ilt_client->flags,
8648                    ilog2(ilt_client->page_size >> 12));
8649
8650                 /* TM */
8651                 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8652                 ilt_client->client_num = ILT_CLIENT_TM;
8653                 ilt_client->page_size = TM_ILT_PAGE_SZ;
8654                 ilt_client->flags = 0;
8655                 ilt_client->start = line;
8656                 line += TM_ILT_LINES;
8657                 ilt_client->end = line - 1;
8658
8659                 DP(NETIF_MSG_IFUP,
8660                    "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8661                    ilt_client->start,
8662                    ilt_client->end,
8663                    ilt_client->page_size,
8664                    ilt_client->flags,
8665                    ilog2(ilt_client->page_size >> 12));
8666         }
8667
8668         BUG_ON(line > ILT_MAX_LINES);
8669 }
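
/* A minimal sketch (hypothetical helper, not called by the driver): the
 * "hw psz" value printed by the debug messages above. The hardware encodes
 * an ILT client's page size as log2 of its size in 4KB units, which is what
 * ilog2(page_size >> 12) computes.
 */
static int __maybe_unused bnx2x_ilt_hw_psz_sketch(u32 page_size)
{
	return ilog2(page_size >> 12);	/* 4KB -> 0, 8KB -> 1, 16KB -> 2 */
}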
8670
8671 /**
8672  * bnx2x_pf_q_prep_init - prepare INIT transition parameters
8673  *
8674  * @bp:                 driver handle
8675  * @fp:                 pointer to fastpath
8676  * @init_params:        pointer to parameters structure
8677  *
8678  * parameters configured:
8679  *      - HC configuration
8680  *      - Queue's CDU context
8681  */
8682 static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8683         struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
8684 {
8685         u8 cos;
8686         int cxt_index, cxt_offset;
8687
8688         /* FCoE Queue uses Default SB, thus has no HC capabilities */
8689         if (!IS_FCOE_FP(fp)) {
8690                 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8691                 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8692
8693                 /* If HC is supported, enable host coalescing in the transition
8694                  * to INIT state.
8695                  */
8696                 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8697                 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8698
8699                 /* HC rate */
8700                 init_params->rx.hc_rate = bp->rx_ticks ?
8701                         (1000000 / bp->rx_ticks) : 0;
8702                 init_params->tx.hc_rate = bp->tx_ticks ?
8703                         (1000000 / bp->tx_ticks) : 0;
8704
8705                 /* FW SB ID */
8706                 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8707                         fp->fw_sb_id;
8708
8709                 /*
8710                  * CQ index among the SB indices: the FCoE client uses the
8711                  * default SB, therefore it's different.
8712                  */
8713                 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8714                 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
8715         }
8716
8717         /* set maximum number of COSs supported by this queue */
8718         init_params->max_cos = fp->max_cos;
8719
8720         DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
8721             fp->index, init_params->max_cos);
8722
8723         /* set the context pointers queue object */
8724         for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
8725                 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8726                 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
8727                                 ILT_PAGE_CIDS);
8728                 init_params->cxts[cos] =
8729                         &bp->context[cxt_index].vcxt[cxt_offset].eth;
8730         }
8731 }
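
/* A minimal sketch (hypothetical helper, not called by the driver): the HC
 * rate conversion used in bnx2x_pf_q_prep_init() above. A coalescing period
 * in microseconds becomes an events-per-second rate; a period of 0 yields a
 * rate of 0.
 */
static u16 __maybe_unused bnx2x_hc_rate_sketch(u16 ticks_usec)
{
	return ticks_usec ? (1000000 / ticks_usec) : 0;
}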
8732
8733 static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8734                         struct bnx2x_queue_state_params *q_params,
8735                         struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8736                         int tx_index, bool leading)
8737 {
8738         memset(tx_only_params, 0, sizeof(*tx_only_params));
8739
8740         /* Set the command */
8741         q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8742
8743         /* Set tx-only QUEUE flags: don't zero statistics */
8744         tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8745
8746         /* choose the index of the cid to send the slow path on */
8747         tx_only_params->cid_index = tx_index;
8748
8749         /* Set general TX_ONLY_SETUP parameters */
8750         bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8751
8752         /* Set Tx TX_ONLY_SETUP parameters */
8753         bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8754
8755         DP(NETIF_MSG_IFUP,
8756            "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
8757            tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8758            q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8759            tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8760
8761         /* send the ramrod */
8762         return bnx2x_queue_state_change(bp, q_params);
8763 }
8764
8765 /**
8766  * bnx2x_setup_queue - setup queue
8767  *
8768  * @bp:         driver handle
8769  * @fp:         pointer to fastpath
8770  * @leading:    is leading
8771  *
8772  * This function performs 2 steps in a Queue state machine
8773  *      actually: 1) RESET->INIT 2) INIT->SETUP
8774  */
8775
8776 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8777                        bool leading)
8778 {
8779         struct bnx2x_queue_state_params q_params = {NULL};
8780         struct bnx2x_queue_setup_params *setup_params =
8781                                                 &q_params.params.setup;
8782         struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8783                                                 &q_params.params.tx_only;
8784         int rc;
8785         u8 tx_index;
8786
8787         DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8788
8789         /* reset IGU state; skip for the FCoE L2 queue */
8790         if (!IS_FCOE_FP(fp))
8791                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8792                              IGU_INT_ENABLE, 0);
8793
8794         q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8795         /* We want to wait for completion in this context */
8796         __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8797
8798         /* Prepare the INIT parameters */
8799         bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8800
8801         /* Set the command */
8802         q_params.cmd = BNX2X_Q_CMD_INIT;
8803
8804         /* Change the state to INIT */
8805         rc = bnx2x_queue_state_change(bp, &q_params);
8806         if (rc) {
8807                 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8808                 return rc;
8809         }
8810
8811         DP(NETIF_MSG_IFUP, "init complete\n");
8812
8813         /* Now move the Queue to the SETUP state... */
8814         memset(setup_params, 0, sizeof(*setup_params));
8815
8816         /* Set QUEUE flags */
8817         setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8818
8819         /* Set general SETUP parameters */
8820         bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8821                                 FIRST_TX_COS_INDEX);
8822
8823         bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8824                             &setup_params->rxq_params);
8825
8826         bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8827                            FIRST_TX_COS_INDEX);
8828
8829         /* Set the command */
8830         q_params.cmd = BNX2X_Q_CMD_SETUP;
8831
8832         if (IS_FCOE_FP(fp))
8833                 bp->fcoe_init = true;
8834
8835         /* Change the state to SETUP */
8836         rc = bnx2x_queue_state_change(bp, &q_params);
8837         if (rc) {
8838                 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8839                 return rc;
8840         }
8841
8842         /* loop through the relevant tx-only indices */
8843         for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8844               tx_index < fp->max_cos;
8845               tx_index++) {
8846
8847                 /* prepare and send tx-only ramrod */
8848                 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8849                                           tx_only_params, tx_index, leading);
8850                 if (rc) {
8851                         BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8852                                   fp->index, tx_index);
8853                         return rc;
8854                 }
8855         }
8856
8857         return rc;
8858 }
8859
8860 static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8861 {
8862         struct bnx2x_fastpath *fp = &bp->fp[index];
8863         struct bnx2x_fp_txdata *txdata;
8864         struct bnx2x_queue_state_params q_params = {NULL};
8865         int rc, tx_index;
8866
8867         DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8868
8869         q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8870         /* We want to wait for completion in this context */
8871         __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8872
8873         /* close tx-only connections */
8874         for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8875              tx_index < fp->max_cos;
8876              tx_index++){
8877
8878                 /* ascertain this is a normal queue */
8879                 txdata = fp->txdata_ptr[tx_index];
8880
8881                 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8882                                                         txdata->txq_index);
8883
8884                 /* send terminate ramrod on the tx-only connection */
8885                 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8886                 memset(&q_params.params.terminate, 0,
8887                        sizeof(q_params.params.terminate));
8888                 q_params.params.terminate.cid_index = tx_index;
8889
8890                 rc = bnx2x_queue_state_change(bp, &q_params);
8891                 if (rc)
8892                         return rc;
8893
8894                 /* send CFC delete ramrod on the tx-only connection */
8895                 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8896                 memset(&q_params.params.cfc_del, 0,
8897                        sizeof(q_params.params.cfc_del));
8898                 q_params.params.cfc_del.cid_index = tx_index;
8899                 rc = bnx2x_queue_state_change(bp, &q_params);
8900                 if (rc)
8901                         return rc;
8902         }
8903         /* Stop the primary connection: */
8904         /* ...halt the connection */
8905         q_params.cmd = BNX2X_Q_CMD_HALT;
8906         rc = bnx2x_queue_state_change(bp, &q_params);
8907         if (rc)
8908                 return rc;
8909
8910         /* ...terminate the connection */
8911         q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8912         memset(&q_params.params.terminate, 0,
8913                sizeof(q_params.params.terminate));
8914         q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8915         rc = bnx2x_queue_state_change(bp, &q_params);
8916         if (rc)
8917                 return rc;
8918         /* ...delete cfc entry */
8919         q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8920         memset(&q_params.params.cfc_del, 0,
8921                sizeof(q_params.params.cfc_del));
8922         q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8923         return bnx2x_queue_state_change(bp, &q_params);
8924 }
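
/* A minimal sketch (hypothetical table, not used by the driver): the
 * teardown order that bnx2x_stop_queue() above walks for the primary
 * connection, expressed as data. The command values are the driver's own.
 */
static const int bnx2x_q_teardown_seq_sketch[] __maybe_unused = {
	BNX2X_Q_CMD_HALT,	/* halt the connection */
	BNX2X_Q_CMD_TERMINATE,	/* terminate it in the FW */
	BNX2X_Q_CMD_CFC_DEL,	/* delete its CFC entry */
};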
8925
8926 static void bnx2x_reset_func(struct bnx2x *bp)
8927 {
8928         int port = BP_PORT(bp);
8929         int func = BP_FUNC(bp);
8930         int i;
8931
8932         /* Disable the function in the FW */
8933         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8934         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8935         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8936         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8937
8938         /* FP SBs */
8939         for_each_eth_queue(bp, i) {
8940                 struct bnx2x_fastpath *fp = &bp->fp[i];
8941                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8942                            CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8943                            SB_DISABLED);
8944         }
8945
8946         if (CNIC_LOADED(bp))
8947                 /* CNIC SB */
8948                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8949                         CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8950                         (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8951
8952         /* SP SB */
8953         REG_WR8(bp, BAR_CSTRORM_INTMEM +
8954                 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
8955                 SB_DISABLED);
8956
8957         for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
8958                 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
8959                        0);
8960
8961         /* Configure IGU */
8962         if (bp->common.int_block == INT_BLOCK_HC) {
8963                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8964                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8965         } else {
8966                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8967                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8968         }
8969
8970         if (CNIC_LOADED(bp)) {
8971                 /* Disable Timer scan */
8972                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8973                 /*
8974                  * Wait for at least 10ms and up to 2 seconds for the
8975                  * timers scan to complete
8976                  */
8977                 for (i = 0; i < 200; i++) {
8978                         usleep_range(10000, 20000);
8979                         if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8980                                 break;
8981                 }
8982         }
8983         /* Clear ILT */
8984         bnx2x_clear_func_ilt(bp, func);
8985
8986         /* Timers bug workaround for E2: if this is vnic-3,
8987          * we need to set the entire ILT range for these timers.
8988          */
8989         if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
8990                 struct ilt_client_info ilt_cli;
8991                 /* use dummy TM client */
8992                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
8993                 ilt_cli.start = 0;
8994                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
8995                 ilt_cli.client_num = ILT_CLIENT_TM;
8996
8997                 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
8998         }
8999
9000         /* this assumes that reset_port() was called before reset_func() */
9001         if (!CHIP_IS_E1x(bp))
9002                 bnx2x_pf_disable(bp);
9003
9004         bp->dmae_ready = 0;
9005 }
9006
9007 static void bnx2x_reset_port(struct bnx2x *bp)
9008 {
9009         int port = BP_PORT(bp);
9010         u32 val;
9011
9012         /* Reset physical Link */
9013         bnx2x__link_reset(bp);
9014
9015         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
9016
9017         /* Do not receive packets into the BRB */
9018         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
9019         /* Do not direct received packets that are not for the MCP to the BRB */
9020         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
9021                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9022
9023         /* Configure AEU */
9024         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
9025
9026         msleep(100);
9027         /* Check for BRB port occupancy */
9028         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
9029         if (val)
9030                 DP(NETIF_MSG_IFDOWN,
9031                    "BRB1 is not empty  %d blocks are occupied\n", val);
9032
9033         /* TODO: Close Doorbell port? */
9034 }
9035
9036 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
9037 {
9038         struct bnx2x_func_state_params func_params = {NULL};
9039
9040         /* Prepare parameters for function state transitions */
9041         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9042
9043         func_params.f_obj = &bp->func_obj;
9044         func_params.cmd = BNX2X_F_CMD_HW_RESET;
9045
9046         func_params.params.hw_init.load_phase = load_code;
9047
9048         return bnx2x_func_state_change(bp, &func_params);
9049 }
9050
9051 static int bnx2x_func_stop(struct bnx2x *bp)
9052 {
9053         struct bnx2x_func_state_params func_params = {NULL};
9054         int rc;
9055
9056         /* Prepare parameters for function state transitions */
9057         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9058         func_params.f_obj = &bp->func_obj;
9059         func_params.cmd = BNX2X_F_CMD_STOP;
9060
9061         /*
9062          * Try to stop the function the 'good way'. If it fails (in case
9063          * of a parity error during bnx2x_chip_cleanup()) and we are
9064          * not in debug mode, perform a state transaction in order to
9065          * enable a further HW_RESET transaction.
9066          */
9067         rc = bnx2x_func_state_change(bp, &func_params);
9068         if (rc) {
9069 #ifdef BNX2X_STOP_ON_ERROR
9070                 return rc;
9071 #else
9072                 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
9073                 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
9074                 return bnx2x_func_state_change(bp, &func_params);
9075 #endif
9076         }
9077
9078         return 0;
9079 }
9080
9081 /**
9082  * bnx2x_send_unload_req - request unload mode from the MCP.
9083  *
9084  * @bp:                 driver handle
9085  * @unload_mode:        requested function's unload mode
9086  *
9087  * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
9088  */
9089 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
9090 {
9091         u32 reset_code = 0;
9092         int port = BP_PORT(bp);
9093
9094         /* Select the UNLOAD request mode */
9095         if (unload_mode == UNLOAD_NORMAL)
9096                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9097
9098         else if (bp->flags & NO_WOL_FLAG)
9099                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
9100
9101         else if (bp->wol) {
9102                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
9103                 u8 *mac_addr = bp->dev->dev_addr;
9104                 struct pci_dev *pdev = bp->pdev;
9105                 u32 val;
9106                 u16 pmc;
9107
9108                 /* The MAC address is written to entries 1-4 to
9109                  * preserve entry 0, which is used by the PMF
9110                  */
9111                 u8 entry = (BP_VN(bp) + 1)*8;
9112
9113                 val = (mac_addr[0] << 8) | mac_addr[1];
9114                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
9115
9116                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
9117                       (mac_addr[4] << 8) | mac_addr[5];
9118                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
9119
9120                 /* Enable the PME and clear the status */
9121                 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
9122                 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
9123                 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
9124
9125                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
9126
9127         } else
9128                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9129
9130         /* Send the request to the MCP */
9131         if (!BP_NOMCP(bp))
9132                 reset_code = bnx2x_fw_command(bp, reset_code, 0);
9133         else {
9134                 int path = BP_PATH(bp);
9135
9136                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      %d, %d, %d\n",
9137                    path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9138                    bnx2x_load_count[path][2]);
9139                 bnx2x_load_count[path][0]--;
9140                 bnx2x_load_count[path][1 + port]--;
9141                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  %d, %d, %d\n",
9142                    path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9143                    bnx2x_load_count[path][2]);
9144                 if (bnx2x_load_count[path][0] == 0)
9145                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
9146                 else if (bnx2x_load_count[path][1 + port] == 0)
9147                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
9148                 else
9149                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
9150         }
9151
9152         return reset_code;
9153 }
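
/* A minimal sketch (hypothetical helper, not called by the driver): how the
 * WoL path in bnx2x_send_unload_req() above packs a 6-byte MAC address into
 * the two 32-bit EMAC MAC-match register words (bytes 0-1 into the first
 * word, bytes 2-5 into the second).
 */
static void __maybe_unused bnx2x_wol_mac_pack_sketch(const u8 *mac,
						     u32 *hi, u32 *lo)
{
	*hi = (mac[0] << 8) | mac[1];
	*lo = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
}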
9154
9155 /**
9156  * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
9157  *
9158  * @bp:         driver handle
9159  * @keep_link:          true iff link should be kept up
9160  */
9161 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
9162 {
9163         u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
9164
9165         /* Report UNLOAD_DONE to MCP */
9166         if (!BP_NOMCP(bp))
9167                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
9168 }
9169
9170 static int bnx2x_func_wait_started(struct bnx2x *bp)
9171 {
9172         int tout = 50;
9173         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
9174
9175         if (!bp->port.pmf)
9176                 return 0;
9177
9178         /*
9179          * (assumption: no attention from the MCP at this stage)
9180          * The PMF is probably in the middle of a TX disable/enable transaction:
9181          * 1. Sync the IRQ for the default SB
9182          * 2. Sync the SP queue - this guarantees that attention handling started
9183          * 3. Wait until the TX disable/enable transaction completes
9184          *
9185          * 1+2 guarantee that if a DCBx attention was scheduled, it has already
9186          * changed the transaction's pending bit from STARTED to TX_STOPPED; if we
9187          * have already received the completion, the state is TX_STOPPED. The
9188          * state will return to STARTED after completion of the
9189          * TX_STOPPED-->STARTED transaction.
9190          */
9191
9192         /* make sure default SB ISR is done */
9193         if (msix)
9194                 synchronize_irq(bp->msix_table[0].vector);
9195         else
9196                 synchronize_irq(bp->pdev->irq);
9197
9198         flush_workqueue(bnx2x_wq);
9199         flush_workqueue(bnx2x_iov_wq);
9200
9201         while (bnx2x_func_get_state(bp, &bp->func_obj) !=
9202                                 BNX2X_F_STATE_STARTED && tout--)
9203                 msleep(20);
9204
9205         if (bnx2x_func_get_state(bp, &bp->func_obj) !=
9206                                                 BNX2X_F_STATE_STARTED) {
9207 #ifdef BNX2X_STOP_ON_ERROR
9208                 BNX2X_ERR("Wrong function state\n");
9209                 return -EBUSY;
9210 #else
9211                 /*
9212                  * Failed to complete the transaction in a "good way"
9213                  * Force both transactions with CLR bit
9214                  */
9215                 struct bnx2x_func_state_params func_params = {NULL};
9216
9217                 DP(NETIF_MSG_IFDOWN,
9218                    "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
9219
9220                 func_params.f_obj = &bp->func_obj;
9221                 __set_bit(RAMROD_DRV_CLR_ONLY,
9222                                         &func_params.ramrod_flags);
9223
9224                 /* STARTED-->TX_STOPPED */
9225                 func_params.cmd = BNX2X_F_CMD_TX_STOP;
9226                 bnx2x_func_state_change(bp, &func_params);
9227
9228                 /* TX_STOPPED-->STARTED */
9229                 func_params.cmd = BNX2X_F_CMD_TX_START;
9230                 return bnx2x_func_state_change(bp, &func_params);
9231 #endif
9232         }
9233
9234         return 0;
9235 }
9236
9237 static void bnx2x_disable_ptp(struct bnx2x *bp)
9238 {
9239         int port = BP_PORT(bp);
9240
9241         /* Disable sending PTP packets to host */
9242         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
9243                NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
9244
9245         /* Reset PTP event detection rules */
9246         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
9247                NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
9248         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
9249                NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
9250         REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
9251                NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
9252         REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
9253                NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
9254
9255         /* Disable the PTP feature */
9256         REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
9257                NIG_REG_P0_PTP_EN, 0x0);
9258 }
9259
9260 /* Called during unload, to stop PTP-related stuff */
9261 static void bnx2x_stop_ptp(struct bnx2x *bp)
9262 {
9263         /* Cancel the PTP work. This should be done after the Tx queues are
9264          * drained to prevent additional scheduling.
9265          */
9266         cancel_work_sync(&bp->ptp_task);
9267
9268         if (bp->ptp_tx_skb) {
9269                 dev_kfree_skb_any(bp->ptp_tx_skb);
9270                 bp->ptp_tx_skb = NULL;
9271         }
9272
9273         /* Disable PTP in HW */
9274         bnx2x_disable_ptp(bp);
9275
9276         DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
9277 }
9278
9279 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
9280 {
9281         int port = BP_PORT(bp);
9282         int i, rc = 0;
9283         u8 cos;
9284         struct bnx2x_mcast_ramrod_params rparam = {NULL};
9285         u32 reset_code;
9286
9287         /* Wait until tx fastpath tasks complete */
9288         for_each_tx_queue(bp, i) {
9289                 struct bnx2x_fastpath *fp = &bp->fp[i];
9290
9291                 for_each_cos_in_tx_queue(fp, cos)
9292                         rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
9293 #ifdef BNX2X_STOP_ON_ERROR
9294                 if (rc)
9295                         return;
9296 #endif
9297         }
9298
9299         /* Give HW time to discard old tx messages */
9300         usleep_range(1000, 2000);
9301
9302         /* Clean all ETH MACs */
9303         rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
9304                                 false);
9305         if (rc < 0)
9306                 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
9307
9308         /* Clean up UC list  */
9309         rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
9310                                 true);
9311         if (rc < 0)
9312                 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
9313                           rc);
9314
9315         /* Disable LLH */
9316         if (!CHIP_IS_E1(bp))
9317                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
9318
9319         /* Set "drop all" (stop Rx).
9320          * We need to take a netif_addr_lock() here in order to prevent
9321          * a race between the completion code and this code.
9322          */
9323         netif_addr_lock_bh(bp->dev);
9324         /* Schedule the rx_mode command */
9325         if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
9326                 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
9327         else
9328                 bnx2x_set_storm_rx_mode(bp);
9329
9330         /* Cleanup multicast configuration */
9331         rparam.mcast_obj = &bp->mcast_obj;
9332         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
9333         if (rc < 0)
9334                 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
9335
9336         netif_addr_unlock_bh(bp->dev);
9337
9338         bnx2x_iov_chip_cleanup(bp);
9339
9340         /*
9341          * Send the UNLOAD_REQUEST to the MCP. This will return whether
9342          * this function should perform a FUNC, PORT or COMMON HW
9343          * reset.
9344          */
9345         reset_code = bnx2x_send_unload_req(bp, unload_mode);
9346
9347         /*
9348          * (assumption: no attention from the MCP at this stage)
9349          * The PMF is probably in the middle of a TX disable/enable transaction
9350          */
9351         rc = bnx2x_func_wait_started(bp);
9352         if (rc) {
9353                 BNX2X_ERR("bnx2x_func_wait_started failed\n");
9354 #ifdef BNX2X_STOP_ON_ERROR
9355                 return;
9356 #endif
9357         }
9358
9359         /* Close multi and leading connections
9360          * Completions for ramrods are collected in a synchronous way
9361          */
9362         for_each_eth_queue(bp, i)
9363                 if (bnx2x_stop_queue(bp, i))
9364 #ifdef BNX2X_STOP_ON_ERROR
9365                         return;
9366 #else
9367                         goto unload_error;
9368 #endif
9369
9370         if (CNIC_LOADED(bp)) {
9371                 for_each_cnic_queue(bp, i)
9372                         if (bnx2x_stop_queue(bp, i))
9373 #ifdef BNX2X_STOP_ON_ERROR
9374                                 return;
9375 #else
9376                                 goto unload_error;
9377 #endif
9378         }
9379
9380         /* If SP settings didn't get completed so far - something
9381          * very wrong has happened.
9382          */
9383         if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
9384                 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
9385
9386 #ifndef BNX2X_STOP_ON_ERROR
9387 unload_error:
9388 #endif
9389         rc = bnx2x_func_stop(bp);
9390         if (rc) {
9391                 BNX2X_ERR("Function stop failed!\n");
9392 #ifdef BNX2X_STOP_ON_ERROR
9393                 return;
9394 #endif
9395         }
9396
9397         /* stop_ptp should be after the Tx queues are drained to prevent
9398          * scheduling to the cancelled PTP work queue. It should also be after
9399          * the function stop ramrod is sent, since as part of that ramrod the
9400          * FW accesses PTP registers.
9401          */
9402         if (bp->flags & PTP_SUPPORTED)
9403                 bnx2x_stop_ptp(bp);
9404
9405         /* Disable HW interrupts, NAPI */
9406         bnx2x_netif_stop(bp, 1);
9407         /* Delete all NAPI objects */
9408         bnx2x_del_all_napi(bp);
9409         if (CNIC_LOADED(bp))
9410                 bnx2x_del_all_napi_cnic(bp);
9411
9412         /* Release IRQs */
9413         bnx2x_free_irq(bp);
9414
9415         /* Reset the chip */
9416         rc = bnx2x_reset_hw(bp, reset_code);
9417         if (rc)
9418                 BNX2X_ERR("HW_RESET failed\n");
9419
9420         /* Report UNLOAD_DONE to MCP */
9421         bnx2x_send_unload_done(bp, keep_link);
9422 }
9423
9424 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
9425 {
9426         u32 val;
9427
9428         DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
9429
9430         if (CHIP_IS_E1(bp)) {
9431                 int port = BP_PORT(bp);
9432                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9433                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
9434
9435                 val = REG_RD(bp, addr);
9436                 val &= ~(0x300);
9437                 REG_WR(bp, addr, val);
9438         } else {
9439                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
9440                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
9441                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
9442                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
9443         }
9444 }
9445
9446 /* Close gates #2, #3 and #4: */
9447 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
9448 {
9449         u32 val;
9450
9451         /* Gates #2 and #4a are closed/opened for "not E1" only */
9452         if (!CHIP_IS_E1(bp)) {
9453                 /* #4 */
9454                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
9455                 /* #2 */
9456                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
9457         }
9458
9459         /* #3 */
9460         if (CHIP_IS_E1x(bp)) {
9461                 /* Prevent interrupts from HC on both ports */
9462                 val = REG_RD(bp, HC_REG_CONFIG_1);
9463                 REG_WR(bp, HC_REG_CONFIG_1,
9464                        (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
9465                        (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
9466
9467                 val = REG_RD(bp, HC_REG_CONFIG_0);
9468                 REG_WR(bp, HC_REG_CONFIG_0,
9469                        (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
9470                        (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
9471         } else {
9472                 /* Prevent incoming interrupts in IGU */
9473                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9474
9475                 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
9476                        (!close) ?
9477                        (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
9478                        (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
9479         }
9480
9481         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
9482                 close ? "closing" : "opening");
9483         mmiowb();
9484 }
9485
9486 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
9487
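/**
 * bnx2x_clp_reset_prep - save and set the `magic' bit.
 *
 * @bp:         driver handle
 * @magic_val:  location in which to save the old value of the `magic' bit.
 */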
9488 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
9489 {
9490         /* Do some magic... */
9491         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9492         *magic_val = val & SHARED_MF_CLP_MAGIC;
9493         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
9494 }
9495
9496 /**
9497  * bnx2x_clp_reset_done - restore the value of the `magic' bit.
9498  *
9499  * @bp:         driver handle
9500  * @magic_val:  old value of the `magic' bit.
9501  */
9502 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
9503 {
9504         /* Restore the `magic' bit value... */
9505         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9506         MF_CFG_WR(bp, shared_mf_config.clp_mb,
9507                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
9508 }
9509
9510 /**
9511  * bnx2x_reset_mcp_prep - prepare for MCP reset.
9512  *
9513  * @bp:         driver handle
9514  * @magic_val:  old value of 'magic' bit.
9515  *
9516  * Takes care of CLP configurations.
9517  */
9518 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9519 {
9520         u32 shmem;
9521         u32 validity_offset;
9522
9523         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
9524
9525         /* Set `magic' bit in order to save MF config */
9526         if (!CHIP_IS_E1(bp))
9527                 bnx2x_clp_reset_prep(bp, magic_val);
9528
9529         /* Get shmem offset */
9530         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9531         validity_offset =
9532                 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
9533
9534         /* Clear validity map flags */
9535         if (shmem > 0)
9536                 REG_WR(bp, shmem + validity_offset, 0);
9537 }
9538
9539 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
9540 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
9541
9542 /**
9543  * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
9544  *
9545  * @bp: driver handle
9546  */
9547 static void bnx2x_mcp_wait_one(struct bnx2x *bp)
9548 {
9549         /* Special handling for emulation and FPGA:
9550          * wait 10 times longer */
9551         if (CHIP_REV_IS_SLOW(bp))
9552                 msleep(MCP_ONE_TIMEOUT*10);
9553         else
9554                 msleep(MCP_ONE_TIMEOUT);
9555 }
9556
9557 /*
9558  * initializes bp->common.shmem_base and waits for validity signature to appear
9559  */
9560 static int bnx2x_init_shmem(struct bnx2x *bp)
9561 {
9562         int cnt = 0;
9563         u32 val = 0;
9564
9565         do {
9566                 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9567
9568                 /* If we read all 0xFFs, it means we are in a PCI error state
9569                  * and should bail out to avoid crashes on the adapter's FW reads.
9570                  */
9571                 if (bp->common.shmem_base == 0xFFFFFFFF) {
9572                         bp->flags |= NO_MCP_FLAG;
9573                         return -ENODEV;
9574                 }
9575
9576                 if (bp->common.shmem_base) {
9577                         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9578                         if (val & SHR_MEM_VALIDITY_MB)
9579                                 return 0;
9580                 }
9581
9582                 bnx2x_mcp_wait_one(bp);
9583
9584         } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
9585
9586         BNX2X_ERR("BAD MCP validity signature\n");
9587
9588         return -ENODEV;
9589 }
9590
9591 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9592 {
9593         int rc = bnx2x_init_shmem(bp);
9594
9595         /* Restore the `magic' bit value */
9596         if (!CHIP_IS_E1(bp))
9597                 bnx2x_clp_reset_done(bp, magic_val);
9598
9599         return rc;
9600 }
9601
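/* Prepare the PXP block for the coming chip reset; not needed on E1 */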
9602 static void bnx2x_pxp_prep(struct bnx2x *bp)
9603 {
9604         if (!CHIP_IS_E1(bp)) {
9605                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9606                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9607                 mmiowb();
9608         }
9609 }
9610
9611 /*
9612  * Reset the whole chip except for:
9613  *      - PCIE core
9614  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
9615  *              one reset bit)
9616  *      - IGU
9617  *      - MISC (including AEU)
9618  *      - GRC
9619  *      - RBCN, RBCP
9620  */
9621 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9622 {
9623         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
9624         u32 global_bits2, stay_reset2;
9625
9626         /*
9627          * Bits that have to be set in reset_mask2 if we want to reset 'global'
9628          * (per chip) blocks.
9629          */
9630         global_bits2 =
9631                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9632                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
9633
9634         /* Don't reset the following blocks.
9635          * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
9636          *            reset, as in a 4-port device they might still be owned
9637          *            by the MCP (there is only one leader per path).
9638          */
9639         not_reset_mask1 =
9640                 MISC_REGISTERS_RESET_REG_1_RST_HC |
9641                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9642                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9643
9644         not_reset_mask2 =
9645                 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
9646                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9647                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9648                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9649                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9650                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
9651                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
9652                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9653                 MISC_REGISTERS_RESET_REG_2_RST_ATC |
9654                 MISC_REGISTERS_RESET_REG_2_PGLC |
9655                 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9656                 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9657                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9658                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9659                 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9660                 MISC_REGISTERS_RESET_REG_2_UMAC1;
9661
9662         /*
9663          * Keep the following blocks in reset:
9664          *  - all xxMACs are handled by the bnx2x_link code.
9665          */
9666         stay_reset2 =
9667                 MISC_REGISTERS_RESET_REG_2_XMAC |
9668                 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9669
9670         /* Full reset masks according to the chip */
9671         reset_mask1 = 0xffffffff;
9672
9673         if (CHIP_IS_E1(bp))
9674                 reset_mask2 = 0xffff;
9675         else if (CHIP_IS_E1H(bp))
9676                 reset_mask2 = 0x1ffff;
9677         else if (CHIP_IS_E2(bp))
9678                 reset_mask2 = 0xfffff;
9679         else /* CHIP_IS_E3 */
9680                 reset_mask2 = 0x3ffffff;
9681
9682         /* Don't reset global blocks unless we need to */
9683         if (!global)
9684                 reset_mask2 &= ~global_bits2;
9685
9686         /*
9687          * In case of attention in the QM, we need to reset PXP
9688          * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
9689          * because otherwise QM reset would release 'close the gates' shortly
9690          * before resetting the PXP, then the PSWRQ would send a write
9691          * request to PGLUE. Then when PXP is reset, PGLUE would try to
9692          * read the payload data from PSWWR, but PSWWR would not
9693          * respond. The write queue in PGLUE would get stuck, and DMAE
9694          * commands would not return. Therefore it's important to reset the second
9695          * reset register (containing the
9696          * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
9697          * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
9698          * bit).
9699          */
9700         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9701                reset_mask2 & (~not_reset_mask2));
9702
9703         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9704                reset_mask1 & (~not_reset_mask1));
9705
9706         barrier();
9707         mmiowb();
9708
9709         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9710                reset_mask2 & (~stay_reset2));
9711
9712         barrier();
9713         mmiowb();
9714
9715         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9716         mmiowb();
9717 }
9718
9719 /**
9720  * bnx2x_er_poll_igu_vq - poll for the IGU pending writes bit.
9721  *
9722  * @bp: driver handle
9723  *
9724  * The pending writes bit should get cleared in no more than 1s.
9725  *
9726  * Returns 0 if the pending writes bit gets cleared.
9727  */
9728 static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9729 {
9730         u32 cnt = 1000;
9731         u32 pend_bits = 0;
9732
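        /* Poll the IGU pending-writes status roughly once per millisecond,
         * for about one second in total.
         */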
9733         do {
9734                 pend_bits  = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9735
9736                 if (pend_bits == 0)
9737                         break;
9738
9739                 usleep_range(1000, 2000);
9740         } while (cnt-- > 0);
9741
9742         if (pend_bits) {
9743                 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9744                           pend_bits);
9745                 return -EBUSY;
9746         }
9747
9748         return 0;
9749 }
9750
9751 static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9752 {
9753         int cnt = 1000;
9754         u32 val = 0;
9755         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9756         u32 tags_63_32 = 0;
9757
9758         /* Empty the Tetris buffer, wait for 1s */
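        /* The loop below exits once the PXP read-client counters return to
         * their idle values, i.e. no read requests remain outstanding.
         */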
9759         do {
9760                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9761                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9762                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9763                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9764                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9765                 if (CHIP_IS_E3(bp))
9766                         tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9767
9768                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9769                     ((port_is_idle_0 & 0x1) == 0x1) &&
9770                     ((port_is_idle_1 & 0x1) == 0x1) &&
9771                     (pgl_exp_rom2 == 0xffffffff) &&
9772                     (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9773                         break;
9774                 usleep_range(1000, 2000);
9775         } while (cnt-- > 0);
9776
9777         if (cnt < 0) {
9778                 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9779                 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9780                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9781                           pgl_exp_rom2);
9782                 return -EAGAIN;
9783         }
9784
9785         barrier();
9786
9787         /* Close gates #2, #3 and #4 */
9788         bnx2x_set_234_gates(bp, true);
9789
9790         /* Poll for IGU VQs for 57712 and newer chips */
9791         if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9792                 return -EAGAIN;
9793
9794         /* TBD: Indicate that "process kill" is in progress to MCP */
9795
9796         /* Clear "unprepared" bit */
9797         REG_WR(bp, MISC_REG_UNPREPARED, 0);
9798         barrier();
9799
9800         /* Make sure all is written to the chip before the reset */
9801         mmiowb();
9802
9803         /* Wait for 1ms to empty GLUE and PCI-E core queues,
9804          * PSWHST, GRC and PSWRD Tetris buffer.
9805          */
9806         usleep_range(1000, 2000);
9807
9808         /* Prepare for chip reset: */
9809         /* MCP */
9810         if (global)
9811                 bnx2x_reset_mcp_prep(bp, &val);
9812
9813         /* PXP */
9814         bnx2x_pxp_prep(bp);
9815         barrier();
9816
9817         /* reset the chip */
9818         bnx2x_process_kill_chip_reset(bp, global);
9819         barrier();
9820
9821         /* clear errors in PGB */
9822         if (!CHIP_IS_E1x(bp))
9823                 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
9824
9825         /* Recover after reset: */
9826         /* MCP */
9827         if (global && bnx2x_reset_mcp_comp(bp, val))
9828                 return -EAGAIN;
9829
9830         /* TBD: Add resetting the NO_MCP mode DB here */
9831
9832         /* Open the gates #2, #3 and #4 */
9833         bnx2x_set_234_gates(bp, false);
9834
9835         /* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
9836          * reset state, re-enable attentions. */
9837
9838         return 0;
9839 }
9840
9841 static int bnx2x_leader_reset(struct bnx2x *bp)
9842 {
9843         int rc = 0;
9844         bool global = bnx2x_reset_is_global(bp);
9845         u32 load_code;
9846
9847         /* if not going to reset the MCP, load a "fake" driver to reset the
9848          * HW while this driver is still the owner of the HW
9849          */
9850         if (!global && !BP_NOMCP(bp)) {
9851                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9852                                              DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9853                 if (!load_code) {
9854                         BNX2X_ERR("MCP response failure, aborting\n");
9855                         rc = -EAGAIN;
9856                         goto exit_leader_reset;
9857                 }
9858                 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9859                     (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9860                         BNX2X_ERR("MCP unexpected resp, aborting\n");
9861                         rc = -EAGAIN;
9862                         goto exit_leader_reset2;
9863                 }
9864                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9865                 if (!load_code) {
9866                         BNX2X_ERR("MCP response failure, aborting\n");
9867                         rc = -EAGAIN;
9868                         goto exit_leader_reset2;
9869                 }
9870         }
9871
9872         /* Try to recover after the failure */
9873         if (bnx2x_process_kill(bp, global)) {
9874                 BNX2X_ERR("Something bad has happened on engine %d!\n",
9875                           BP_PATH(bp));
9876                 rc = -EAGAIN;
9877                 goto exit_leader_reset2;
9878         }
9879
9880         /*
9881          * Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
9882          * state.
9883          */
9884         bnx2x_set_reset_done(bp);
9885         if (global)
9886                 bnx2x_clear_reset_global(bp);
9887
9888 exit_leader_reset2:
9889         /* unload "fake driver" if it was loaded */
9890         if (!global && !BP_NOMCP(bp)) {
9891                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9892                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9893         }
9894 exit_leader_reset:
9895         bp->is_leader = 0;
9896         bnx2x_release_leader_lock(bp);
9897         smp_mb();
9898         return rc;
9899 }
9900
9901 static void bnx2x_recovery_failed(struct bnx2x *bp)
9902 {
9903         netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9904
9905         /* Disconnect this device */
9906         netif_device_detach(bp->dev);
9907
9908         /*
9909          * Block ifup for all functions on this engine until "process kill"
9910          * or power cycle.
9911          */
9912         bnx2x_set_reset_in_progress(bp);
9913
9914         /* Shut down the power */
9915         bnx2x_set_power_state(bp, PCI_D3hot);
9916
9917         bp->recovery_state = BNX2X_RECOVERY_FAILED;
9918
9919         smp_mb();
9920 }
9921
9922 /*
9923  * Assumption: runs under rtnl lock. This together with the fact
9924  * that it's called only from bnx2x_sp_rtnl() ensures that it
9925  * will never be called when netif_running(bp->dev) is false.
9926  */
9927 static void bnx2x_parity_recover(struct bnx2x *bp)
9928 {
9929         bool global = false;
9930         u32 error_recovered, error_unrecovered;
9931         bool is_parity;
9932
9933         DP(NETIF_MSG_HW, "Handling parity\n");
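        /* Recovery state machine: in INIT the function unloads and tries to
         * become the leader; in WAIT the leader resets the chip once all
         * functions are down, while non-leaders wait for the reset to
         * complete and then reload.
         */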
9934         while (1) {
9935                 switch (bp->recovery_state) {
9936                 case BNX2X_RECOVERY_INIT:
9937                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
9938                         is_parity = bnx2x_chk_parity_attn(bp, &global, false);
9939                         WARN_ON(!is_parity);
9940
9941                         /* Try to get a LEADER_LOCK HW lock */
9942                         if (bnx2x_trylock_leader_lock(bp)) {
9943                                 bnx2x_set_reset_in_progress(bp);
9944                                 /*
9945                                  * Check if there is a global attention and if
9946                                  * there was a global attention, set the global
9947                                  * reset bit.
9948                                  */
9949
9950                                 if (global)
9951                                         bnx2x_set_reset_global(bp);
9952
9953                                 bp->is_leader = 1;
9954                         }
9955
9956                         /* Stop the driver */
9957                         /* If interface has been removed - break */
9958                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
9959                                 return;
9960
9961                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
9962
9963                         /* Ensure "is_leader", MCP command sequence and
9964                          * "recovery_state" update values are seen on other
9965                          * CPUs.
9966                          */
9967                         smp_mb();
9968                         break;
9969
9970                 case BNX2X_RECOVERY_WAIT:
9971                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
9972                         if (bp->is_leader) {
9973                                 int other_engine = BP_PATH(bp) ? 0 : 1;
9974                                 bool other_load_status =
9975                                         bnx2x_get_load_status(bp, other_engine);
9976                                 bool load_status =
9977                                         bnx2x_get_load_status(bp, BP_PATH(bp));
9978                                 global = bnx2x_reset_is_global(bp);
9979
9980                                 /*
9981                                  * In case of a parity in a global block, let
9982                                  * the first leader that performs a
9983                                  * leader_reset() reset the global blocks in
9984                                  * order to clear global attentions. Otherwise
9985                                  * the gates will remain closed for that
9986                                  * engine.
9987                                  */
9988                                 if (load_status ||
9989                                     (global && other_load_status)) {
9990                                         /* Wait until all other functions
9991                                          * are down.
9992                                          */
9993                                         schedule_delayed_work(&bp->sp_rtnl_task,
9994                                                                 HZ/10);
9995                                         return;
9996                                 } else {
9997                                         /* If all other functions are down,
9998                                          * try to bring the chip back to
9999                                          * normal. In any case it's an exit
10000                                          * point for a leader.
10001                                          */
10002                                         if (bnx2x_leader_reset(bp)) {
10003                                                 bnx2x_recovery_failed(bp);
10004                                                 return;
10005                                         }
10006
10007                                         /* If we are here, it means that
10008                                          * the leader has succeeded and no
10009                                          * longer wants to be the leader.
10010                                          * Try to continue as a non-leader.
10011                                          */
10012                                         break;
10013                                 }
10014                         } else { /* non-leader */
10015                                 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
10016                                         /* Try to get a LEADER_LOCK HW lock,
10017                                          * as a former leader may have been
10018                                          * unloaded by the user or may have
10019                                          * released leadership for some
10020                                          * other reason.
10021                                          */
10022                                         if (bnx2x_trylock_leader_lock(bp)) {
10023                                                 /* I'm a leader now! Restart a
10024                                                  * switch case.
10025                                                  */
10026                                                 bp->is_leader = 1;
10027                                                 break;
10028                                         }
10029
10030                                         schedule_delayed_work(&bp->sp_rtnl_task,
10031                                                                 HZ/10);
10032                                         return;
10033
10034                                 } else {
10035                                         /*
10036                                          * If there was a global attention, wait
10037                                          * for it to be cleared.
10038                                          */
10039                                         if (bnx2x_reset_is_global(bp)) {
10040                                                 schedule_delayed_work(
10041                                                         &bp->sp_rtnl_task,
10042                                                         HZ/10);
10043                                                 return;
10044                                         }
10045
10046                                         error_recovered =
10047                                           bp->eth_stats.recoverable_error;
10048                                         error_unrecovered =
10049                                           bp->eth_stats.unrecoverable_error;
10050                                         bp->recovery_state =
10051                                                 BNX2X_RECOVERY_NIC_LOADING;
10052                                         if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
10053                                                 error_unrecovered++;
10054                                                 netdev_err(bp->dev,
10055                                                            "Recovery failed. Power cycle needed\n");
10056                                                 /* Disconnect this device */
10057                                                 netif_device_detach(bp->dev);
10058                                                 /* Shut down the power */
10059                                                 bnx2x_set_power_state(
10060                                                         bp, PCI_D3hot);
10061                                                 smp_mb();
10062                                         } else {
10063                                                 bp->recovery_state =
10064                                                         BNX2X_RECOVERY_DONE;
10065                                                 error_recovered++;
10066                                                 smp_mb();
10067                                         }
10068                                         bp->eth_stats.recoverable_error =
10069                                                 error_recovered;
10070                                         bp->eth_stats.unrecoverable_error =
10071                                                 error_unrecovered;
10072
10073                                         return;
10074                                 }
10075                         }
10076                 default:
10077                         return;
10078                 }
10079         }
10080 }
10081
10082 #ifdef CONFIG_BNX2X_VXLAN
10083 static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port)
10084 {
10085         struct bnx2x_func_switch_update_params *switch_update_params;
10086         struct bnx2x_func_state_params func_params = {NULL};
10087         int rc;
10088
10089         switch_update_params = &func_params.params.switch_update;
10090
10091         /* Prepare parameters for function state transitions */
10092         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
10093         __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
10094
10095         func_params.f_obj = &bp->func_obj;
10096         func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
10097
10098         /* Function parameters */
10099         __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
10100                   &switch_update_params->changes);
10101         switch_update_params->vxlan_dst_port = port;
10102         rc = bnx2x_func_state_change(bp, &func_params);
10103         if (rc)
10104                 BNX2X_ERR("failed to change vxlan dst port to %d (rc = 0x%x)\n",
10105                           port, rc);
10106         return rc;
10107 }
10108
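/* The device supports a single VXLAN destination port at a time;
 * vxlan_dst_port_count reference-counts registrations of that port.
 */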
10109 static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port)
10110 {
10111         if (!netif_running(bp->dev))
10112                 return;
10113
10114         if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) {
10115                 bp->vxlan_dst_port_count++;
10116                 return;
10117         }
10118
10119         if (bp->vxlan_dst_port_count || !IS_PF(bp)) {
10120                 DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n");
10121                 return;
10122         }
10123
10124         bp->vxlan_dst_port = port;
10125         bp->vxlan_dst_port_count = 1;
10126         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0);
10127 }
10128
10129 static void bnx2x_add_vxlan_port(struct net_device *netdev,
10130                                  sa_family_t sa_family, __be16 port)
10131 {
10132         struct bnx2x *bp = netdev_priv(netdev);
10133         u16 t_port = ntohs(port);
10134
10135         __bnx2x_add_vxlan_port(bp, t_port);
10136 }
10137
10138 static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
10139 {
10140         if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port ||
10141             !IS_PF(bp)) {
10142                 DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
10143                 return;
10144         }
10145         bp->vxlan_dst_port_count--;
10146         if (bp->vxlan_dst_port_count)
10147                 return;
10148
10149         if (netif_running(bp->dev)) {
10150                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0);
10151         } else {
10152                 bp->vxlan_dst_port = 0;
10153                 netdev_info(bp->dev, "Deleted vxlan dest port %d\n", port);
10154         }
10155 }
10156
10157 static void bnx2x_del_vxlan_port(struct net_device *netdev,
10158                                  sa_family_t sa_family, __be16 port)
10159 {
10160         struct bnx2x *bp = netdev_priv(netdev);
10161         u16 t_port = ntohs(port);
10162
10163         __bnx2x_del_vxlan_port(bp, t_port);
10164 }
10165 #endif
10166
10167 static int bnx2x_close(struct net_device *dev);
10168
10169 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
10170  * scheduled on a generic workqueue in order to prevent a deadlock.
10171  */
10172 static void bnx2x_sp_rtnl_task(struct work_struct *work)
10173 {
10174         struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
10175 #ifdef CONFIG_BNX2X_VXLAN
10176         u16 port;
10177 #endif
10178
10179         rtnl_lock();
10180
10181         if (!netif_running(bp->dev)) {
10182                 rtnl_unlock();
10183                 return;
10184         }
10185
10186         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
10187 #ifdef BNX2X_STOP_ON_ERROR
10188                 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10189                           "you will need to reboot when done\n");
10190                 goto sp_rtnl_not_reset;
10191 #endif
10192                 /*
10193                  * Clear all pending SP commands as we are going to reset the
10194                  * function anyway.
10195                  */
10196                 bp->sp_rtnl_state = 0;
10197                 smp_mb();
10198
10199                 bnx2x_parity_recover(bp);
10200
10201                 rtnl_unlock();
10202                 return;
10203         }
10204
10205         if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
10206 #ifdef BNX2X_STOP_ON_ERROR
10207                 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10208                           "you will need to reboot when done\n");
10209                 goto sp_rtnl_not_reset;
10210 #endif
10211
10212                 /*
10213                  * Clear all pending SP commands as we are going to reset the
10214                  * function anyway.
10215                  */
10216                 bp->sp_rtnl_state = 0;
10217                 smp_mb();
10218
10219                 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10220                 bnx2x_nic_load(bp, LOAD_NORMAL);
10221
10222                 rtnl_unlock();
10223                 return;
10224         }
10225 #ifdef BNX2X_STOP_ON_ERROR
10226 sp_rtnl_not_reset:
10227 #endif
10228         if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
10229                 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
10230         if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
10231                 bnx2x_after_function_update(bp);
10232         /*
10233          * in case of fan failure we need to reset it even if the "stop on
10234          * error" debug flag is set, since we are trying to prevent permanent
10235          * overheating damage
10236          */
10237         if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
10238                 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
10239                 netif_device_detach(bp->dev);
10240                 bnx2x_close(bp->dev);
10241                 rtnl_unlock();
10242                 return;
10243         }
10244
10245         if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
10246                 DP(BNX2X_MSG_SP,
10247                    "sending set mcast vf pf channel message from rtnl sp-task\n");
10248                 bnx2x_vfpf_set_mcast(bp->dev);
10249         }
10250         if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
10251                                &bp->sp_rtnl_state)) {
10252                 if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
10253                         bnx2x_tx_disable(bp);
10254                         BNX2X_ERR("PF indicated channel is not serviceable anymore. This means this VF device is no longer operational\n");
10255                 }
10256         }
10257
10258         if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
10259                 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
10260                 bnx2x_set_rx_mode_inner(bp);
10261         }
10262
10263         if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
10264                                &bp->sp_rtnl_state))
10265                 bnx2x_pf_set_vfs_vlan(bp);
10266
10267         if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
10268                 bnx2x_dcbx_stop_hw_tx(bp);
10269                 bnx2x_dcbx_resume_hw_tx(bp);
10270         }
10271
10272         if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
10273                                &bp->sp_rtnl_state))
10274                 bnx2x_update_mng_version(bp);
10275
10276 #ifdef CONFIG_BNX2X_VXLAN
10277         port = bp->vxlan_dst_port;
10278         if (test_and_clear_bit(BNX2X_SP_RTNL_ADD_VXLAN_PORT,
10279                                &bp->sp_rtnl_state)) {
10280                 if (!bnx2x_vxlan_port_update(bp, port))
10281                         netdev_info(bp->dev, "Added vxlan dest port %d\n", port);
10282                 else
10283                         bp->vxlan_dst_port = 0;
10284         }
10285
10286         if (test_and_clear_bit(BNX2X_SP_RTNL_DEL_VXLAN_PORT,
10287                                &bp->sp_rtnl_state)) {
10288                 if (!bnx2x_vxlan_port_update(bp, 0)) {
10289                         netdev_info(bp->dev,
10290                                     "Deleted vxlan dest port %d\n", port);
10291                         bp->vxlan_dst_port = 0;
10292                         vxlan_get_rx_port(bp->dev);
10293                 }
10294         }
10295 #endif
10296
10297         /* work which needs the rtnl lock not taken (as it takes the lock
10298          * itself and can be called from other contexts as well)
10299          */
10300         rtnl_unlock();
10301
10302         /* enable SR-IOV if applicable */
10303         if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
10304                                                &bp->sp_rtnl_state)) {
10305                 bnx2x_disable_sriov(bp);
10306                 bnx2x_enable_sriov(bp);
10307         }
10308 }
10309
10310 static void bnx2x_period_task(struct work_struct *work)
10311 {
10312         struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
10313
10314         if (!netif_running(bp->dev))
10315                 goto period_task_exit;
10316
10317         if (CHIP_REV_IS_SLOW(bp)) {
10318                 BNX2X_ERR("period task called on emulation, ignoring\n");
10319                 goto period_task_exit;
10320         }
10321
10322         bnx2x_acquire_phy_lock(bp);
10323         /*
10324          * The barrier is needed to ensure the ordering between the writing to
10325          * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
10326          * the reading here.
10327          */
10328         smp_mb();
10329         if (bp->port.pmf) {
10330                 bnx2x_period_func(&bp->link_params, &bp->link_vars);
10331
10332                 /* Re-queue task in 1 sec */
10333                 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
10334         }
10335
10336         bnx2x_release_phy_lock(bp);
10337 period_task_exit:
10338         return;
10339 }
10340
10341 /*
10342  * Init service functions
10343  */
10344
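/* Per-function pretend registers are laid out at a fixed stride;
 * compute the register address from this function's absolute index.
 */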
10345 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
10346 {
10347         u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
10348         u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
10349         return base + (BP_ABS_FUNC(bp)) * stride;
10350 }
10351
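/* If the UMAC for the given port is out of reset, save its command-config
 * register in vals and clear it to stop Rx; returns true if it did so.
 */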
10352 static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
10353                                          u8 port, u32 reset_reg,
10354                                          struct bnx2x_mac_vals *vals)
10355 {
10356         u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
10357         u32 base_addr;
10358
10359         if (!(mask & reset_reg))
10360                 return false;
10361
10362         BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
10363         base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
10364         vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
10365         vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
10366         REG_WR(bp, vals->umac_addr[port], 0);
10367
10368         return true;
10369 }
10370
10371 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10372                                         struct bnx2x_mac_vals *vals)
10373 {
10374         u32 val, base_addr, offset, mask, reset_reg;
10375         bool mac_stopped = false;
10376         u8 port = BP_PORT(bp);
10377
10378         /* reset addresses as they also mark which values were changed */
10379         memset(vals, 0, sizeof(*vals));
10380
10381         reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
10382
10383         if (!CHIP_IS_E3(bp)) {
10384                 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
10385                 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
10386                 if ((mask & reset_reg) && val) {
10387                         u32 wb_data[2];
10388                         BNX2X_DEV_INFO("Disable bmac Rx\n");
10389                         base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
10390                                                 : NIG_REG_INGRESS_BMAC0_MEM;
10391                         offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
10392                                                 : BIGMAC_REGISTER_BMAC_CONTROL;
10393
10394                         /*
10395                          * use rd/wr since we cannot use DMAE. This is safe
10396                          * since the MCP won't access the bus due to the request
10397                          * to unload, and no function on the path can be
10398                          * loaded at this time.
10399                          */
10400                         wb_data[0] = REG_RD(bp, base_addr + offset);
10401                         wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
10402                         vals->bmac_addr = base_addr + offset;
10403                         vals->bmac_val[0] = wb_data[0];
10404                         vals->bmac_val[1] = wb_data[1];
10405                         wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
10406                         REG_WR(bp, vals->bmac_addr, wb_data[0]);
10407                         REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
10408                 }
10409                 BNX2X_DEV_INFO("Disable emac Rx\n");
10410                 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
10411                 vals->emac_val = REG_RD(bp, vals->emac_addr);
10412                 REG_WR(bp, vals->emac_addr, 0);
10413                 mac_stopped = true;
10414         } else {
10415                 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
10416                         BNX2X_DEV_INFO("Disable xmac Rx\n");
10417                         base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
10418                         val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
10419                         REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10420                                val & ~(1 << 1));
10421                         REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10422                                val | (1 << 1));
10423                         vals->xmac_addr = base_addr + XMAC_REG_CTRL;
10424                         vals->xmac_val = REG_RD(bp, vals->xmac_addr);
10425                         REG_WR(bp, vals->xmac_addr, 0);
10426                         mac_stopped = true;
10427                 }
10428
10429                 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
10430                                                             reset_reg, vals);
10431                 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
10432                                                             reset_reg, vals);
10433         }
10434
10435         if (mac_stopped)
10436                 msleep(20);
10437 }
10438
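/* The pre-boot (UNDI) driver keeps its Rx producers in TSTORM internal
 * memory: the low 16 bits hold the RCQ producer, the high 16 bits the
 * BD producer.
 */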
10439 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
10440 #define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
10441                                         0x1848 + ((f) << 4))
10442 #define BNX2X_PREV_UNDI_RCQ(val)        ((val) & 0xffff)
10443 #define BNX2X_PREV_UNDI_BD(val)         ((val) >> 16 & 0xffff)
10444 #define BNX2X_PREV_UNDI_PROD(rcq, bd)   ((bd) << 16 | (rcq))
10445
10446 #define BCM_5710_UNDI_FW_MF_MAJOR       (0x07)
10447 #define BCM_5710_UNDI_FW_MF_MINOR       (0x08)
10448 #define BCM_5710_UNDI_FW_MF_VERS        (0x05)
10449
10450 static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
10451 {
10452         /* UNDI marks its presence in DORQ -
10453          * it initializes the CID offset for the normal doorbell to 0x7
10454          */
10455         if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
10456             MISC_REGISTERS_RESET_REG_1_RST_DORQ))
10457                 return false;
10458
10459         if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
10460                 BNX2X_DEV_INFO("UNDI previously loaded\n");
10461                 return true;
10462         }
10463
10464         return false;
10465 }
10466
10467 static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
10468 {
10469         u16 rcq, bd;
10470         u32 addr, tmp_reg;
10471
10472         if (BP_FUNC(bp) < 2)
10473                 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
10474         else
10475                 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
10476
10477         tmp_reg = REG_RD(bp, addr);
10478         rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
10479         bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
10480
10481         tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
10482         REG_WR(bp, addr, tmp_reg);
10483
10484         BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
10485                        BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
10486 }
10487
10488 static int bnx2x_prev_mcp_done(struct bnx2x *bp)
10489 {
10490         u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
10491                                   DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
10492         if (!rc) {
10493                 BNX2X_ERR("MCP response failure, aborting\n");
10494                 return -EBUSY;
10495         }
10496
10497         return 0;
10498 }
10499
10500 static struct bnx2x_prev_path_list *
10501                 bnx2x_prev_path_get_entry(struct bnx2x *bp)
10502 {
10503         struct bnx2x_prev_path_list *tmp_list;
10504
10505         list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
10506                 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
10507                     bp->pdev->bus->number == tmp_list->bus &&
10508                     BP_PATH(bp) == tmp_list->path)
10509                         return tmp_list;
10510
10511         return NULL;
10512 }
10513
10514 static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
10515 {
10516         struct bnx2x_prev_path_list *tmp_list;
10517         int rc;
10518
10519         rc = down_interruptible(&bnx2x_prev_sem);
10520         if (rc) {
10521                 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10522                 return rc;
10523         }
10524
10525         tmp_list = bnx2x_prev_path_get_entry(bp);
10526         if (tmp_list) {
10527                 tmp_list->aer = 1;
10528                 rc = 0;
10529         } else {
10530                 BNX2X_ERR("path %d: Entry does not exist for EEH; does this flow occur before initial insmod is over?\n",
10531                           BP_PATH(bp));
10532         }
10533
10534         up(&bnx2x_prev_sem);
10535
10536         return rc;
10537 }
10538
10539 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
10540 {
10541         struct bnx2x_prev_path_list *tmp_list;
10542         bool rc = false;
10543
10544         if (down_trylock(&bnx2x_prev_sem))
10545                 return false;
10546
10547         tmp_list = bnx2x_prev_path_get_entry(bp);
10548         if (tmp_list) {
10549                 if (tmp_list->aer) {
10550                         DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
10551                            BP_PATH(bp));
10552                 } else {
10553                         rc = true;
10554                         BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
10555                                        BP_PATH(bp));
10556                 }
10557         }
10558
10559         up(&bnx2x_prev_sem);
10560
10561         return rc;
10562 }
10563
10564 bool bnx2x_port_after_undi(struct bnx2x *bp)
10565 {
10566         struct bnx2x_prev_path_list *entry;
10567         bool val;
10568
10569         down(&bnx2x_prev_sem);
10570
10571         entry = bnx2x_prev_path_get_entry(bp);
10572         val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
10573
10574         up(&bnx2x_prev_sem);
10575
10576         return val;
10577 }
10578
10579 static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
10580 {
10581         struct bnx2x_prev_path_list *tmp_list;
10582         int rc;
10583
10584         rc = down_interruptible(&bnx2x_prev_sem);
10585         if (rc) {
10586                 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10587                 return rc;
10588         }
10589
10590         /* Check whether the entry for this path already exists */
10591         tmp_list = bnx2x_prev_path_get_entry(bp);
10592         if (tmp_list) {
10593                 if (!tmp_list->aer) {
10594                         BNX2X_ERR("Re-Marking the path.\n");
10595                 } else {
10596                         DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
10597                            BP_PATH(bp));
10598                         tmp_list->aer = 0;
10599                 }
10600                 up(&bnx2x_prev_sem);
10601                 return 0;
10602         }
10603         up(&bnx2x_prev_sem);
10604
10605         /* Create an entry for this path and add it */
10606         tmp_list = kmalloc(sizeof(*tmp_list), GFP_KERNEL);
10607         if (!tmp_list) {
10608                 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
10609                 return -ENOMEM;
10610         }
10611
10612         tmp_list->bus = bp->pdev->bus->number;
10613         tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
10614         tmp_list->path = BP_PATH(bp);
10615         tmp_list->aer = 0;
10616         tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
10617
10618         rc = down_interruptible(&bnx2x_prev_sem);
10619         if (rc) {
10620                 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10621                 kfree(tmp_list);
10622         } else {
10623                 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
10624                    BP_PATH(bp));
10625                 list_add(&tmp_list->list, &bnx2x_prev_list);
10626                 up(&bnx2x_prev_sem);
10627         }
10628
10629         return rc;
10630 }
10631
10632 static int bnx2x_do_flr(struct bnx2x *bp)
10633 {
10634         struct pci_dev *dev = bp->pdev;
10635
10636         if (CHIP_IS_E1x(bp)) {
10637                 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
10638                 return -EINVAL;
10639         }
10640
10641         /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
10642         if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
10643                 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
10644                           bp->common.bc_ver);
10645                 return -EINVAL;
10646         }
10647
10648         if (!pci_wait_for_pending_transaction(dev))
10649                 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
10650
10651         BNX2X_DEV_INFO("Initiating FLR\n");
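        /* The FLR itself is carried out by the MCP on the driver's behalf */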
10652         bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
10653
10654         return 0;
10655 }
10656
10657 static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10658 {
10659         int rc;
10660
10661         BNX2X_DEV_INFO("Uncommon unload Flow\n");
10662
10663         /* Test if previous unload process was already finished for this path */
10664         if (bnx2x_prev_is_path_marked(bp))
10665                 return bnx2x_prev_mcp_done(bp);
10666
10667         BNX2X_DEV_INFO("Path is unmarked\n");
10668
10669         /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
10670         if (bnx2x_prev_is_after_undi(bp))
10671                 goto out;
10672
10673         /* If function has FLR capabilities, and existing FW version matches
10674          * the one required, then FLR will be sufficient to clean any residue
10675          * left by previous driver
10676          */
10677         rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
10678
10679         if (!rc) {
10680                 /* fw version is good */
10681                 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
10682                 rc = bnx2x_do_flr(bp);
10683         }
10684
10685         if (!rc) {
10686                 /* FLR was performed */
10687                 BNX2X_DEV_INFO("FLR successful\n");
10688                 return 0;
10689         }
10690
10691         BNX2X_DEV_INFO("Could not FLR\n");
10692
10693 out:
10694         /* Close the MCP request, return failure */
10695         rc = bnx2x_prev_mcp_done(bp);
10696         if (!rc)
10697                 rc = BNX2X_PREV_WAIT_NEEDED;
10698
10699         return rc;
10700 }
10701
10702 static int bnx2x_prev_unload_common(struct bnx2x *bp)
10703 {
10704         u32 reset_reg, tmp_reg = 0, rc;
10705         bool prev_undi = false;
10706         struct bnx2x_mac_vals mac_vals;
10707
10708         /* It is possible a previous function received the 'common' answer,
10709          * but hasn't loaded yet, therefore creating a scenario of
10710          * multiple functions receiving 'common' on the same path.
10711          */
10712         BNX2X_DEV_INFO("Common unload Flow\n");
10713
10714         memset(&mac_vals, 0, sizeof(mac_vals));
10715
10716         if (bnx2x_prev_is_path_marked(bp))
10717                 return bnx2x_prev_mcp_done(bp);
10718
10719         reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10720
10721         /* Reset should be performed after BRB is emptied */
10722         if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10723                 u32 timer_count = 1000;
10724
10725                 /* Close the MAC Rx to prevent BRB from filling up */
10726                 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10727
10728                 /* close LLH filters for both ports towards the BRB */
10729                 bnx2x_set_rx_filter(&bp->link_params, 0);
10730                 bp->link_params.port ^= 1;
10731                 bnx2x_set_rx_filter(&bp->link_params, 0);
10732                 bp->link_params.port ^= 1;
10733
10734                 /* Check if the UNDI driver was previously loaded */
10735                 if (bnx2x_prev_is_after_undi(bp)) {
10736                         prev_undi = true;
10737                         /* clear the UNDI indication */
10738                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10739                         /* clear possible idle check errors */
10740                         REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10741                 }
10742                 if (!CHIP_IS_E1x(bp))
10743                         /* block FW from writing to host */
10744                         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10745
10746                 /* wait until BRB is empty */
10747                 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10748                 while (timer_count) {
10749                         u32 prev_brb = tmp_reg;
10750
10751                         tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10752                         if (!tmp_reg)
10753                                 break;
10754
10755                         BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
10756
10757                         /* reset timer as long as BRB actually gets emptied */
10758                         if (prev_brb > tmp_reg)
10759                                 timer_count = 1000;
10760                         else
10761                                 timer_count--;
10762
10763                         /* If UNDI resides in memory, manually increment it */
10764                         if (prev_undi)
10765                                 bnx2x_prev_unload_undi_inc(bp, 1);
10766
10767                         udelay(10);
10768                 }
10769
10770                 if (!timer_count)
10771                         BNX2X_ERR("Failed to empty BRB, hope for the best\n");
10772         }
10773
10774         /* No packets are in the pipeline, path is ready for reset */
10775         bnx2x_reset_common(bp);
10776
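        /* Restore the MAC register values saved by
         * bnx2x_prev_unload_close_mac() now that the common reset is done.
         */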
10777         if (mac_vals.xmac_addr)
10778                 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10779         if (mac_vals.umac_addr[0])
10780                 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
10781         if (mac_vals.umac_addr[1])
10782                 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
10783         if (mac_vals.emac_addr)
10784                 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10785         if (mac_vals.bmac_addr) {
10786                 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10787                 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10788         }
10789
10790         rc = bnx2x_prev_mark_path(bp, prev_undi);
10791         if (rc) {
10792                 bnx2x_prev_mcp_done(bp);
10793                 return rc;
10794         }
10795
10796         return bnx2x_prev_mcp_done(bp);
10797 }
10798
10799 static int bnx2x_prev_unload(struct bnx2x *bp)
10800 {
10801         int time_counter = 10;
10802         u32 rc, fw, hw_lock_reg, hw_lock_val;

10803         BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10804
10805         /* clear hw from errors which may have resulted from an interrupted
10806          * dmae transaction.
10807          */
10808         bnx2x_clean_pglue_errors(bp);
10809
10810         /* Release previously held locks */
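        /* Each function has its own driver-control lock register, 8 bytes
         * apart: functions 0-5 are offset from MISC_REG_DRIVER_CONTROL_1 and
         * functions 6-7 from MISC_REG_DRIVER_CONTROL_7.
         */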
10811         hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10812                       (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10813                       (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10814
10815         hw_lock_val = REG_RD(bp, hw_lock_reg);
10816         if (hw_lock_val) {
10817                 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10818                         BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
10819                         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10820                                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10821                 }
10822
10823                 BNX2X_DEV_INFO("Release Previously held hw lock\n");
10824                 REG_WR(bp, hw_lock_reg, 0xffffffff);
10825         } else
10826                 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10827
10828         if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10829                 BNX2X_DEV_INFO("Release previously held alr\n");
10830                 bnx2x_release_alr(bp);
10831         }
10832
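        /* A non-common reply from the MCP may require looping: retry the
         * unload handshake up to 10 times (with a 20ms pause) before giving
         * up and deferring the probe.
         */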
10833         do {
10834                 int aer = 0;

10835                 /* Lock MCP using an unload request */
10836                 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10837                 if (!fw) {
10838                         BNX2X_ERR("MCP response failure, aborting\n");
10839                         rc = -EBUSY;
10840                         break;
10841                 }
10842
10843                 rc = down_interruptible(&bnx2x_prev_sem);
10844                 if (rc) {
10845                         BNX2X_ERR("Cannot check for AER; received %d when trying to take the lock\n",
10846                                   rc);
10847                 } else {
10848                         /* If Path is marked by EEH, ignore unload status */
10849                         aer = !!(bnx2x_prev_path_get_entry(bp) &&
10850                                  bnx2x_prev_path_get_entry(bp)->aer);
10851                         up(&bnx2x_prev_sem);
10852                 }
10853
10854                 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
10855                         rc = bnx2x_prev_unload_common(bp);
10856                         break;
10857                 }
10858
10859                 /* non-common reply from MCP might require looping */
10860                 rc = bnx2x_prev_unload_uncommon(bp);
10861                 if (rc != BNX2X_PREV_WAIT_NEEDED)
10862                         break;
10863
10864                 msleep(20);
10865         } while (--time_counter);
10866
10867         if (!time_counter || rc) {
10868                 BNX2X_DEV_INFO("Unloading previous driver did not occur, possibly due to MF UNDI\n");
10869                 rc = -EPROBE_DEFER;
10870         }
10871
10872         /* Mark function if its port was used to boot from SAN */
10873         if (bnx2x_port_after_undi(bp))
10874                 bp->link_params.feature_config_flags |=
10875                         FEATURE_CONFIG_BOOT_FROM_SAN;
10876
10877         BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
10878
10879         return rc;
10880 }
10881
10882 static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10883 {
10884         u32 val, val2, val3, val4, id, boot_mode;
10885         u16 pmc;
10886
10887         /* Get the chip revision id and number. */
10888         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
10889         val = REG_RD(bp, MISC_REG_CHIP_NUM);
10890         id = ((val & 0xffff) << 16);
10891         val = REG_RD(bp, MISC_REG_CHIP_REV);
10892         id |= ((val & 0xf) << 12);
10893
10894         /* Metal is read from PCI regs, but we can't access addresses
10895          * >= 0x400 through the configuration space (so we need reg_rd).
10896          */
10897         val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
10898         id |= (((val >> 24) & 0xf) << 4);
10899         val = REG_RD(bp, MISC_REG_BOND_ID);
10900         id |= (val & 0xf);
10901         bp->common.chip_id = id;
10902
10903         /* force 57811 according to MISC register */
10904         if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
10905                 if (CHIP_IS_57810(bp))
10906                         bp->common.chip_id = (CHIP_NUM_57811 << 16) |
10907                                 (bp->common.chip_id & 0x0000FFFF);
10908                 else if (CHIP_IS_57810_MF(bp))
10909                         bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
10910                                 (bp->common.chip_id & 0x0000FFFF);
10911                 bp->common.chip_id |= 0x1;
10912         }
10913
10914         /* Set doorbell size */
10915         bp->db_size = (1 << BNX2X_DB_SHIFT);
10916
10917         if (!CHIP_IS_E1x(bp)) {
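                /* Port-mode override register: bit 0 enables the override
                 * and bit 1 holds the overriding value; when the override
                 * is off, read the mode from PORT4MODE_EN instead.
                 */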
10918                 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
10919                 if ((val & 1) == 0)
10920                         val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
10921                 else
10922                         val = (val >> 1) & 1;
10923                 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
10924                                                        "2_PORT_MODE");
10925                 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
10926                                                  CHIP_2_PORT_MODE;
10927
10928                 if (CHIP_MODE_IS_4_PORT(bp))
10929                         bp->pfid = (bp->pf_num >> 1);   /* 0..3 */
10930                 else
10931                         bp->pfid = (bp->pf_num & 0x6);  /* 0, 2, 4, 6 */
10932         } else {
10933                 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
10934                 bp->pfid = bp->pf_num;                  /* 0..7 */
10935         }
10936
10937         BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid);
10938
10939         bp->link_params.chip_id = bp->common.chip_id;
10940         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
10941
10942         val = (REG_RD(bp, 0x2874) & 0x55);
10943         if ((bp->common.chip_id & 0x1) ||
10944             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
10945                 bp->flags |= ONE_PORT_FLAG;
10946                 BNX2X_DEV_INFO("single port device\n");
10947         }
10948
10949         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
10950         bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
10951                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
10952         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
10953                        bp->common.flash_size, bp->common.flash_size);
10954
10955         bnx2x_init_shmem(bp);
10956
10957         bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
10958                                         MISC_REG_GENERIC_CR_1 :
10959                                         MISC_REG_GENERIC_CR_0));
10960
10961         bp->link_params.shmem_base = bp->common.shmem_base;
10962         bp->link_params.shmem2_base = bp->common.shmem2_base;
10963         if (SHMEM2_RD(bp, size) >
10964             (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
10965                 bp->link_params.lfa_base =
10966                 REG_RD(bp, bp->common.shmem2_base +
10967                        (u32)offsetof(struct shmem2_region,
10968                                      lfa_host_addr[BP_PORT(bp)]));
10969         else
10970                 bp->link_params.lfa_base = 0;
10971         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
10972                        bp->common.shmem_base, bp->common.shmem2_base);
10973
10974         if (!bp->common.shmem_base) {
10975                 BNX2X_DEV_INFO("MCP not active\n");
10976                 bp->flags |= NO_MCP_FLAG;
10977                 return;
10978         }
10979
10980         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
10981         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
10982
10983         bp->link_params.hw_led_mode = ((bp->common.hw_config &
10984                                         SHARED_HW_CFG_LED_MODE_MASK) >>
10985                                        SHARED_HW_CFG_LED_MODE_SHIFT);
10986
10987         bp->link_params.feature_config_flags = 0;
10988         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
10989         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
10990                 bp->link_params.feature_config_flags |=
10991                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10992         else
10993                 bp->link_params.feature_config_flags &=
10994                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10995
10996         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
10997         bp->common.bc_ver = val;
10998         BNX2X_DEV_INFO("bc_ver %X\n", val);
10999         if (val < BNX2X_BC_VER) {
11000                 /* For now we only warn; later we
11001                  * might need to enforce this. */
11002                 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
11003                           BNX2X_BC_VER, val);
11004         }
11005         bp->link_params.feature_config_flags |=
11006                                 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
11007                                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
11008
11009         bp->link_params.feature_config_flags |=
11010                 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
11011                 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
11012         bp->link_params.feature_config_flags |=
11013                 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
11014                 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
11015         bp->link_params.feature_config_flags |=
11016                 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
11017                 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
11018
11019         bp->link_params.feature_config_flags |=
11020                 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
11021                 FEATURE_CONFIG_MT_SUPPORT : 0;
11022
11023         bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
11024                         BC_SUPPORTS_PFC_STATS : 0;
11025
11026         bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
11027                         BC_SUPPORTS_FCOE_FEATURES : 0;
11028
11029         bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
11030                         BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
11031
11032         bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
11033                         BC_SUPPORTS_RMMOD_CMD : 0;
11034
11035         boot_mode = SHMEM_RD(bp,
11036                         dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
11037                         PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
11038         switch (boot_mode) {
11039         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
11040                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
11041                 break;
11042         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
11043                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
11044                 break;
11045         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
11046                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
11047                 break;
11048         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
11049                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
11050                 break;
11051         }
11052
11053         pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
11054         bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
11055
11056         BNX2X_DEV_INFO("%sWoL capable\n",
11057                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
11058
11059         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
11060         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
11061         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
11062         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
11063
11064         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
11065                  val, val2, val3, val4);
11066 }
11067
11068 #define IGU_FID(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
11069 #define IGU_VEC(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
11070
11071 static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
11072 {
11073         int pfid = BP_FUNC(bp);
11074         int igu_sb_id;
11075         u32 val;
11076         u8 fid, igu_sb_cnt = 0;
11077
11078         bp->igu_base_sb = 0xff;
11079         if (CHIP_INT_MODE_IS_BC(bp)) {
11080                 int vn = BP_VN(bp);
11081                 igu_sb_cnt = bp->igu_sb_cnt;
11082                 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
11083                         FP_SB_MAX_E1x;
11084
11085                 bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
11086                         (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
11087
11088                 return 0;
11089         }
11090
11091         /* IGU in normal mode - read CAM */
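        /* Walk all CAM entries: count the status blocks that belong to this
         * PF, record vector 0 as the default SB and the first non-default
         * SB as the base.
         */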
11092         for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
11093              igu_sb_id++) {
11094                 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
11095                 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
11096                         continue;
11097                 fid = IGU_FID(val);
11098                 if ((fid & IGU_FID_ENCODE_IS_PF)) {
11099                         if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
11100                                 continue;
11101                         if (IGU_VEC(val) == 0)
11102                                 /* default status block */
11103                                 bp->igu_dsb_id = igu_sb_id;
11104                         else {
11105                                 if (bp->igu_base_sb == 0xff)
11106                                         bp->igu_base_sb = igu_sb_id;
11107                                 igu_sb_cnt++;
11108                         }
11109                 }
11110         }
11111
11112 #ifdef CONFIG_PCI_MSI
11113         /* Due to new PF resource allocation by MFW T7.4 and above, the
11114          * number of CAM entries may not be equal to the value advertised
11115          * in PCI.
11116          * The driver should use the minimum of the two as the actual
11117          * status block count.
11118          */
11119         bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
11120 #endif
11121
11122         if (igu_sb_cnt == 0) {
11123                 BNX2X_ERR("CAM configuration error\n");
11124                 return -EINVAL;
11125         }
11126
11127         return 0;
11128 }
11129
11130 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
11131 {
11132         int cfg_size = 0, idx, port = BP_PORT(bp);
11133
11134         /* Aggregation of supported attributes of all external phys */
11135         bp->port.supported[0] = 0;
11136         bp->port.supported[1] = 0;
11137         switch (bp->link_params.num_phys) {
11138         case 1:
11139                 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
11140                 cfg_size = 1;
11141                 break;
11142         case 2:
11143                 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
11144                 cfg_size = 1;
11145                 break;
11146         case 3:
11147                 if (bp->link_params.multi_phy_config &
11148                     PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11149                         bp->port.supported[1] =
11150                                 bp->link_params.phy[EXT_PHY1].supported;
11151                         bp->port.supported[0] =
11152                                 bp->link_params.phy[EXT_PHY2].supported;
11153                 } else {
11154                         bp->port.supported[0] =
11155                                 bp->link_params.phy[EXT_PHY1].supported;
11156                         bp->port.supported[1] =
11157                                 bp->link_params.phy[EXT_PHY2].supported;
11158                 }
11159                 cfg_size = 2;
11160                 break;
11161         }
11162
11163         if (!(bp->port.supported[0] || bp->port.supported[1])) {
11164                 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
11165                            SHMEM_RD(bp,
11166                            dev_info.port_hw_config[port].external_phy_config),
11167                            SHMEM_RD(bp,
11168                            dev_info.port_hw_config[port].external_phy_config2));
11169                 return;
11170         }
11171
11172         if (CHIP_IS_E3(bp))
11173                 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
11174         else {
11175                 switch (switch_cfg) {
11176                 case SWITCH_CFG_1G:
11177                         bp->port.phy_addr = REG_RD(
11178                                 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
11179                         break;
11180                 case SWITCH_CFG_10G:
11181                         bp->port.phy_addr = REG_RD(
11182                                 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
11183                         break;
11184                 default:
11185                         BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
11186                                   bp->port.link_config[0]);
11187                         return;
11188                 }
11189         }
11190         BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
11191         /* mask what we support according to speed_cap_mask per configuration */
11192         for (idx = 0; idx < cfg_size; idx++) {
11193                 if (!(bp->link_params.speed_cap_mask[idx] &
11194                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
11195                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
11196
11197                 if (!(bp->link_params.speed_cap_mask[idx] &
11198                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
11199                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
11200
11201                 if (!(bp->link_params.speed_cap_mask[idx] &
11202                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
11203                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
11204
11205                 if (!(bp->link_params.speed_cap_mask[idx] &
11206                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
11207                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
11208
11209                 if (!(bp->link_params.speed_cap_mask[idx] &
11210                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
11211                         bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
11212                                                      SUPPORTED_1000baseT_Full);
11213
11214                 if (!(bp->link_params.speed_cap_mask[idx] &
11215                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
11216                         bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
11217
11218                 if (!(bp->link_params.speed_cap_mask[idx] &
11219                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
11220                         bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
11221
11222                 if (!(bp->link_params.speed_cap_mask[idx] &
11223                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
11224                         bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
11225         }
11226
11227         BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
11228                        bp->port.supported[1]);
11229 }
11230
11231 static void bnx2x_link_settings_requested(struct bnx2x *bp)
11232 {
11233         u32 link_config, idx, cfg_size = 0;

11234         bp->port.advertising[0] = 0;
11235         bp->port.advertising[1] = 0;
11236         switch (bp->link_params.num_phys) {
11237         case 1:
11238         case 2:
11239                 cfg_size = 1;
11240                 break;
11241         case 3:
11242                 cfg_size = 2;
11243                 break;
11244         }
11245         for (idx = 0; idx < cfg_size; idx++) {
11246                 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
11247                 link_config = bp->port.link_config[idx];
11248                 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
11249                 case PORT_FEATURE_LINK_SPEED_AUTO:
11250                         if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
11251                                 bp->link_params.req_line_speed[idx] =
11252                                         SPEED_AUTO_NEG;
11253                                 bp->port.advertising[idx] |=
11254                                         bp->port.supported[idx];
11255                                 if (bp->link_params.phy[EXT_PHY1].type ==
11256                                     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
11257                                         bp->port.advertising[idx] |=
11258                                         (SUPPORTED_100baseT_Half |
11259                                          SUPPORTED_100baseT_Full);
11260                         } else {
11261                                 /* force 10G, no AN */
11262                                 bp->link_params.req_line_speed[idx] =
11263                                         SPEED_10000;
11264                                 bp->port.advertising[idx] |=
11265                                         (ADVERTISED_10000baseT_Full |
11266                                          ADVERTISED_FIBRE);
11267                                 continue;
11268                         }
11269                         break;
11270
11271                 case PORT_FEATURE_LINK_SPEED_10M_FULL:
11272                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
11273                                 bp->link_params.req_line_speed[idx] =
11274                                         SPEED_10;
11275                                 bp->port.advertising[idx] |=
11276                                         (ADVERTISED_10baseT_Full |
11277                                          ADVERTISED_TP);
11278                         } else {
11279                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11280                                           link_config,
11281                                           bp->link_params.speed_cap_mask[idx]);
11282                                 return;
11283                         }
11284                         break;
11285
11286                 case PORT_FEATURE_LINK_SPEED_10M_HALF:
11287                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
11288                                 bp->link_params.req_line_speed[idx] =
11289                                         SPEED_10;
11290                                 bp->link_params.req_duplex[idx] =
11291                                         DUPLEX_HALF;
11292                                 bp->port.advertising[idx] |=
11293                                         (ADVERTISED_10baseT_Half |
11294                                          ADVERTISED_TP);
11295                         } else {
11296                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11297                                           link_config,
11298                                           bp->link_params.speed_cap_mask[idx]);
11299                                 return;
11300                         }
11301                         break;
11302
11303                 case PORT_FEATURE_LINK_SPEED_100M_FULL:
11304                         if (bp->port.supported[idx] &
11305                             SUPPORTED_100baseT_Full) {
11306                                 bp->link_params.req_line_speed[idx] =
11307                                         SPEED_100;
11308                                 bp->port.advertising[idx] |=
11309                                         (ADVERTISED_100baseT_Full |
11310                                          ADVERTISED_TP);
11311                         } else {
11312                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11313                                           link_config,
11314                                           bp->link_params.speed_cap_mask[idx]);
11315                                 return;
11316                         }
11317                         break;
11318
11319                 case PORT_FEATURE_LINK_SPEED_100M_HALF:
11320                         if (bp->port.supported[idx] &
11321                             SUPPORTED_100baseT_Half) {
11322                                 bp->link_params.req_line_speed[idx] =
11323                                                                 SPEED_100;
11324                                 bp->link_params.req_duplex[idx] =
11325                                                                 DUPLEX_HALF;
11326                                 bp->port.advertising[idx] |=
11327                                         (ADVERTISED_100baseT_Half |
11328                                          ADVERTISED_TP);
11329                         } else {
11330                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11331                                     link_config,
11332                                     bp->link_params.speed_cap_mask[idx]);
11333                                 return;
11334                         }
11335                         break;
11336
11337                 case PORT_FEATURE_LINK_SPEED_1G:
11338                         if (bp->port.supported[idx] &
11339                             SUPPORTED_1000baseT_Full) {
11340                                 bp->link_params.req_line_speed[idx] =
11341                                         SPEED_1000;
11342                                 bp->port.advertising[idx] |=
11343                                         (ADVERTISED_1000baseT_Full |
11344                                          ADVERTISED_TP);
11345                         } else if (bp->port.supported[idx] &
11346                                    SUPPORTED_1000baseKX_Full) {
11347                                 bp->link_params.req_line_speed[idx] =
11348                                         SPEED_1000;
11349                                 bp->port.advertising[idx] |=
11350                                         ADVERTISED_1000baseKX_Full;
11351                         } else {
11352                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11353                                     link_config,
11354                                     bp->link_params.speed_cap_mask[idx]);
11355                                 return;
11356                         }
11357                         break;
11358
11359                 case PORT_FEATURE_LINK_SPEED_2_5G:
11360                         if (bp->port.supported[idx] &
11361                             SUPPORTED_2500baseX_Full) {
11362                                 bp->link_params.req_line_speed[idx] =
11363                                         SPEED_2500;
11364                                 bp->port.advertising[idx] |=
11365                                         (ADVERTISED_2500baseX_Full |
11366                                                 ADVERTISED_TP);
11367                         } else {
11368                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11369                                     link_config,
11370                                     bp->link_params.speed_cap_mask[idx]);
11371                                 return;
11372                         }
11373                         break;
11374
11375                 case PORT_FEATURE_LINK_SPEED_10G_CX4:
11376                         if (bp->port.supported[idx] &
11377                             SUPPORTED_10000baseT_Full) {
11378                                 bp->link_params.req_line_speed[idx] =
11379                                         SPEED_10000;
11380                                 bp->port.advertising[idx] |=
11381                                         (ADVERTISED_10000baseT_Full |
11382                                                 ADVERTISED_FIBRE);
11383                         } else if (bp->port.supported[idx] &
11384                                    SUPPORTED_10000baseKR_Full) {
11385                                 bp->link_params.req_line_speed[idx] =
11386                                         SPEED_10000;
11387                                 bp->port.advertising[idx] |=
11388                                         (ADVERTISED_10000baseKR_Full |
11389                                                 ADVERTISED_FIBRE);
11390                         } else {
11391                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11392                                     link_config,
11393                                     bp->link_params.speed_cap_mask[idx]);
11394                                 return;
11395                         }
11396                         break;
11397                 case PORT_FEATURE_LINK_SPEED_20G:
11398                         bp->link_params.req_line_speed[idx] = SPEED_20000;
11399
11400                         break;
11401                 default:
11402                         BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
11403                                   link_config);
11404                         bp->link_params.req_line_speed[idx] =
11405                                 SPEED_AUTO_NEG;
11406                         bp->port.advertising[idx] =
11407                                 bp->port.supported[idx];
11408                         break;
11409                 }
11410
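                /* The requested flow control comes from NVRAM; AUTO is only
                 * meaningful when the PHY supports autonegotiation, so fall
                 * back to no flow control otherwise.
                 */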
11411                 bp->link_params.req_flow_ctrl[idx] = (link_config &
11412                                          PORT_FEATURE_FLOW_CONTROL_MASK);
11413                 if (bp->link_params.req_flow_ctrl[idx] ==
11414                     BNX2X_FLOW_CTRL_AUTO) {
11415                         if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
11416                                 bp->link_params.req_flow_ctrl[idx] =
11417                                                         BNX2X_FLOW_CTRL_NONE;
11418                         else
11419                                 bnx2x_set_requested_fc(bp);
11420                 }
11421
11422                 BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
11423                                bp->link_params.req_line_speed[idx],
11424                                bp->link_params.req_duplex[idx],
11425                                bp->link_params.req_flow_ctrl[idx],
11426                                bp->port.advertising[idx]);
11427         }
11428 }
11429
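/* Build a MAC address from two shmem words: the upper 2 bytes come from
 * mac_hi and the lower 4 bytes from mac_lo, stored in network byte order.
 */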
11430 static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
11431 {
11432         __be16 mac_hi_be = cpu_to_be16(mac_hi);
11433         __be32 mac_lo_be = cpu_to_be32(mac_lo);
11434         memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
11435         memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
11436 }
11437
11438 static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
11439 {
11440         int port = BP_PORT(bp);
11441         u32 config;
11442         u32 ext_phy_type, ext_phy_config, eee_mode;
11443
11444         bp->link_params.bp = bp;
11445         bp->link_params.port = port;
11446
11447         bp->link_params.lane_config =
11448                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
11449
11450         bp->link_params.speed_cap_mask[0] =
11451                 SHMEM_RD(bp,
11452                          dev_info.port_hw_config[port].speed_capability_mask) &
11453                 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11454         bp->link_params.speed_cap_mask[1] =
11455                 SHMEM_RD(bp,
11456                          dev_info.port_hw_config[port].speed_capability_mask2) &
11457                 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11458         bp->port.link_config[0] =
11459                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
11460
11461         bp->port.link_config[1] =
11462                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
11463
11464         bp->link_params.multi_phy_config =
11465                 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
11466         /* If the device is capable of WoL, set the default state according
11467          * to the HW
11468          */
11469         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
11470         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
11471                    (config & PORT_FEATURE_WOL_ENABLED));
11472
11473         if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11474             PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
11475                 bp->flags |= NO_ISCSI_FLAG;
11476         if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11477             PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
11478                 bp->flags |= NO_FCOE_FLAG;
11479
11480         BNX2X_DEV_INFO("lane_config 0x%08x  speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
11481                        bp->link_params.lane_config,
11482                        bp->link_params.speed_cap_mask[0],
11483                        bp->port.link_config[0]);
11484
11485         bp->link_params.switch_cfg = (bp->port.link_config[0] &
11486                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
11487         bnx2x_phy_probe(&bp->link_params);
11488         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
11489
11490         bnx2x_link_settings_requested(bp);
11491
11492         /*
11493          * If connected directly, work with the internal PHY; otherwise,
11494          * work with the external PHY.
11495          */
11496         ext_phy_config =
11497                 SHMEM_RD(bp,
11498                          dev_info.port_hw_config[port].external_phy_config);
11499         ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
11500         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
11501                 bp->mdio.prtad = bp->port.phy_addr;
11502
11503         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
11504                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
11505                 bp->mdio.prtad =
11506                         XGXS_EXT_PHY_ADDR(ext_phy_config);
11507
11508         /* Configure link feature according to nvram value */
11509         eee_mode = (((SHMEM_RD(bp, dev_info.
11510                       port_feature_config[port].eee_power_mode)) &
11511                      PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
11512                     PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
11513         if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
11514                 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
11515                                            EEE_MODE_ENABLE_LPI |
11516                                            EEE_MODE_OUTPUT_TIME;
11517         } else {
11518                 bp->link_params.eee_mode = 0;
11519         }
11520 }
11521
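/* The license limits in shmem are stored XORed with FW_ENCODE_32BIT_PATTERN,
 * so decode the value before extracting the connection count.
 */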
11522 void bnx2x_get_iscsi_info(struct bnx2x *bp)
11523 {
11524         u32 no_flags = NO_ISCSI_FLAG;
11525         int port = BP_PORT(bp);
11526         u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11527                                 drv_lic_key[port].max_iscsi_conn);
11528
11529         if (!CNIC_SUPPORT(bp)) {
11530                 bp->flags |= no_flags;
11531                 return;
11532         }
11533
11534         /* Get the number of maximum allowed iSCSI connections */
11535         bp->cnic_eth_dev.max_iscsi_conn =
11536                 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
11537                 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
11538
11539         BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
11540                        bp->cnic_eth_dev.max_iscsi_conn);
11541
11542         /*
11543          * If maximum allowed number of connections is zero -
11544          * disable the feature.
11545          */
11546         if (!bp->cnic_eth_dev.max_iscsi_conn)
11547                 bp->flags |= no_flags;
11548 }
11549
11550 static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
11551 {
11552         /* Port info */
11553         bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11554                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
11555         bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11556                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
11557
11558         /* Node info */
11559         bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11560                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
11561         bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11562                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
11563 }
11564
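/* Count how many functions (in MF mode) or ports (in SF mode) on this path
 * have FCoE enabled; the caller uses the count to split the FCoE exchange
 * resources between them.
 */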
11565 static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
11566 {
11567         u8 count = 0;
11568
11569         if (IS_MF(bp)) {
11570                 u8 fid;
11571
11572                 /* iterate over absolute function ids for this path: */
11573                 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
11574                         if (IS_MF_SD(bp)) {
11575                                 u32 cfg = MF_CFG_RD(bp,
11576                                                     func_mf_config[fid].config);
11577
11578                                 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
11579                                     ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
11580                                             FUNC_MF_CFG_PROTOCOL_FCOE))
11581                                         count++;
11582                         } else {
11583                                 u32 cfg = MF_CFG_RD(bp,
11584                                                     func_ext_config[fid].
11585                                                                       func_cfg);
11586
11587                                 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
11588                                     (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
11589                                         count++;
11590                         }
11591                 }
11592         } else { /* SF */
11593                 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
11594
11595                 for (port = 0; port < port_cnt; port++) {
11596                         u32 lic = SHMEM_RD(bp,
11597                                            drv_lic_key[port].max_fcoe_conn) ^
11598                                   FW_ENCODE_32BIT_PATTERN;
11599                         if (lic)
11600                                 count++;
11601                 }
11602         }
11603
11604         return count;
11605 }
11606
11607 static void bnx2x_get_fcoe_info(struct bnx2x *bp)
11608 {
11609         int port = BP_PORT(bp);
11610         int func = BP_ABS_FUNC(bp);
11611         u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11612                                 drv_lic_key[port].max_fcoe_conn);
11613         u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
11614
11615         if (!CNIC_SUPPORT(bp)) {
11616                 bp->flags |= NO_FCOE_FLAG;
11617                 return;
11618         }
11619
11620         /* Get the number of maximum allowed FCoE connections */
11621         bp->cnic_eth_dev.max_fcoe_conn =
11622                 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
11623                 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
11624
11625         /* Calculate the number of maximum allowed FCoE tasks */
11626         bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
11627
11628         /* check if FCoE resources must be shared between different functions */
11629         if (num_fcoe_func)
11630                 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
11631
11632         /* Read the WWN: */
11633         if (!IS_MF(bp)) {
11634                 /* Port info */
11635                 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11636                         SHMEM_RD(bp,
11637                                  dev_info.port_hw_config[port].
11638                                  fcoe_wwn_port_name_upper);
11639                 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11640                         SHMEM_RD(bp,
11641                                  dev_info.port_hw_config[port].
11642                                  fcoe_wwn_port_name_lower);
11643
11644                 /* Node info */
11645                 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11646                         SHMEM_RD(bp,
11647                                  dev_info.port_hw_config[port].
11648                                  fcoe_wwn_node_name_upper);
11649                 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11650                         SHMEM_RD(bp,
11651                                  dev_info.port_hw_config[port].
11652                                  fcoe_wwn_node_name_lower);
11653         } else if (!IS_MF_SD(bp)) {
11654                 /* Read the WWN info only if the FCoE feature is enabled for
11655                  * this function.
11656                  */
11657                 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
11658                         bnx2x_get_ext_wwn_info(bp, func);
11659         } else {
11660                 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
11661                         bnx2x_get_ext_wwn_info(bp, func);
11662         }
11663
11664         BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
11665
11666         /*
11667          * If maximum allowed number of connections is zero -
11668          * disable the feature.
11669          */
11670         if (!bp->cnic_eth_dev.max_fcoe_conn)
11671                 bp->flags |= NO_FCOE_FLAG;
11672 }
11673
11674 static void bnx2x_get_cnic_info(struct bnx2x *bp)
11675 {
11676         /*
11677          * iSCSI may be dynamically disabled, but by reading the
11678          * info here the driver can decrease its memory usage
11679          * if the feature is disabled for good.
11680          */
11681         bnx2x_get_iscsi_info(bp);
11682         bnx2x_get_fcoe_info(bp);
11683 }
11684
11685 static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
11686 {
11687         u32 val, val2;
11688         int func = BP_ABS_FUNC(bp);
11689         int port = BP_PORT(bp);
11690         u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11691         u8 *fip_mac = bp->fip_mac;
11692
11693         if (IS_MF(bp)) {
11694                 /* iSCSI and FCoE NPAR MACs: if there is neither an iSCSI
11695                  * nor an FCoE MAC then the appropriate feature should be
11696                  * disabled.
11697                  * In non-SD mode the feature configuration comes from
11698                  * struct func_ext_config.
11699                  */
11699                 if (!IS_MF_SD(bp)) {
11700                         u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11701                         if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
11702                                 val2 = MF_CFG_RD(bp, func_ext_config[func].
11703                                                  iscsi_mac_addr_upper);
11704                                 val = MF_CFG_RD(bp, func_ext_config[func].
11705                                                 iscsi_mac_addr_lower);
11706                                 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11707                                 BNX2X_DEV_INFO
11708                                         ("Read iSCSI MAC: %pM\n", iscsi_mac);
11709                         } else {
11710                                 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11711                         }
11712
11713                         if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
11714                                 val2 = MF_CFG_RD(bp, func_ext_config[func].
11715                                                  fcoe_mac_addr_upper);
11716                                 val = MF_CFG_RD(bp, func_ext_config[func].
11717                                                 fcoe_mac_addr_lower);
11718                                 bnx2x_set_mac_buf(fip_mac, val, val2);
11719                                 BNX2X_DEV_INFO
11720                                         ("Read FCoE L2 MAC: %pM\n", fip_mac);
11721                         } else {
11722                                 bp->flags |= NO_FCOE_FLAG;
11723                         }
11724
11725                         bp->mf_ext_config = cfg;
11726
11727                 } else { /* SD MODE */
11728                         if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
11729                                 /* use primary mac as iscsi mac */
11730                                 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11731
11732                                 BNX2X_DEV_INFO("SD ISCSI MODE\n");
11733                                 BNX2X_DEV_INFO
11734                                         ("Read iSCSI MAC: %pM\n", iscsi_mac);
11735                         } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
11736                                 /* use primary mac as fip mac */
11737                                 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11738                                 BNX2X_DEV_INFO("SD FCoE MODE\n");
11739                                 BNX2X_DEV_INFO
11740                                         ("Read FIP MAC: %pM\n", fip_mac);
11741                         }
11742                 }
11743
11744                 /* If this is a storage-only interface, use SAN mac as
11745                  * primary MAC. Notice that for SD this is already the case,
11746                  * as the SAN mac was copied from the primary MAC.
11747                  */
11748                 if (IS_MF_FCOE_AFEX(bp))
11749                         memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
11750         } else {
11751                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11752                                 iscsi_mac_upper);
11753                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11754                                iscsi_mac_lower);
11755                 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11756
11757                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11758                                 fcoe_fip_mac_upper);
11759                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11760                                fcoe_fip_mac_lower);
11761                 bnx2x_set_mac_buf(fip_mac, val, val2);
11762         }
11763
11764         /* Disable iSCSI OOO if MAC configuration is invalid. */
11765         if (!is_valid_ether_addr(iscsi_mac)) {
11766                 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11767                 eth_zero_addr(iscsi_mac);
11768         }
11769
11770         /* Disable FCoE if MAC configuration is invalid. */
11771         if (!is_valid_ether_addr(fip_mac)) {
11772                 bp->flags |= NO_FCOE_FLAG;
11773                 eth_zero_addr(bp->fip_mac);
11774         }
11775 }
11776
11777 static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
11778 {
11779         u32 val, val2;
11780         int func = BP_ABS_FUNC(bp);
11781         int port = BP_PORT(bp);
11782
11783         /* Zero primary MAC configuration */
11784         eth_zero_addr(bp->dev->dev_addr);
11785
11786         if (BP_NOMCP(bp)) {
11787                 BNX2X_ERROR("warning: random MAC workaround active\n");
11788                 eth_hw_addr_random(bp->dev);
11789         } else if (IS_MF(bp)) {
11790                 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11791                 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11792                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11793                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
11794                         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11795
11796                 if (CNIC_SUPPORT(bp))
11797                         bnx2x_get_cnic_mac_hwinfo(bp);
11798         } else {
11799                 /* in SF read MACs from port configuration */
11800                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11801                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11802                 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11803
11804                 if (CNIC_SUPPORT(bp))
11805                         bnx2x_get_cnic_mac_hwinfo(bp);
11806         }
11807
11808         if (!BP_NOMCP(bp)) {
11809                 /* Read physical port identifier from shmem */
11810                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11811                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11812                 bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
11813                 bp->flags |= HAS_PHYS_PORT_ID;
11814         }
11815
11816         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
11817
11818         if (!is_valid_ether_addr(bp->dev->dev_addr))
11819                 dev_err(&bp->pdev->dev,
11820                         "bad Ethernet MAC address configuration: %pM\n"
11821                         "change it manually before bringing up the appropriate network interface\n",
11822                         bp->dev->dev_addr);
11823 }
11824
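/* Report whether "pause on host ring" (dropless flow control) is enabled in
 * NVRAM: per function on non-E1x MF chips, per port otherwise.
 */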
11825 static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11826 {
11827         int tmp;
11828         u32 cfg;
11829
11830         if (IS_VF(bp))
11831                 return false;
11832
11833         if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
11834                 /* Take function: tmp = func */
11835                 tmp = BP_ABS_FUNC(bp);
11836                 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11837                 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11838         } else {
11839                 /* Take port: tmp = port */
11840                 tmp = BP_PORT(bp);
11841                 cfg = SHMEM_RD(bp,
11842                                dev_info.port_hw_config[tmp].generic_features);
11843                 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11844         }
11845         return cfg;
11846 }
11847
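/* If the upper MAC bytes in the MF config are valid, select
 * Switch-Independent (SI) multi-function mode and cache this VN's MF config.
 */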
11848 static void validate_set_si_mode(struct bnx2x *bp)
11849 {
11850         u8 func = BP_ABS_FUNC(bp);
11851         u32 val;
11852
11853         val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11854
11855         /* check for legal mac (upper bytes) */
11856         if (val != 0xffff) {
11857                 bp->mf_mode = MULTI_FUNCTION_SI;
11858                 bp->mf_config[BP_VN(bp)] =
11859                         MF_CFG_RD(bp, func_mf_config[func].config);
11860         } else
11861                 BNX2X_DEV_INFO("illegal MAC address for SI\n");
11862 }
11863
11864 static int bnx2x_get_hwinfo(struct bnx2x *bp)
11865 {
11866         int func = BP_ABS_FUNC(bp);     /* absolute function id */
11867         int vn, mfw_vn;
11868         u32 val = 0, val2 = 0;
11869         int rc = 0;
11870
11871         /* Validate that chip access is feasible */
11872         if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
11873                 dev_err(&bp->pdev->dev,
11874                         "Chip read returns all Fs. Preventing probe from continuing\n");
11875                 return -EINVAL;
11876         }
11877
11878         bnx2x_get_common_hwinfo(bp);
11879
11880         /*
11881          * initialize IGU parameters
11882          */
11883         if (CHIP_IS_E1x(bp)) {
11884                 bp->common.int_block = INT_BLOCK_HC;
11885
11886                 bp->igu_dsb_id = DEF_SB_IGU_ID;
11887                 bp->igu_base_sb = 0;
11888         } else {
11889                 bp->common.int_block = INT_BLOCK_IGU;
11890
11891                 /* do not allow device reset during IGU info processing */
11892                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11893
11894                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
11895
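                /* If the IGU came up in backward-compatible mode, force it
                 * into normal mode and reset its memories, polling until
                 * IGU_REG_RESET_MEMORIES reads back as zero.
                 */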
11896                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11897                         int tout = 5000;
11898
11899                         BNX2X_DEV_INFO("FORCING Normal Mode\n");
11900
11901                         val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
11902                         REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
11903                         REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
11904
11905                         while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11906                                 tout--;
11907                                 usleep_range(1000, 2000);
11908                         }
11909
11910                         if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11911                                 dev_err(&bp->pdev->dev,
11912                                         "FORCING Normal Mode failed!!!\n");
11913                                 bnx2x_release_hw_lock(bp,
11914                                                       HW_LOCK_RESOURCE_RESET);
11915                                 return -EPERM;
11916                         }
11917                 }
11918
11919                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11920                         BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
11921                         bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
11922                 } else
11923                         BNX2X_DEV_INFO("IGU Normal Mode\n");
11924
11925                 rc = bnx2x_get_igu_cam_info(bp);
11926                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11927                 if (rc)
11928                         return rc;
11929         }
11930
11931         /*
11932          * Set the base FW non-default (fast path) status block ID. This value
11933          * is used to initialize the fw_sb_id saved in the fp/queue structure,
11934          * which determines the ID used by the FW.
11935          */
11936         if (CHIP_IS_E1x(bp))
11937                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
11938         else /*
11939               * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
11940               * the same queue are indicated on the same IGU SB). So we prefer
11941               * FW and IGU SBs to be the same value.
11942               */
11943                 bp->base_fw_ndsb = bp->igu_base_sb;
11944
11945         BNX2X_DEV_INFO("igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n"
11946                        "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
11947                        bp->igu_sb_cnt, bp->base_fw_ndsb);
11948
11949         /*
11950          * Initialize MF configuration
11951          */
11952
11953         bp->mf_ov = 0;
11954         bp->mf_mode = 0;
11955         bp->mf_sub_mode = 0;
11956         vn = BP_VN(bp);
11957         mfw_vn = BP_FW_MB_IDX(bp);
11958
11959         if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
11960                 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
11961                                bp->common.shmem2_base, SHMEM2_RD(bp, size),
11962                               (u32)offsetof(struct shmem2_region, mf_cfg_addr));
11963
11964                 if (SHMEM2_HAS(bp, mf_cfg_addr))
11965                         bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
11966                 else
11967                         bp->common.mf_cfg_base = bp->common.shmem_base +
11968                                 offsetof(struct shmem_region, func_mb) +
11969                                 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
11970                 /*
11971                  * get mf configuration:
11972                  * 1. Existence of MF configuration
11973                  * 2. MAC address must be legal (check only upper bytes)
11974                  *    for  Switch-Independent mode;
11975                  *    OVLAN must be legal for Switch-Dependent mode
11976                  * 3. SF_MODE configures specific MF mode
11977                  */
11978                 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
11979                         /* get mf configuration */
11980                         val = SHMEM_RD(bp,
11981                                        dev_info.shared_feature_config.config);
11982                         val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
11983
11984                         switch (val) {
11985                         case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
11986                                 validate_set_si_mode(bp);
11987                                 break;
11988                         case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
11989                                 if ((!CHIP_IS_E1x(bp)) &&
11990                                     (MF_CFG_RD(bp, func_mf_config[func].
11991                                                mac_upper) != 0xffff) &&
11992                                     (SHMEM2_HAS(bp,
11993                                                 afex_driver_support))) {
11994                                         bp->mf_mode = MULTI_FUNCTION_AFEX;
11995                                         bp->mf_config[vn] = MF_CFG_RD(bp,
11996                                                 func_mf_config[func].config);
11997                                 } else {
11998                                         BNX2X_DEV_INFO("cannot configure afex mode\n");
11999                                 }
12000                                 break;
12001                         case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
12002                                 /* get OV configuration */
12003                                 val = MF_CFG_RD(bp,
12004                                         func_mf_config[FUNC_0].e1hov_tag);
12005                                 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
12006
12007                                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12008                                         bp->mf_mode = MULTI_FUNCTION_SD;
12009                                         bp->mf_config[vn] = MF_CFG_RD(bp,
12010                                                 func_mf_config[func].config);
12011                                 } else
12012                                         BNX2X_DEV_INFO("illegal OV for SD\n");
12013                                 break;
12014                         case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
12015                                 bp->mf_mode = MULTI_FUNCTION_SD;
12016                                 bp->mf_sub_mode = SUB_MF_MODE_BD;
12017                                 bp->mf_config[vn] =
12018                                         MF_CFG_RD(bp,
12019                                                   func_mf_config[func].config);
12020
12021                                 if (SHMEM2_HAS(bp, mtu_size)) {
12022                                         int mtu_idx = BP_FW_MB_IDX(bp);
12023                                         u16 mtu_size;
12024                                         u32 mtu;
12025
12026                                         mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
12027                                         mtu_size = (u16)mtu;
12028                                         DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
12029                                            mtu_size, mtu);
12030
12031                                         /* if valid: update device mtu */
12032                                         if (((mtu_size + ETH_HLEN) >=
12033                                              ETH_MIN_PACKET_SIZE) &&
12034                                             (mtu_size <=
12035                                              ETH_MAX_JUMBO_PACKET_SIZE))
12036                                                 bp->dev->mtu = mtu_size;
12037                                 }
12038                                 break;
12039                         case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
12040                                 bp->mf_mode = MULTI_FUNCTION_SD;
12041                                 bp->mf_sub_mode = SUB_MF_MODE_UFP;
12042                                 bp->mf_config[vn] =
12043                                         MF_CFG_RD(bp,
12044                                                   func_mf_config[func].config);
12045                                 break;
12046                         case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
12047                                 bp->mf_config[vn] = 0;
12048                                 break;
12049                         case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
12050                                 val2 = SHMEM_RD(bp,
12051                                         dev_info.shared_hw_config.config_3);
12052                                 val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
12053                                 switch (val2) {
12054                                 case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
12055                                         validate_set_si_mode(bp);
12056                                         bp->mf_sub_mode =
12057                                                         SUB_MF_MODE_NPAR1_DOT_5;
12058                                         break;
12059                                 default:
12060                                         /* Unknown configuration */
12061                                         bp->mf_config[vn] = 0;
12062                                 BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
12063                                                val2);
12064                                 }
12065                                 break;
12066                         default:
12067                                 /* Unknown configuration: reset mf_config */
12068                                 bp->mf_config[vn] = 0;
12069                                 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
12070                         }
12071                 }
12072
12073                 BNX2X_DEV_INFO("%s function mode\n",
12074                                IS_MF(bp) ? "multi" : "single");
12075
12076                 switch (bp->mf_mode) {
12077                 case MULTI_FUNCTION_SD:
12078                         val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
12079                               FUNC_MF_CFG_E1HOV_TAG_MASK;
12080                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12081                                 bp->mf_ov = val;
12082                                 bp->path_has_ovlan = true;
12083
12084                                 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
12085                                                func, bp->mf_ov, bp->mf_ov);
12086                         } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
12087                                    (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
12088                                 dev_err(&bp->pdev->dev,
12089                                         "Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
12090                                         func);
12091                                 bp->path_has_ovlan = true;
12092                         } else {
12093                                 dev_err(&bp->pdev->dev,
12094                                         "No valid MF OV for func %d, aborting\n",
12095                                         func);
12096                                 return -EPERM;
12097                         }
12098                         break;
12099                 case MULTI_FUNCTION_AFEX:
12100                         BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
12101                         break;
12102                 case MULTI_FUNCTION_SI:
12103                         BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
12104                                        func);
12105                         break;
12106                 default:
12107                         if (vn) {
12108                                 dev_err(&bp->pdev->dev,
12109                                         "VN %d is in a single function mode, aborting\n",
12110                                         vn);
12111                                 return -EPERM;
12112                         }
12113                         break;
12114                 }
12115
12116                 /* Check if the other port on the path needs ovlan:
12117                  * since the MF configuration is shared between ports,
12118                  * the only possible mixed modes are
12119                  * {SF, SI}, {SF, SD}, {SD, SF} and {SI, SF}.
12120                  */
12121                 if (CHIP_MODE_IS_4_PORT(bp) &&
12122                     !bp->path_has_ovlan &&
12123                     !IS_MF(bp) &&
12124                     bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12125                         u8 other_port = !BP_PORT(bp);
12126                         u8 other_func = BP_PATH(bp) + 2*other_port;
12127                         val = MF_CFG_RD(bp,
12128                                         func_mf_config[other_func].e1hov_tag);
12129                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
12130                                 bp->path_has_ovlan = true;
12131                 }
12132         }
12133
12134         /* adjust igu_sb_cnt to MF for E1H */
12135         if (CHIP_IS_E1H(bp) && IS_MF(bp))
12136                 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
12137
12138         /* port info */
12139         bnx2x_get_port_hwinfo(bp);
12140
12141         /* Get MAC addresses */
12142         bnx2x_get_mac_hwinfo(bp);
12143
12144         bnx2x_get_cnic_info(bp);
12145
12146         return rc;
12147 }
12148
12149 static void bnx2x_read_fwinfo(struct bnx2x *bp)
12150 {
12151         int cnt, i, block_end, rodi;
12152         char vpd_start[BNX2X_VPD_LEN+1];
12153         char str_id_reg[VENDOR_ID_LEN+1];
12154         char str_id_cap[VENDOR_ID_LEN+1];
12155         char *vpd_data;
12156         char *vpd_extended_data = NULL;
12157         u8 len;
12158
12159         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
12160         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
12161
12162         if (cnt < BNX2X_VPD_LEN)
12163                 goto out_not_found;
12164
12165         /* The VPD RO tag should be the first tag after the identifier string,
12166          * hence we should be able to find it within the first BNX2X_VPD_LEN chars
12167          */
12168         i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
12169                              PCI_VPD_LRDT_RO_DATA);
12170         if (i < 0)
12171                 goto out_not_found;
12172
12173         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
12174                     pci_vpd_lrdt_size(&vpd_start[i]);
12175
12176         i += PCI_VPD_LRDT_TAG_SIZE;
12177
12178         if (block_end > BNX2X_VPD_LEN) {
12179                 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
12180                 if (!vpd_extended_data)
12181                         goto out_not_found;
12182
12183                 /* read rest of vpd image into vpd_extended_data */
12184                 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
12185                 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
12186                                    block_end - BNX2X_VPD_LEN,
12187                                    vpd_extended_data + BNX2X_VPD_LEN);
12188                 if (cnt < (block_end - BNX2X_VPD_LEN))
12189                         goto out_not_found;
12190                 vpd_data = vpd_extended_data;
12191         } else
12192                 vpd_data = vpd_start;
12193
12194         /* now vpd_data holds the full VPD content in both cases */
12195
12196         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12197                                    PCI_VPD_RO_KEYWORD_MFR_ID);
12198         if (rodi < 0)
12199                 goto out_not_found;
12200
12201         len = pci_vpd_info_field_size(&vpd_data[rodi]);
12202
12203         if (len != VENDOR_ID_LEN)
12204                 goto out_not_found;
12205
12206         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
12207
12208         /* vendor specific info */
12209         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
12210         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
12211         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
12212             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
12213
12214                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12215                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
12216                 if (rodi >= 0) {
12217                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
12218
12219                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
12220
12221                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
12222                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
12223                                 bp->fw_ver[len] = ' ';
12224                         }
12225                 }
12226                 kfree(vpd_extended_data);
12227                 return;
12228         }
12229 out_not_found:
12230         kfree(vpd_extended_data);
12231         return;
12232 }
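/* The walk above relies on the PCI VPD layout: a large-resource read-only tag
 * byte (0x90) followed by a 16-bit little-endian length, then a run of
 * {keyword[2], len[1], data[len]} info fields. Below is a minimal userspace
 * sketch of that layout; the buffer contents are invented and the helper
 * mirrors, but does not use, the kernel's pci_vpd_* accessors.
 */
#if 0	/* illustrative sketch, not built with the driver */
#include <stdio.h>
#include <string.h>

#define VPD_LRDT_RO_DATA	0x90	/* large resource, read-only data */

/* Return the offset of an info field ("MN", "V0", ...) inside the RO area */
static int vpd_find_keyword(const unsigned char *ro, int len, const char *kw)
{
	int i = 0;

	while (i + 3 <= len) {
		if (!memcmp(&ro[i], kw, 2))
			return i;		/* offset of the field header */
		i += 3 + ro[i + 2];		/* skip header and field data */
	}
	return -1;
}

int main(void)
{
	/* tag 0x90, length 0x000a (LE), then "MN" len 4 "1028", "V0" len 0 */
	static const unsigned char vpd[] = {
		0x90, 0x0a, 0x00,
		'M', 'N', 4, '1', '0', '2', '8',
		'V', '0', 0,
	};
	int ro_len = vpd[1] | (vpd[2] << 8);
	int off = vpd_find_keyword(&vpd[3], ro_len, "MN");

	if (vpd[0] == VPD_LRDT_RO_DATA && off >= 0)
		printf("MFR_ID field at RO offset %d, len %d\n",
		       off, vpd[3 + off + 2]);
	return 0;
}
#endif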
12233
12234 static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
12235 {
12236         u32 flags = 0;
12237
12238         if (CHIP_REV_IS_FPGA(bp))
12239                 SET_FLAGS(flags, MODE_FPGA);
12240         else if (CHIP_REV_IS_EMUL(bp))
12241                 SET_FLAGS(flags, MODE_EMUL);
12242         else
12243                 SET_FLAGS(flags, MODE_ASIC);
12244
12245         if (CHIP_MODE_IS_4_PORT(bp))
12246                 SET_FLAGS(flags, MODE_PORT4);
12247         else
12248                 SET_FLAGS(flags, MODE_PORT2);
12249
12250         if (CHIP_IS_E2(bp))
12251                 SET_FLAGS(flags, MODE_E2);
12252         else if (CHIP_IS_E3(bp)) {
12253                 SET_FLAGS(flags, MODE_E3);
12254                 if (CHIP_REV(bp) == CHIP_REV_Ax)
12255                         SET_FLAGS(flags, MODE_E3_A0);
12256                 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
12257                         SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
12258         }
12259
12260         if (IS_MF(bp)) {
12261                 SET_FLAGS(flags, MODE_MF);
12262                 switch (bp->mf_mode) {
12263                 case MULTI_FUNCTION_SD:
12264                         SET_FLAGS(flags, MODE_MF_SD);
12265                         break;
12266                 case MULTI_FUNCTION_SI:
12267                         SET_FLAGS(flags, MODE_MF_SI);
12268                         break;
12269                 case MULTI_FUNCTION_AFEX:
12270                         SET_FLAGS(flags, MODE_MF_AFEX);
12271                         break;
12272                 }
12273         } else
12274                 SET_FLAGS(flags, MODE_SF);
12275
12276 #if defined(__LITTLE_ENDIAN)
12277         SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
12278 #else /*(__BIG_ENDIAN)*/
12279         SET_FLAGS(flags, MODE_BIG_ENDIAN);
12280 #endif
12281         INIT_MODE_FLAGS(bp) = flags;
12282 }
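/* bnx2x_set_modes_bitmap() folds every build/chip/function property into one
 * bitmask so later init code can test a single word. A tiny userspace sketch
 * of the same accumulate-then-test pattern follows; the flag values here are
 * invented for illustration and are not the driver's MODE_* constants.
 */
#if 0	/* illustrative sketch, not built with the driver */
#include <stdio.h>

#define MODE_ASIC	(1u << 0)
#define MODE_PORT2	(1u << 1)
#define MODE_PORT4	(1u << 2)
#define MODE_SF		(1u << 3)
#define MODE_MF		(1u << 4)

int main(void)
{
	unsigned int flags = 0;
	int four_port = 0, multi_function = 1;

	flags |= MODE_ASIC;				/* one of ASIC/FPGA/EMUL */
	flags |= four_port ? MODE_PORT4 : MODE_PORT2;	/* exactly one port mode */
	flags |= multi_function ? MODE_MF : MODE_SF;	/* exactly one func mode */

	if (flags & MODE_MF)
		printf("MF init path selected, flags=0x%x\n", flags);
	return 0;
}
#endif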
12283
12284 static int bnx2x_init_bp(struct bnx2x *bp)
12285 {
12286         int func;
12287         int rc;
12288
12289         mutex_init(&bp->port.phy_mutex);
12290         mutex_init(&bp->fw_mb_mutex);
12291         mutex_init(&bp->drv_info_mutex);
12292         sema_init(&bp->stats_lock, 1);
12293         bp->drv_info_mng_owner = false;
12294         INIT_LIST_HEAD(&bp->vlan_reg);
12295
12296         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
12297         INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
12298         INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
12299         INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
12300         if (IS_PF(bp)) {
12301                 rc = bnx2x_get_hwinfo(bp);
12302                 if (rc)
12303                         return rc;
12304         } else {
12305                 eth_zero_addr(bp->dev->dev_addr);
12306         }
12307
12308         bnx2x_set_modes_bitmap(bp);
12309
12310         rc = bnx2x_alloc_mem_bp(bp);
12311         if (rc)
12312                 return rc;
12313
12314         bnx2x_read_fwinfo(bp);
12315
12316         func = BP_FUNC(bp);
12317
12318         /* need to reset chip if undi was active */
12319         if (IS_PF(bp) && !BP_NOMCP(bp)) {
12320                 /* init fw_seq */
12321                 bp->fw_seq =
12322                         SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12323                                                         DRV_MSG_SEQ_NUMBER_MASK;
12324                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12325
12326                 rc = bnx2x_prev_unload(bp);
12327                 if (rc) {
12328                         bnx2x_free_mem_bp(bp);
12329                         return rc;
12330                 }
12331         }
12332
12333         if (CHIP_REV_IS_FPGA(bp))
12334                 dev_err(&bp->pdev->dev, "FPGA detected\n");
12335
12336         if (BP_NOMCP(bp) && (func == 0))
12337                 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
12338
12339         bp->disable_tpa = disable_tpa;
12340         bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
12341         /* Reduce memory usage in kdump environment by disabling TPA */
12342         bp->disable_tpa |= is_kdump_kernel();
12343
12344         /* Set TPA flags */
12345         if (bp->disable_tpa) {
12346                 bp->dev->hw_features &= ~NETIF_F_LRO;
12347                 bp->dev->features &= ~NETIF_F_LRO;
12348         }
12349
12350         if (CHIP_IS_E1(bp))
12351                 bp->dropless_fc = 0;
12352         else
12353                 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
12354
12355         bp->mrrs = mrrs;
12356
12357         bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
12358         if (IS_VF(bp))
12359                 bp->rx_ring_size = MAX_RX_AVAIL;
12360
12361         /* make sure that the numbers are in the right granularity */
12362         bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
12363         bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
12364
12365         bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
12366
12367         init_timer(&bp->timer);
12368         bp->timer.expires = jiffies + bp->current_interval;
12369         bp->timer.data = (unsigned long) bp;
12370         bp->timer.function = bnx2x_timer;
12371
12372         if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
12373             SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
12374             SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
12375             SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
12376                 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
12377                 bnx2x_dcbx_init_params(bp);
12378         } else {
12379                 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
12380         }
12381
12382         if (CHIP_IS_E1x(bp))
12383                 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
12384         else
12385                 bp->cnic_base_cl_id = FP_SB_MAX_E2;
12386
12387         /* multiple tx priority */
12388         if (IS_VF(bp))
12389                 bp->max_cos = 1;
12390         else if (CHIP_IS_E1x(bp))
12391                 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
12392         else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
12393                 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
12394         else if (CHIP_IS_E3B0(bp))
12395                 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
12396         else
12397                 BNX2X_ERR("unknown chip %x revision %x\n",
12398                           CHIP_NUM(bp), CHIP_REV(bp));
12399         BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
12400
12401         /* We need at least one default status block for slow-path events,
12402          * a second status block for the L2 queue, and a third status block for
12403          * CNIC if supported.
12404          */
12405         if (IS_VF(bp))
12406                 bp->min_msix_vec_cnt = 1;
12407         else if (CNIC_SUPPORT(bp))
12408                 bp->min_msix_vec_cnt = 3;
12409         else /* PF w/o cnic */
12410                 bp->min_msix_vec_cnt = 2;
12411         BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d\n", bp->min_msix_vec_cnt);
12412
12413         bp->dump_preset_idx = 1;
12414
12415         if (CHIP_IS_E3B0(bp))
12416                 bp->flags |= PTP_SUPPORTED;
12417
12418         return rc;
12419 }
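/* bnx2x_init_bp() rounds the default coalescing values down to a multiple of
 * BNX2X_BTR with the (n / BTR) * BTR idiom. A one-line demonstration, with an
 * assumed granularity of 4 (the real value is BNX2X_BTR):
 */
#if 0	/* illustrative sketch, not built with the driver */
#include <stdio.h>

#define BTR 4	/* assumed tick granularity */

int main(void)
{
	int tx = (50 / BTR) * BTR;	/* 50 -> 48 */
	int rx = (25 / BTR) * BTR;	/* 25 -> 24 */

	printf("tx_ticks %d, rx_ticks %d\n", tx, rx);
	return 0;
}
#endif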
12420
12421 /****************************************************************************
12422 * General service functions
12423 ****************************************************************************/
12424
12425 /*
12426  * net_device service functions
12427  */
12428
12429 /* called with rtnl_lock */
12430 static int bnx2x_open(struct net_device *dev)
12431 {
12432         struct bnx2x *bp = netdev_priv(dev);
12433         int rc;
12434
12435         bp->stats_init = true;
12436
12437         netif_carrier_off(dev);
12438
12439         bnx2x_set_power_state(bp, PCI_D0);
12440
12441         /* If a parity error occurred during the unload, then attentions
12442          * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
12443          * want the first function loaded on the current engine to
12444          * complete the recovery.
12445          * Parity recovery is only relevant for the PF driver.
12446          */
12447         if (IS_PF(bp)) {
12448                 int other_engine = BP_PATH(bp) ? 0 : 1;
12449                 bool other_load_status, load_status;
12450                 bool global = false;
12451
12452                 other_load_status = bnx2x_get_load_status(bp, other_engine);
12453                 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
12454                 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
12455                     bnx2x_chk_parity_attn(bp, &global, true)) {
12456                         do {
12457                                 /* If there are attentions and they are in
12458                                  * global blocks, set the GLOBAL_RESET bit
12459                                  * regardless of whether this function will
12460                                  * complete the recovery or not.
12461                                  */
12462                                 if (global)
12463                                         bnx2x_set_reset_global(bp);
12464
12465                                 /* Only the first function on the current
12466                                  * engine should try to recover in open. In case
12467                                  * of attentions in global blocks only the first
12468                                  * in the chip should try to recover.
12469                                  */
12470                                 if ((!load_status &&
12471                                      (!global || !other_load_status)) &&
12472                                       bnx2x_trylock_leader_lock(bp) &&
12473                                       !bnx2x_leader_reset(bp)) {
12474                                         netdev_info(bp->dev,
12475                                                     "Recovered in open\n");
12476                                         break;
12477                                 }
12478
12479                                 /* recovery has failed... */
12480                                 bnx2x_set_power_state(bp, PCI_D3hot);
12481                                 bp->recovery_state = BNX2X_RECOVERY_FAILED;
12482
12483                                 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
12484                                           "If you still see this message after a few retries then power cycle is required.\n");
12485
12486                                 return -EAGAIN;
12487                         } while (0);
12488                 }
12489         }
12490
12491         bp->recovery_state = BNX2X_RECOVERY_DONE;
12492         rc = bnx2x_nic_load(bp, LOAD_OPEN);
12493         if (rc)
12494                 return rc;
12495
12496 #ifdef CONFIG_BNX2X_VXLAN
12497         if (IS_PF(bp))
12498                 vxlan_get_rx_port(dev);
12499 #endif
12500
12501         return 0;
12502 }
12503
12504 /* called with rtnl_lock */
12505 static int bnx2x_close(struct net_device *dev)
12506 {
12507         struct bnx2x *bp = netdev_priv(dev);
12508
12509         /* Unload the driver, release IRQs */
12510         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
12511
12512         return 0;
12513 }
12514
12515 static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
12516                                       struct bnx2x_mcast_ramrod_params *p)
12517 {
12518         int mc_count = netdev_mc_count(bp->dev);
12519         struct bnx2x_mcast_list_elem *mc_mac =
12520                 kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
12521         struct netdev_hw_addr *ha;
12522
12523         if (!mc_mac)
12524                 return -ENOMEM;
12525
12526         INIT_LIST_HEAD(&p->mcast_list);
12527
12528         netdev_for_each_mc_addr(ha, bp->dev) {
12529                 mc_mac->mac = bnx2x_mc_addr(ha);
12530                 list_add_tail(&mc_mac->link, &p->mcast_list);
12531                 mc_mac++;
12532         }
12533
12534         p->mcast_list_len = mc_count;
12535
12536         return 0;
12537 }
12538
12539 static void bnx2x_free_mcast_macs_list(
12540         struct bnx2x_mcast_ramrod_params *p)
12541 {
12542         struct bnx2x_mcast_list_elem *mc_mac =
12543                 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
12544                                  link);
12545
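	/* The elements were carved out of the single kcalloc() made in
	 * bnx2x_init_mcast_macs_list(), so freeing the first entry releases
	 * the entire list.
	 */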
12546         WARN_ON(!mc_mac);
12547         kfree(mc_mac);
12548 }
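/* The pair of helpers above uses one kcalloc() to back every list element,
 * which is why freeing list_first_entry() is enough. A userspace sketch of
 * the same single-allocation list pattern, with a minimal next pointer in
 * place of the kernel's list_head:
 */
#if 0	/* illustrative sketch, not built with the driver */
#include <stdio.h>
#include <stdlib.h>

struct elem {
	int val;
	struct elem *next;
};

int main(void)
{
	int n = 4, i;
	struct elem *arr = calloc(n, sizeof(*arr));	/* one allocation */
	struct elem *head = NULL, **tail = &head, *e;

	if (!arr)
		return 1;

	for (i = 0; i < n; i++) {
		arr[i].val = i;
		*tail = &arr[i];
		tail = &arr[i].next;
	}

	for (e = head; e; e = e->next)
		printf("%d ", e->val);
	printf("\n");

	free(head);	/* head == &arr[0]: one free releases the whole list */
	return 0;
}
#endif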
12549
12550 /**
12551  * bnx2x_set_uc_list - configure a new unicast MAC list.
12552  *
12553  * @bp: driver handle
12554  *
12555  * We will use zero (0) as a MAC type for these MACs.
12556  */
12557 static int bnx2x_set_uc_list(struct bnx2x *bp)
12558 {
12559         int rc;
12560         struct net_device *dev = bp->dev;
12561         struct netdev_hw_addr *ha;
12562         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
12563         unsigned long ramrod_flags = 0;
12564
12565         /* First schedule a clean-up of the old configuration */
12566         rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
12567         if (rc < 0) {
12568                 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
12569                 return rc;
12570         }
12571
12572         netdev_for_each_uc_addr(ha, dev) {
12573                 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
12574                                        BNX2X_UC_LIST_MAC, &ramrod_flags);
12575                 if (rc == -EEXIST) {
12576                         DP(BNX2X_MSG_SP,
12577                            "Failed to schedule ADD operations: %d\n", rc);
12578                         /* do not treat adding same MAC as error */
12579                         rc = 0;
12580
12581                 } else if (rc < 0) {
12582
12583                         BNX2X_ERR("Failed to schedule ADD operations: %d\n",
12584                                   rc);
12585                         return rc;
12586                 }
12587         }
12588
12589         /* Execute the pending commands */
12590         __set_bit(RAMROD_CONT, &ramrod_flags);
12591         return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
12592                                  BNX2X_UC_LIST_MAC, &ramrod_flags);
12593 }
12594
12595 static int bnx2x_set_mc_list(struct bnx2x *bp)
12596 {
12597         struct net_device *dev = bp->dev;
12598         struct bnx2x_mcast_ramrod_params rparam = {NULL};
12599         int rc = 0;
12600
12601         rparam.mcast_obj = &bp->mcast_obj;
12602
12603         /* first, clear all configured multicast MACs */
12604         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12605         if (rc < 0) {
12606                 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
12607                 return rc;
12608         }
12609
12610         /* then, configure the new MAC list */
12611         if (netdev_mc_count(dev)) {
12612                 rc = bnx2x_init_mcast_macs_list(bp, &rparam);
12613                 if (rc) {
12614                         BNX2X_ERR("Failed to create multicast MACs list: %d\n",
12615                                   rc);
12616                         return rc;
12617                 }
12618
12619                 /* Now add the new MACs */
12620                 rc = bnx2x_config_mcast(bp, &rparam,
12621                                         BNX2X_MCAST_CMD_ADD);
12622                 if (rc < 0)
12623                         BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12624                                   rc);
12625
12626                 bnx2x_free_mcast_macs_list(&rparam);
12627         }
12628
12629         return rc;
12630 }
12631
12632 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
12633 static void bnx2x_set_rx_mode(struct net_device *dev)
12634 {
12635         struct bnx2x *bp = netdev_priv(dev);
12636
12637         if (bp->state != BNX2X_STATE_OPEN) {
12638                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12639                 return;
12640         } else {
12641                 /* Schedule an SP task to handle rest of change */
12642                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
12643                                        NETIF_MSG_IFUP);
12644         }
12645 }
12646
12647 void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
12648 {
12649         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12650
12651         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
12652
12653         netif_addr_lock_bh(bp->dev);
12654
12655         if (bp->dev->flags & IFF_PROMISC) {
12656                 rx_mode = BNX2X_RX_MODE_PROMISC;
12657         } else if ((bp->dev->flags & IFF_ALLMULTI) ||
12658                    ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
12659                     CHIP_IS_E1(bp))) {
12660                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12661         } else {
12662                 if (IS_PF(bp)) {
12663                         /* some multicasts */
12664                         if (bnx2x_set_mc_list(bp) < 0)
12665                                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12666
12667                         /* release bh lock, as bnx2x_set_uc_list might sleep */
12668                         netif_addr_unlock_bh(bp->dev);
12669                         if (bnx2x_set_uc_list(bp) < 0)
12670                                 rx_mode = BNX2X_RX_MODE_PROMISC;
12671                         netif_addr_lock_bh(bp->dev);
12672                 } else {
12673                         /* configuring mcast to a vf involves sleeping (when we
12674                          * wait for the pf's response).
12675                          */
12676                         bnx2x_schedule_sp_rtnl(bp,
12677                                                BNX2X_SP_RTNL_VFPF_MCAST, 0);
12678                 }
12679         }
12680
12681         bp->rx_mode = rx_mode;
12682         /* handle ISCSI SD mode */
12683         if (IS_MF_ISCSI_ONLY(bp))
12684                 bp->rx_mode = BNX2X_RX_MODE_NONE;
12685
12686         /* Schedule the rx_mode command */
12687         if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
12688                 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
12689                 netif_addr_unlock_bh(bp->dev);
12690                 return;
12691         }
12692
12693         if (IS_PF(bp)) {
12694                 bnx2x_set_storm_rx_mode(bp);
12695                 netif_addr_unlock_bh(bp->dev);
12696         } else {
12697                 /* VF will need to request the PF to make this change, and so
12698                  * the VF needs to release the bottom-half lock prior to the
12699                  * request (as it will likely require sleep on the VF side)
12700                  */
12701                 netif_addr_unlock_bh(bp->dev);
12702                 bnx2x_vfpf_storm_rx_mode(bp);
12703         }
12704 }
12705
12706 /* called with rtnl_lock */
12707 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12708                            int devad, u16 addr)
12709 {
12710         struct bnx2x *bp = netdev_priv(netdev);
12711         u16 value;
12712         int rc;
12713
12714         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12715            prtad, devad, addr);
12716
12717         /* The HW expects different devad if CL22 is used */
12718         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12719
12720         bnx2x_acquire_phy_lock(bp);
12721         rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
12722         bnx2x_release_phy_lock(bp);
12723         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12724
12725         if (!rc)
12726                 rc = value;
12727         return rc;
12728 }
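/* bnx2x_mdio_read() folds the 16-bit register value and the error code into a
 * single int: negative errno on failure, the (always non-negative) value on
 * success. A sketch of the same return convention:
 */
#if 0	/* illustrative sketch, not built with the driver */
#include <errno.h>
#include <stdio.h>

/* Returns a 16-bit register value, or a negative errno. */
static int read_reg(int addr)
{
	if (addr < 0)
		return -EINVAL;	/* failure: negative errno */
	return 0x1234;		/* success: fits in a non-negative int */
}

int main(void)
{
	int rc = read_reg(7);

	if (rc < 0)
		printf("read failed: %d\n", rc);
	else
		printf("value 0x%04x\n", rc);
	return 0;
}
#endif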
12729
12730 /* called with rtnl_lock */
12731 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12732                             u16 addr, u16 value)
12733 {
12734         struct bnx2x *bp = netdev_priv(netdev);
12735         int rc;
12736
12737         DP(NETIF_MSG_LINK,
12738            "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
12739            prtad, devad, addr, value);
12740
12741         /* The HW expects different devad if CL22 is used */
12742         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12743
12744         bnx2x_acquire_phy_lock(bp);
12745         rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
12746         bnx2x_release_phy_lock(bp);
12747         return rc;
12748 }
12749
12750 /* called with rtnl_lock */
12751 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12752 {
12753         struct bnx2x *bp = netdev_priv(dev);
12754         struct mii_ioctl_data *mdio = if_mii(ifr);
12755
12756         if (!netif_running(dev))
12757                 return -EAGAIN;
12758
12759         switch (cmd) {
12760         case SIOCSHWTSTAMP:
12761                 return bnx2x_hwtstamp_ioctl(bp, ifr);
12762         default:
12763                 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12764                    mdio->phy_id, mdio->reg_num, mdio->val_in);
12765                 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12766         }
12767 }
12768
12769 #ifdef CONFIG_NET_POLL_CONTROLLER
12770 static void poll_bnx2x(struct net_device *dev)
12771 {
12772         struct bnx2x *bp = netdev_priv(dev);
12773         int i;
12774
12775         for_each_eth_queue(bp, i) {
12776                 struct bnx2x_fastpath *fp = &bp->fp[i];
12777                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
12778         }
12779 }
12780 #endif
12781
12782 static int bnx2x_validate_addr(struct net_device *dev)
12783 {
12784         struct bnx2x *bp = netdev_priv(dev);
12785
12786         /* query the bulletin board for mac address configured by the PF */
12787         if (IS_VF(bp))
12788                 bnx2x_sample_bulletin(bp);
12789
12790         if (!is_valid_ether_addr(dev->dev_addr)) {
12791                 BNX2X_ERR("Non-valid Ethernet address\n");
12792                 return -EADDRNOTAVAIL;
12793         }
12794         return 0;
12795 }
12796
12797 static int bnx2x_get_phys_port_id(struct net_device *netdev,
12798                                   struct netdev_phys_item_id *ppid)
12799 {
12800         struct bnx2x *bp = netdev_priv(netdev);
12801
12802         if (!(bp->flags & HAS_PHYS_PORT_ID))
12803                 return -EOPNOTSUPP;
12804
12805         ppid->id_len = sizeof(bp->phys_port_id);
12806         memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
12807
12808         return 0;
12809 }
12810
12811 static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
12812                                               struct net_device *dev,
12813                                               netdev_features_t features)
12814 {
12815         features = vlan_features_check(skb, features);
12816         return vxlan_features_check(skb, features);
12817 }
12818
12819 static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
12820 {
12821         int rc;
12822
12823         if (IS_PF(bp)) {
12824                 unsigned long ramrod_flags = 0;
12825
12826                 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
12827                 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
12828                                         add, &ramrod_flags);
12829         } else {
12830                 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
12831         }
12832
12833         return rc;
12834 }
12835
12836 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
12837 {
12838         struct bnx2x_vlan_entry *vlan;
12839         int rc = 0;
12840
12841         if (!bp->vlan_cnt) {
12842                 DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
12843                 return 0;
12844         }
12845
12846         list_for_each_entry(vlan, &bp->vlan_reg, link) {
12847                 /* Prepare for cleanup in case of errors */
12848                 if (rc) {
12849                         vlan->hw = false;
12850                         continue;
12851                 }
12852
12853                 if (!vlan->hw)
12854                         continue;
12855
12856                 DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
12857
12858                 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
12859                 if (rc) {
12860                         BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
12861                         vlan->hw = false;
12862                         rc = -EINVAL;
12863                         continue;
12864                 }
12865         }
12866
12867         return rc;
12868 }
12869
12870 static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
12871 {
12872         struct bnx2x *bp = netdev_priv(dev);
12873         struct bnx2x_vlan_entry *vlan;
12874         bool hw = false;
12875         int rc = 0;
12876
12877         if (!netif_running(bp->dev)) {
12878                 DP(NETIF_MSG_IFUP,
12879                    "Ignoring VLAN configuration, the interface is down\n");
12880                 return -EFAULT;
12881         }
12882
12883         DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
12884
12885         vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
12886         if (!vlan)
12887                 return -ENOMEM;
12888
12889         bp->vlan_cnt++;
12890         if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
12891                 DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
12892                 bp->accept_any_vlan = true;
12893                 if (IS_PF(bp))
12894                         bnx2x_set_rx_mode_inner(bp);
12895                 else
12896                         bnx2x_vfpf_storm_rx_mode(bp);
12897         } else if (bp->vlan_cnt <= bp->vlan_credit) {
12898                 rc = __bnx2x_vlan_configure_vid(bp, vid, true);
12899                 hw = true;
12900         }
12901
12902         vlan->vid = vid;
12903         vlan->hw = hw;
12904
12905         if (!rc) {
12906                 list_add(&vlan->link, &bp->vlan_reg);
12907         } else {
12908                 bp->vlan_cnt--;
12909                 kfree(vlan);
12910         }
12911
12912         DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
12913
12914         return rc;
12915 }
12916
12917 static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
12918 {
12919         struct bnx2x *bp = netdev_priv(dev);
12920         struct bnx2x_vlan_entry *vlan;
12921         int rc = 0;
12922
12923         if (!netif_running(bp->dev)) {
12924                 DP(NETIF_MSG_IFUP,
12925                    "Ignoring VLAN configuration, the interface is down\n");
12926                 return -EFAULT;
12927         }
12928
12929         DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
12930
12931         if (!bp->vlan_cnt) {
12932                 BNX2X_ERR("Unable to kill VLAN %d\n", vid);
12933                 return -EINVAL;
12934         }
12935
12936         list_for_each_entry(vlan, &bp->vlan_reg, link)
12937                 if (vlan->vid == vid)
12938                         break;
12939
12940         if (vlan->vid != vid) {
12941                 BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
12942                 return -EINVAL;
12943         }
12944
12945         if (vlan->hw)
12946                 rc = __bnx2x_vlan_configure_vid(bp, vid, false);
12947
12948         list_del(&vlan->link);
12949         kfree(vlan);
12950
12951         bp->vlan_cnt--;
12952
12953         if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
12954                 /* Configure all non-configured entries */
12955                 list_for_each_entry(vlan, &bp->vlan_reg, link) {
12956                         if (vlan->hw)
12957                                 continue;
12958
12959                         rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
12960                         if (rc) {
12961                                 BNX2X_ERR("Unable to config VLAN %d\n",
12962                                           vlan->vid);
12963                                 continue;
12964                         }
12965                         DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
12966                            vlan->vid);
12967                         vlan->hw = true;
12968                 }
12969                 DP(NETIF_MSG_IFUP, "Accept all VLAN removed\n");
12970                 bp->accept_any_vlan = false;
12971                 if (IS_PF(bp))
12972                         bnx2x_set_rx_mode_inner(bp);
12973                 else
12974                         bnx2x_vfpf_storm_rx_mode(bp);
12975         }
12976
12977         DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
12978
12979         return rc;
12980 }
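/* The two VLAN ndo handlers above implement a credit scheme: up to
 * bp->vlan_credit VIDs are programmed into the HW filter, and once the count
 * exceeds the credit the device falls back to accept_any_vlan; when enough
 * VIDs are removed, the leftovers are re-programmed and filtering resumes.
 * A sketch of just the bookkeeping (CREDIT and the names are invented):
 */
#if 0	/* illustrative sketch, not built with the driver */
#include <stdio.h>
#include <stdbool.h>

#define CREDIT 2	/* assumed HW filter capacity */

static int vlan_cnt;
static bool accept_any_vlan;

static void vlan_add(void)
{
	if (++vlan_cnt > CREDIT && !accept_any_vlan) {
		accept_any_vlan = true;		/* out of filter slots */
		printf("add: fall back to accept-any-VLAN\n");
	}
}

static void vlan_kill(void)
{
	if (--vlan_cnt <= CREDIT && accept_any_vlan) {
		accept_any_vlan = false;	/* remaining VIDs fit again */
		printf("kill: HW filtering restored\n");
	}
}

int main(void)
{
	vlan_add();
	vlan_add();
	vlan_add();	/* third VID exceeds the credit */
	vlan_kill();	/* back to two -> filtering restored */
	return 0;
}
#endif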
12981
12982 static const struct net_device_ops bnx2x_netdev_ops = {
12983         .ndo_open               = bnx2x_open,
12984         .ndo_stop               = bnx2x_close,
12985         .ndo_start_xmit         = bnx2x_start_xmit,
12986         .ndo_select_queue       = bnx2x_select_queue,
12987         .ndo_set_rx_mode        = bnx2x_set_rx_mode,
12988         .ndo_set_mac_address    = bnx2x_change_mac_addr,
12989         .ndo_validate_addr      = bnx2x_validate_addr,
12990         .ndo_do_ioctl           = bnx2x_ioctl,
12991         .ndo_change_mtu         = bnx2x_change_mtu,
12992         .ndo_fix_features       = bnx2x_fix_features,
12993         .ndo_set_features       = bnx2x_set_features,
12994         .ndo_tx_timeout         = bnx2x_tx_timeout,
12995         .ndo_vlan_rx_add_vid    = bnx2x_vlan_rx_add_vid,
12996         .ndo_vlan_rx_kill_vid   = bnx2x_vlan_rx_kill_vid,
12997 #ifdef CONFIG_NET_POLL_CONTROLLER
12998         .ndo_poll_controller    = poll_bnx2x,
12999 #endif
13000         .ndo_setup_tc           = bnx2x_setup_tc,
13001 #ifdef CONFIG_BNX2X_SRIOV
13002         .ndo_set_vf_mac         = bnx2x_set_vf_mac,
13003         .ndo_set_vf_vlan        = bnx2x_set_vf_vlan,
13004         .ndo_get_vf_config      = bnx2x_get_vf_config,
13005 #endif
13006 #ifdef NETDEV_FCOE_WWNN
13007         .ndo_fcoe_get_wwn       = bnx2x_fcoe_get_wwn,
13008 #endif
13009
13010 #ifdef CONFIG_NET_RX_BUSY_POLL
13011         .ndo_busy_poll          = bnx2x_low_latency_recv,
13012 #endif
13013         .ndo_get_phys_port_id   = bnx2x_get_phys_port_id,
13014         .ndo_set_vf_link_state  = bnx2x_set_vf_link_state,
13015         .ndo_features_check     = bnx2x_features_check,
13016 #ifdef CONFIG_BNX2X_VXLAN
13017         .ndo_add_vxlan_port     = bnx2x_add_vxlan_port,
13018         .ndo_del_vxlan_port     = bnx2x_del_vxlan_port,
13019 #endif
13020 };
13021
13022 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
13023 {
13024         struct device *dev = &bp->pdev->dev;
13025
13026         if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
13027             dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
13028                 dev_err(dev, "System does not support DMA, aborting\n");
13029                 return -EIO;
13030         }
13031
13032         return 0;
13033 }
13034
13035 static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
13036 {
13037         if (bp->flags & AER_ENABLED) {
13038                 pci_disable_pcie_error_reporting(bp->pdev);
13039                 bp->flags &= ~AER_ENABLED;
13040         }
13041 }
13042
13043 static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
13044                           struct net_device *dev, unsigned long board_type)
13045 {
13046         int rc;
13047         u32 pci_cfg_dword;
13048         bool chip_is_e1x = (board_type == BCM57710 ||
13049                             board_type == BCM57711 ||
13050                             board_type == BCM57711E);
13051
13052         SET_NETDEV_DEV(dev, &pdev->dev);
13053
13054         bp->dev = dev;
13055         bp->pdev = pdev;
13056
13057         rc = pci_enable_device(pdev);
13058         if (rc) {
13059                 dev_err(&bp->pdev->dev,
13060                         "Cannot enable PCI device, aborting\n");
13061                 goto err_out;
13062         }
13063
13064         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13065                 dev_err(&bp->pdev->dev,
13066                         "Cannot find PCI device base address, aborting\n");
13067                 rc = -ENODEV;
13068                 goto err_out_disable;
13069         }
13070
13071         if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13072                 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
13073                 rc = -ENODEV;
13074                 goto err_out_disable;
13075         }
13076
13077         pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
13078         if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
13079             PCICFG_REVESION_ID_ERROR_VAL) {
13080                 pr_err("PCI device error, probably due to fan failure, aborting\n");
13081                 rc = -ENODEV;
13082                 goto err_out_disable;
13083         }
13084
13085         if (atomic_read(&pdev->enable_cnt) == 1) {
13086                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
13087                 if (rc) {
13088                         dev_err(&bp->pdev->dev,
13089                                 "Cannot obtain PCI resources, aborting\n");
13090                         goto err_out_disable;
13091                 }
13092
13093                 pci_set_master(pdev);
13094                 pci_save_state(pdev);
13095         }
13096
13097         if (IS_PF(bp)) {
13098                 if (!pdev->pm_cap) {
13099                         dev_err(&bp->pdev->dev,
13100                                 "Cannot find power management capability, aborting\n");
13101                         rc = -EIO;
13102                         goto err_out_release;
13103                 }
13104         }
13105
13106         if (!pci_is_pcie(pdev)) {
13107                 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
13108                 rc = -EIO;
13109                 goto err_out_release;
13110         }
13111
13112         rc = bnx2x_set_coherency_mask(bp);
13113         if (rc)
13114                 goto err_out_release;
13115
13116         dev->mem_start = pci_resource_start(pdev, 0);
13117         dev->base_addr = dev->mem_start;
13118         dev->mem_end = pci_resource_end(pdev, 0);
13119
13120         dev->irq = pdev->irq;
13121
13122         bp->regview = pci_ioremap_bar(pdev, 0);
13123         if (!bp->regview) {
13124                 dev_err(&bp->pdev->dev,
13125                         "Cannot map register space, aborting\n");
13126                 rc = -ENOMEM;
13127                 goto err_out_release;
13128         }
13129
13130         /* In E1/E1H use the PCI device function given by the kernel.
13131          * In E2/E3 read the physical function from the ME register, since these
13132          * chips support Physical Device Assignment where the kernel BDF may be
13133          * arbitrary (depending on the hypervisor).
13134          */
13135         if (chip_is_e1x) {
13136                 bp->pf_num = PCI_FUNC(pdev->devfn);
13137         } else {
13138                 /* chip is E2/3*/
13139                 pci_read_config_dword(bp->pdev,
13140                                       PCICFG_ME_REGISTER, &pci_cfg_dword);
13141                 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
13142                                   ME_REG_ABS_PF_NUM_SHIFT);
13143         }
13144         BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
13145
13146         /* clean indirect addresses */
13147         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
13148                                PCICFG_VENDOR_ID_OFFSET);
13149
13150         /* Set PCIe reset type to fundamental for EEH recovery */
13151         pdev->needs_freset = 1;
13152
13153         /* AER (Advanced Error reporting) configuration */
13154         rc = pci_enable_pcie_error_reporting(pdev);
13155         if (!rc)
13156                 bp->flags |= AER_ENABLED;
13157         else
13158                 BNX2X_DEV_INFO("Failed to configure PCIe AER [%d]\n", rc);
13159
13160         /*
13161          * Clean the following indirect addresses for all functions, since they
13162          * are not used by the driver.
13163          */
13164         if (IS_PF(bp)) {
13165                 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
13166                 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
13167                 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
13168                 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
13169
13170                 if (chip_is_e1x) {
13171                         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
13172                         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
13173                         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
13174                         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
13175                 }
13176
13177                 /* Enable internal target-read (in case we are probed after PF
13178                  * FLR). Must be done prior to any BAR read access. Only for
13179                  * 57712 and up
13180                  */
13181                 if (!chip_is_e1x)
13182                         REG_WR(bp,
13183                                PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13184         }
13185
13186         dev->watchdog_timeo = TX_TIMEOUT;
13187
13188         dev->netdev_ops = &bnx2x_netdev_ops;
13189         bnx2x_set_ethtool_ops(bp, dev);
13190
13191         dev->priv_flags |= IFF_UNICAST_FLT;
13192
13193         dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13194                 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13195                 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
13196                 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
13197         if (!chip_is_e1x) {
13198                 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
13199                                     NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
13200                 dev->hw_enc_features =
13201                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13202                         NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13203                         NETIF_F_GSO_IPIP |
13204                         NETIF_F_GSO_SIT |
13205                         NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
13206         }
13207
13208         dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13209                 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
13210
13211         /* A VF with an old hypervisor or an old PF does not support filtering */
13212         if (IS_PF(bp)) {
13213                 if (chip_is_e1x)
13214                         bp->accept_any_vlan = true;
13215                 else
13216                         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13217 #ifdef CONFIG_BNX2X_SRIOV
13218         } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
13219                 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13220 #endif
13221         }
13222
13223         dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
13224         dev->features |= NETIF_F_HIGHDMA;
13225
13226         /* Add Loopback capability to the device */
13227         dev->hw_features |= NETIF_F_LOOPBACK;
13228
13229 #ifdef BCM_DCBNL
13230         dev->dcbnl_ops = &bnx2x_dcbnl_ops;
13231 #endif
13232
13233         /* get_port_hwinfo() will set prtad and mmds properly */
13234         bp->mdio.prtad = MDIO_PRTAD_NONE;
13235         bp->mdio.mmds = 0;
13236         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13237         bp->mdio.dev = dev;
13238         bp->mdio.mdio_read = bnx2x_mdio_read;
13239         bp->mdio.mdio_write = bnx2x_mdio_write;
13240
13241         return 0;
13242
13243 err_out_release:
13244         if (atomic_read(&pdev->enable_cnt) == 1)
13245                 pci_release_regions(pdev);
13246
13247 err_out_disable:
13248         pci_disable_device(pdev);
13249
13250 err_out:
13251         return rc;
13252 }
13253
13254 /*(DEBLOBBED)*/
13255
13256 static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13257 {
13258         const __be32 *source = (const __be32 *)_source;
13259         u32 *target = (u32 *)_target;
13260         u32 i;
13261
13262         for (i = 0; i < n/4; i++)
13263                 target[i] = be32_to_cpu(source[i]);
13264 }
13265
13266 /* Ops array is stored in the following format:
13267  * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13268  */
13270 static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13271 {
13272         const __be32 *source = (const __be32 *)_source;
13273         struct raw_op *target = (struct raw_op *)_target;
13274         u32 i, j, tmp;
13275
13276         for (i = 0, j = 0; i < n/8; i++, j += 2) {
13277                 tmp = be32_to_cpu(source[j]);
13278                 target[i].op = (tmp >> 24) & 0xff;
13279                 target[i].offset = tmp & 0xffffff;
13280                 target[i].raw_data = be32_to_cpu(source[j + 1]);
13281         }
13282 }
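
/* As a concrete illustration of the decode above (example values only): a
 * record whose two big-endian words are 0x02010203 and 0xdeadbeef yields
 * op = 0x02, offset = 0x010203 and raw_data = 0xdeadbeef.
 */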
13283
13284 /* IRO array is stored in the following format:
13285  * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
13286  */
13287 static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
13288 {
13289         const __be32 *source = (const __be32 *)_source;
13290         struct iro *target = (struct iro *)_target;
13291         u32 i, j, tmp;
13292
13293         for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
13294                 target[i].base = be32_to_cpu(source[j]);
13295                 j++;
13296                 tmp = be32_to_cpu(source[j]);
13297                 target[i].m1 = (tmp >> 16) & 0xffff;
13298                 target[i].m2 = tmp & 0xffff;
13299                 j++;
13300                 tmp = be32_to_cpu(source[j]);
13301                 target[i].m3 = (tmp >> 16) & 0xffff;
13302                 target[i].size = tmp & 0xffff;
13303                 j++;
13304         }
13305 }
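
/* Illustration of the three-word IRO decode above (example values only):
 * the big-endian words 0x00001000, 0x00800010 and 0x00400008 yield
 * base = 0x1000, m1 = 0x0080, m2 = 0x0010, m3 = 0x0040 and size = 0x0008.
 */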
13306
13307 static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13308 {
13309         const __be16 *source = (const __be16 *)_source;
13310         u16 *target = (u16 *)_target;
13311         u32 i;
13312
13313         for (i = 0; i < n/2; i++)
13314                 target[i] = be16_to_cpu(source[i]);
13315 }
13316
13317 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
13318 do {                                                                    \
13319         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
13320         bp->arr = kmalloc(len, GFP_KERNEL);                             \
13321         if (!bp->arr)                                                   \
13322                 goto lbl;                                               \
13323         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
13324              (u8 *)bp->arr, len);                                       \
13325 } while (0)
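
/* For instance, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) expands to roughly:
 *
 *      u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *      bp->init_data = kmalloc(len, GFP_KERNEL);
 *      if (!bp->init_data)
 *              goto request_firmware_exit;
 *      be32_to_cpu_n(bp->firmware->data +
 *                    be32_to_cpu(fw_hdr->init_data.offset),
 *                    (u8 *)bp->init_data, len);
 *
 * i.e. it allocates the target array and byte-swaps the corresponding
 * firmware image section into it in one step.
 */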
13326
13327 static int bnx2x_init_firmware(struct bnx2x *bp)
13328 {
13329         const char *fw_file_name;
13330         struct bnx2x_fw_file_hdr *fw_hdr;
13331         int rc;
13332
13333         if (bp->firmware)
13334                 return 0;
13335
13336         if (CHIP_IS_E1(bp))
13337                 fw_file_name = FW_FILE_NAME_E1;
13338         else if (CHIP_IS_E1H(bp))
13339                 fw_file_name = FW_FILE_NAME_E1H;
13340         else if (!CHIP_IS_E1x(bp))
13341                 fw_file_name = FW_FILE_NAME_E2;
13342         else {
13343                 BNX2X_ERR("Unsupported chip revision\n");
13344                 return -EINVAL;
13345         }
13346         BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
13347
13348         rc = reject_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
13349         if (rc) {
13350                 BNX2X_ERR("Can't load firmware file %s\n",
13351                           fw_file_name);
13352                 goto request_firmware_exit;
13353         }
13354
13355         /*(DEBLOBBED)*/
13356         if (rc) {
13357                 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
13358                 goto request_firmware_exit;
13359         }
13360
13361         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13362
13363         /* Initialize the pointers to the init arrays */
13364         /* Blob */
13365         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13366
13367         /* Opcodes */
13368         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13369
13370         /* Offsets */
13371         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13372                             be16_to_cpu_n);
13373
13374         /* STORMs firmware */
13375         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13376                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13377         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
13378                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13379         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13380                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13381         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
13382                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
13383         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13384                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13385         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
13386                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13387         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13388                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13389         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
13390                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
13391         /* IRO */
13392         BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
13393
13394         return 0;
13395
13396 iro_alloc_err:
13397         kfree(bp->init_ops_offsets);
13398 init_offsets_alloc_err:
13399         kfree(bp->init_ops);
13400 init_ops_alloc_err:
13401         kfree(bp->init_data);
13402 request_firmware_exit:
13403         release_firmware(bp->firmware);
13404         bp->firmware = NULL;
13405
13406         return rc;
13407 }
13408
13409 static void bnx2x_release_firmware(struct bnx2x *bp)
13410 {
13411         kfree(bp->init_ops_offsets);
13412         kfree(bp->init_ops);
13413         kfree(bp->init_data);
13414         release_firmware(bp->firmware);
13415         bp->firmware = NULL;
13416 }
13417
13418 static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
13419         .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
13420         .init_hw_cmn      = bnx2x_init_hw_common,
13421         .init_hw_port     = bnx2x_init_hw_port,
13422         .init_hw_func     = bnx2x_init_hw_func,
13423
13424         .reset_hw_cmn     = bnx2x_reset_common,
13425         .reset_hw_port    = bnx2x_reset_port,
13426         .reset_hw_func    = bnx2x_reset_func,
13427
13428         .gunzip_init      = bnx2x_gunzip_init,
13429         .gunzip_end       = bnx2x_gunzip_end,
13430
13431         .init_fw          = bnx2x_init_firmware,
13432         .release_fw       = bnx2x_release_firmware,
13433 };
13434
13435 void bnx2x__init_func_obj(struct bnx2x *bp)
13436 {
13437         /* Prepare DMAE related driver resources */
13438         bnx2x_setup_dmae(bp);
13439
13440         bnx2x_init_func_obj(bp, &bp->func_obj,
13441                             bnx2x_sp(bp, func_rdata),
13442                             bnx2x_sp_mapping(bp, func_rdata),
13443                             bnx2x_sp(bp, func_afex_rdata),
13444                             bnx2x_sp_mapping(bp, func_afex_rdata),
13445                             &bnx2x_func_sp_drv);
13446 }
13447
13448 /* must be called after sriov-enable */
13449 static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
13450 {
13451         int cid_count = BNX2X_L2_MAX_CID(bp);
13452
13453         if (IS_SRIOV(bp))
13454                 cid_count += BNX2X_VF_CIDS;
13455
13456         if (CNIC_SUPPORT(bp))
13457                 cid_count += CNIC_CID_MAX;
13458
13459         return roundup(cid_count, QM_CID_ROUND);
13460 }
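
/* Worked example of the rounding above, assuming QM_CID_ROUND is 1024 (the
 * value defined in bnx2x.h): any cid_count in (0, 1024] rounds up to 1024,
 * and any in (1024, 2048] to 2048.
 */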
13461
13462 /**
13463  * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
13464  *
13465  * @pdev:       pci device
13466  * @cnic_cnt:   number of SBs used by CNIC
13466  *
13467  */
13468 static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
13469 {
13470         int index;
13471         u16 control = 0;
13472
13473         /*
13474          * If MSI-X is not supported, return the number of SBs needed to
13475          * support one fastpath queue plus, if present, the SB for CNIC.
13476          */
13477         if (!pdev->msix_cap) {
13478                 dev_info(&pdev->dev, "no msix capability found\n");
13479                 return 1 + cnic_cnt;
13480         }
13481         dev_info(&pdev->dev, "msix capability found\n");
13482
13483         /*
13484          * The value in the PCI configuration space is the index of the last
13485          * entry, namely one less than the actual size of the table, which is
13486          * exactly what we want to return from this function: number of all SBs
13487          * without the default SB.
13488          * For VFs there is no default SB; the caller adds one, yielding (index + 1).
13489          */
13490         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
13491
13492         index = control & PCI_MSIX_FLAGS_QSIZE;
13493
13494         return index;
13495 }
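
/* Example: if the Message Control word read above is 0x0007, then
 * PCI_MSIX_FLAGS_QSIZE masks out a table-size index of 7, i.e. the MSI-X
 * table has 8 entries; 7 is returned here as the number of non-default SBs,
 * and the caller adds one back for a VF, which has no default SB.
 */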
13496
13497 static int set_max_cos_est(int chip_id)
13498 {
13499         switch (chip_id) {
13500         case BCM57710:
13501         case BCM57711:
13502         case BCM57711E:
13503                 return BNX2X_MULTI_TX_COS_E1X;
13504         case BCM57712:
13505         case BCM57712_MF:
13506                 return BNX2X_MULTI_TX_COS_E2_E3A0;
13507         case BCM57800:
13508         case BCM57800_MF:
13509         case BCM57810:
13510         case BCM57810_MF:
13511         case BCM57840_4_10:
13512         case BCM57840_2_20:
13513         case BCM57840_O:
13514         case BCM57840_MFO:
13515         case BCM57840_MF:
13516         case BCM57811:
13517         case BCM57811_MF:
13518                 return BNX2X_MULTI_TX_COS_E3B0;
13519         case BCM57712_VF:
13520         case BCM57800_VF:
13521         case BCM57810_VF:
13522         case BCM57840_VF:
13523         case BCM57811_VF:
13524                 return 1;
13525         default:
13526                 pr_err("Unknown board_type (%d), aborting\n", chip_id);
13527                 return -ENODEV;
13528         }
13529 }
13530
13531 static int set_is_vf(int chip_id)
13532 {
13533         switch (chip_id) {
13534         case BCM57712_VF:
13535         case BCM57800_VF:
13536         case BCM57810_VF:
13537         case BCM57840_VF:
13538         case BCM57811_VF:
13539                 return true;
13540         default:
13541                 return false;
13542         }
13543 }
13544
13545 /* nig_tsgen registers' relative addresses */
13546 #define tsgen_ctrl 0x0
13547 #define tsgen_freecount 0x10
13548 #define tsgen_synctime_t0 0x20
13549 #define tsgen_offset_t0 0x28
13550 #define tsgen_drift_t0 0x30
13551 #define tsgen_synctime_t1 0x58
13552 #define tsgen_offset_t1 0x60
13553 #define tsgen_drift_t1 0x68
13554
13555 /* FW workaround for setting drift */
13556 static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
13557                                           int best_val, int best_period)
13558 {
13559         struct bnx2x_func_state_params func_params = {NULL};
13560         struct bnx2x_func_set_timesync_params *set_timesync_params =
13561                 &func_params.params.set_timesync;
13562
13563         /* Prepare parameters for function state transitions */
13564         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
13565         __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
13566
13567         func_params.f_obj = &bp->func_obj;
13568         func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
13569
13570         /* Function parameters */
13571         set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
13572         set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
13573         set_timesync_params->add_sub_drift_adjust_value =
13574                 drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
13575         set_timesync_params->drift_adjust_value = best_val;
13576         set_timesync_params->drift_adjust_period = best_period;
13577
13578         return bnx2x_func_state_change(bp, &func_params);
13579 }
13580
13581 static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
13582 {
13583         struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13584         int rc;
13585         int drift_dir = 1;
13586         int val, period, period1, period2, dif, dif1, dif2;
13587         int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
13588
13589         DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
13590
13591         if (!netif_running(bp->dev)) {
13592                 DP(BNX2X_MSG_PTP,
13593                    "PTP adjfreq called while the interface is down\n");
13594                 return -ENETDOWN;
13595         }
13596
13597         if (ppb < 0) {
13598                 ppb = -ppb;
13599                 drift_dir = 0;
13600         }
13601
13602         if (ppb == 0) {
13603                 best_val = 1;
13604                 best_period = 0x1FFFFFF;
13605         } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
13606                 best_val = 31;
13607                 best_period = 1;
13608         } else {
13609                 /* val = 8, 16 and 24 are skipped, as these values are
13610                  * not supported by the workaround.
13611                  */
13612                 for (val = 0; val <= 31; val++) {
13613                         if ((val & 0x7) == 0)
13614                                 continue;
13615                         period1 = val * 1000000 / ppb;
13616                         period2 = period1 + 1;
13617                         if (period1 != 0)
13618                                 dif1 = ppb - (val * 1000000 / period1);
13619                         else
13620                                 dif1 = BNX2X_MAX_PHC_DRIFT;
13621                         if (dif1 < 0)
13622                                 dif1 = -dif1;
13623                         dif2 = ppb - (val * 1000000 / period2);
13624                         if (dif2 < 0)
13625                                 dif2 = -dif2;
13626                         dif = (dif1 < dif2) ? dif1 : dif2;
13627                         period = (dif1 < dif2) ? period1 : period2;
13628                         if (dif < best_dif) {
13629                                 best_dif = dif;
13630                                 best_val = val;
13631                                 best_period = period;
13632                         }
13633                 }
13634         }
13635
13636         rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
13637                                             best_period);
13638         if (rc) {
13639                 BNX2X_ERR("Failed to set drift\n");
13640                 return -EFAULT;
13641         }
13642
13643         DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
13644            best_period);
13645
13646         return 0;
13647 }
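
/* Worked example for the search above: for ppb = 100 and val = 1,
 * period1 = 1 * 1000000 / 100 = 10000, and the achieved drift is
 * 1 * 1000000 / 10000 = 100 ppb exactly, so dif1 = 0 and the loop settles
 * on val = 1, period = 10000. Larger |ppb| values generally need a larger
 * val and/or a smaller period.
 */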
13648
13649 static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
13650 {
13651         struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13652
13653         if (!netif_running(bp->dev)) {
13654                 DP(BNX2X_MSG_PTP,
13655                    "PTP adjtime called while the interface is down\n");
13656                 return -ENETDOWN;
13657         }
13658
13659         DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
13660
13661         timecounter_adjtime(&bp->timecounter, delta);
13662
13663         return 0;
13664 }
13665
13666 static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
13667 {
13668         struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13669         u64 ns;
13670
13671         if (!netif_running(bp->dev)) {
13672                 DP(BNX2X_MSG_PTP,
13673                    "PTP gettime called while the interface is down\n");
13674                 return -ENETDOWN;
13675         }
13676
13677         ns = timecounter_read(&bp->timecounter);
13678
13679         DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
13680
13681         *ts = ns_to_timespec64(ns);
13682
13683         return 0;
13684 }
13685
13686 static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
13687                              const struct timespec64 *ts)
13688 {
13689         struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13690         u64 ns;
13691
13692         if (!netif_running(bp->dev)) {
13693                 DP(BNX2X_MSG_PTP,
13694                    "PTP settime called while the interface is down\n");
13695                 return -ENETDOWN;
13696         }
13697
13698         ns = timespec64_to_ns(ts);
13699
13700         DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
13701
13702         /* Re-init the timecounter */
13703         timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
13704
13705         return 0;
13706 }
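
/* Conversion example: a timespec64 of { .tv_sec = 2, .tv_nsec = 500000000 }
 * becomes ns = 2500000000, which re-seeds the timecounter; gettime above
 * performs the inverse conversion via ns_to_timespec64().
 */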
13707
13708 /* Enable (or disable) ancillary features of the phc subsystem */
13709 static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
13710                             struct ptp_clock_request *rq, int on)
13711 {
13712         struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13713
13714         BNX2X_ERR("PHC ancillary features are not supported\n");
13715         return -ENOTSUPP;
13716 }
13717
13718 static void bnx2x_register_phc(struct bnx2x *bp)
13719 {
13720         /* Fill the ptp_clock_info struct and register the PTP clock */
13721         bp->ptp_clock_info.owner = THIS_MODULE;
13722         snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
13723         bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */
13724         bp->ptp_clock_info.n_alarm = 0;
13725         bp->ptp_clock_info.n_ext_ts = 0;
13726         bp->ptp_clock_info.n_per_out = 0;
13727         bp->ptp_clock_info.pps = 0;
13728         bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
13729         bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
13730         bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
13731         bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
13732         bp->ptp_clock_info.enable = bnx2x_ptp_enable;
13733
13734         bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
13735         if (IS_ERR(bp->ptp_clock)) {
13736                 bp->ptp_clock = NULL;
13737                 BNX2X_ERR("PTP clock registration failed\n");
13738         }
13739 }
13740
13741 static int bnx2x_init_one(struct pci_dev *pdev,
13742                                     const struct pci_device_id *ent)
13743 {
13744         struct net_device *dev = NULL;
13745         struct bnx2x *bp;
13746         enum pcie_link_width pcie_width;
13747         enum pci_bus_speed pcie_speed;
13748         int rc, max_non_def_sbs;
13749         int rx_count, tx_count, rss_count, doorbell_size;
13750         int max_cos_est;
13751         bool is_vf;
13752         int cnic_cnt;
13753
13754         /* Management FW 'remembers' living interfaces. Give it some time to
13755          * forget previously living interfaces so that a re-load proceeds properly.
13756          */
13757         if (is_kdump_kernel()) {
13758                 ktime_t now = ktime_get_boottime();
13759                 ktime_t fw_ready_time = ktime_set(5, 0);
13760
13761                 if (ktime_before(now, fw_ready_time))
13762                         msleep(ktime_ms_delta(fw_ready_time, now));
13763         }
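
        /* E.g. if a kdump kernel reaches this point 2 s after boot, the code
         * above sleeps for the remaining ~3000 ms so that the management FW's
         * 5-second grace period has elapsed before the device is re-probed.
         */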
13764
13765         /* Roughly estimate the maximum number of CoSes this chip may support,
13766          * according to the chip version, in order to minimize the memory
13767          * allocated for Tx netdev_queues. The accurate value of bp->max_cos is
13768          * calculated later, in bnx2x_init_bp(), based on both the chip version
13769          * and the chip revision.
13772          */
13773         max_cos_est = set_max_cos_est(ent->driver_data);
13774         if (max_cos_est < 0)
13775                 return max_cos_est;
13776         is_vf = set_is_vf(ent->driver_data);
13777         cnic_cnt = is_vf ? 0 : 1;
13778
13779         max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
13780
13781         /* add another SB for VF as it has no default SB */
13782         max_non_def_sbs += is_vf ? 1 : 0;
13783
13784         /* Maximum number of RSS queues: one IGU SB goes to CNIC */
13785         rss_count = max_non_def_sbs - cnic_cnt;
13786
13787         if (rss_count < 1)
13788                 return -EINVAL;
13789
13790         /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
13791         rx_count = rss_count + cnic_cnt;
13792
13793         /* Maximum number of netdev Tx queues:
13794          * Maximum TSS queues * Maximum supported number of CoS  + FCoE L2
13795          */
13796         tx_count = rss_count * max_cos_est + cnic_cnt;
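
        /* Queue-count arithmetic with illustrative numbers: for a PF with
         * rss_count = 8, cnic_cnt = 1 and an estimated 3 CoS, rx_count is
         * 8 + 1 = 9 and tx_count is 8 * 3 + 1 = 25. These are upper bounds
         * used only to size the netdev queue arrays allocated below.
         */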
13797
13798         /* dev is zeroed in alloc_etherdev_mqs() */
13799         dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
13800         if (!dev)
13801                 return -ENOMEM;
13802
13803         bp = netdev_priv(dev);
13804
13805         bp->flags = 0;
13806         if (is_vf)
13807                 bp->flags |= IS_VF_FLAG;
13808
13809         bp->igu_sb_cnt = max_non_def_sbs;
13810         bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
13811         bp->msg_enable = debug;
13812         bp->cnic_support = cnic_cnt;
13813         bp->cnic_probe = bnx2x_cnic_probe;
13814
13815         pci_set_drvdata(pdev, dev);
13816
13817         rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
13818         if (rc < 0) {
13819                 free_netdev(dev);
13820                 return rc;
13821         }
13822
13823         BNX2X_DEV_INFO("This is a %s function\n",
13824                        IS_PF(bp) ? "physical" : "virtual");
13825         BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
13826         BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
13827         BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
13828                        tx_count, rx_count);
13829
13830         rc = bnx2x_init_bp(bp);
13831         if (rc)
13832                 goto init_one_exit;
13833
13834         /* Map doorbells here as we need the real value of bp->max_cos which
13835          * is initialized in bnx2x_init_bp() to determine the number of
13836          * l2 connections.
13837          */
13838         if (IS_VF(bp)) {
13839                 bp->doorbells = bnx2x_vf_doorbells(bp);
13840                 rc = bnx2x_vf_pci_alloc(bp);
13841                 if (rc)
13842                         goto init_one_exit;
13843         } else {
13844                 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
13845                 if (doorbell_size > pci_resource_len(pdev, 2)) {
13846                         dev_err(&bp->pdev->dev,
13847                                 "Cannot map doorbells, bar size too small, aborting\n");
13848                         rc = -ENOMEM;
13849                         goto init_one_exit;
13850                 }
13851                 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
13852                                                 doorbell_size);
13853         }
13854         if (!bp->doorbells) {
13855                 dev_err(&bp->pdev->dev,
13856                         "Cannot map doorbell space, aborting\n");
13857                 rc = -ENOMEM;
13858                 goto init_one_exit;
13859         }
13860
13861         if (IS_VF(bp)) {
13862                 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
13863                 if (rc)
13864                         goto init_one_exit;
13865         }
13866
13867         /* Enable SRIOV if capability found in configuration space */
13868         rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
13869         if (rc)
13870                 goto init_one_exit;
13871
13872         /* calc qm_cid_count */
13873         bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
13874         BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
13875
13876         /* disable FCoE L2 queue for E1x */
13877         if (CHIP_IS_E1x(bp))
13878                 bp->flags |= NO_FCOE_FLAG;
13879
13880         /* Set bp->num_queues for MSI-X mode */
13881         bnx2x_set_num_queues(bp);
13882
13883         /* Configure interrupt mode: try to enable MSI-X/MSI if
13884          * needed.
13885          */
13886         rc = bnx2x_set_int_mode(bp);
13887         if (rc) {
13888                 dev_err(&pdev->dev, "Cannot set interrupts\n");
13889                 goto init_one_exit;
13890         }
13891         BNX2X_DEV_INFO("set interrupts successfully\n");
13892
13893         /* register the net device */
13894         rc = register_netdev(dev);
13895         if (rc) {
13896                 dev_err(&pdev->dev, "Cannot register net device\n");
13897                 goto init_one_exit;
13898         }
13899         BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
13900
13901         if (!NO_FCOE(bp)) {
13902                 /* Add storage MAC address */
13903                 rtnl_lock();
13904                 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
13905                 rtnl_unlock();
13906         }
13907         if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
13908             pcie_speed == PCI_SPEED_UNKNOWN ||
13909             pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
13910                 BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
13911         else
13912                 BNX2X_DEV_INFO(
13913                        "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
13914                        board_info[ent->driver_data].name,
13915                        (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13916                        pcie_width,
13917                        pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
13918                        pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
13919                        pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
13920                        "Unknown",
13921                        dev->base_addr, bp->pdev->irq, dev->dev_addr);
13922
13923         bnx2x_register_phc(bp);
13924
13925         if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
13926                 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
13927
13928         return 0;
13929
13930 init_one_exit:
13931         bnx2x_disable_pcie_error_reporting(bp);
13932
13933         if (bp->regview)
13934                 iounmap(bp->regview);
13935
13936         if (IS_PF(bp) && bp->doorbells)
13937                 iounmap(bp->doorbells);
13938
13939         free_netdev(dev);
13940
13941         if (atomic_read(&pdev->enable_cnt) == 1)
13942                 pci_release_regions(pdev);
13943
13944         pci_disable_device(pdev);
13945
13946         return rc;
13947 }
13948
13949 static void __bnx2x_remove(struct pci_dev *pdev,
13950                            struct net_device *dev,
13951                            struct bnx2x *bp,
13952                            bool remove_netdev)
13953 {
13954         if (bp->ptp_clock) {
13955                 ptp_clock_unregister(bp->ptp_clock);
13956                 bp->ptp_clock = NULL;
13957         }
13958
13959         /* Delete storage MAC address */
13960         if (!NO_FCOE(bp)) {
13961                 rtnl_lock();
13962                 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
13963                 rtnl_unlock();
13964         }
13965
13966 #ifdef BCM_DCBNL
13967         /* Delete app tlvs from dcbnl */
13968         bnx2x_dcbnl_update_applist(bp, true);
13969 #endif
13970
13971         if (IS_PF(bp) &&
13972             !BP_NOMCP(bp) &&
13973             (bp->flags & BC_SUPPORTS_RMMOD_CMD))
13974                 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
13975
13976         /* Close the interface - either directly or implicitly */
13977         if (remove_netdev) {
13978                 unregister_netdev(dev);
13979         } else {
13980                 rtnl_lock();
13981                 dev_close(dev);
13982                 rtnl_unlock();
13983         }
13984
13985         bnx2x_iov_remove_one(bp);
13986
13987         /* Power on: we can't let the PCI layer write to us while we are in D3 */
13988         if (IS_PF(bp)) {
13989                 bnx2x_set_power_state(bp, PCI_D0);
13990                 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
13991
13992                 /* Set endianness registers to their reset values in case the
13993                  * next driver boots in a different-endianness environment.
13994                  */
13995                 bnx2x_reset_endianity(bp);
13996         }
13997
13998         /* Disable MSI/MSI-X */
13999         bnx2x_disable_msi(bp);
14000
14001         /* Power off */
14002         if (IS_PF(bp))
14003                 bnx2x_set_power_state(bp, PCI_D3hot);
14004
14005         /* Make sure RESET task is not scheduled before continuing */
14006         cancel_delayed_work_sync(&bp->sp_rtnl_task);
14007
14008         /* send message via vfpf channel to release the resources of this vf */
14009         if (IS_VF(bp))
14010                 bnx2x_vfpf_release(bp);
14011
14012         /* Assumes no further PCIe PM changes will occur */
14013         if (system_state == SYSTEM_POWER_OFF) {
14014                 pci_wake_from_d3(pdev, bp->wol);
14015                 pci_set_power_state(pdev, PCI_D3hot);
14016         }
14017
14018         bnx2x_disable_pcie_error_reporting(bp);
14019         if (remove_netdev) {
14020                 if (bp->regview)
14021                         iounmap(bp->regview);
14022
14023                 /* For VFs, doorbells are part of the regview and were unmapped
14024                  * along with it. FW is only loaded by the PF.
14025                  */
14026                 if (IS_PF(bp)) {
14027                         if (bp->doorbells)
14028                                 iounmap(bp->doorbells);
14029
14030                         bnx2x_release_firmware(bp);
14031                 } else {
14032                         bnx2x_vf_pci_dealloc(bp);
14033                 }
14034                 bnx2x_free_mem_bp(bp);
14035
14036                 free_netdev(dev);
14037
14038                 if (atomic_read(&pdev->enable_cnt) == 1)
14039                         pci_release_regions(pdev);
14040
14041                 pci_disable_device(pdev);
14042         }
14043 }
14044
14045 static void bnx2x_remove_one(struct pci_dev *pdev)
14046 {
14047         struct net_device *dev = pci_get_drvdata(pdev);
14048         struct bnx2x *bp;
14049
14050         if (!dev) {
14051                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
14052                 return;
14053         }
14054         bp = netdev_priv(dev);
14055
14056         __bnx2x_remove(pdev, dev, bp, true);
14057 }
14058
14059 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
14060 {
14061         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
14062
14063         bp->rx_mode = BNX2X_RX_MODE_NONE;
14064
14065         if (CNIC_LOADED(bp))
14066                 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
14067
14068         /* Stop Tx */
14069         bnx2x_tx_disable(bp);
14070         /* Delete all NAPI objects */
14071         bnx2x_del_all_napi(bp);
14072         if (CNIC_LOADED(bp))
14073                 bnx2x_del_all_napi_cnic(bp);
14074         netdev_reset_tc(bp->dev);
14075
14076         del_timer_sync(&bp->timer);
14077         cancel_delayed_work_sync(&bp->sp_task);
14078         cancel_delayed_work_sync(&bp->period_task);
14079
14080         if (!down_timeout(&bp->stats_lock, HZ / 10)) {
14081                 bp->stats_state = STATS_STATE_DISABLED;
14082                 up(&bp->stats_lock);
14083         }
14084
14085         bnx2x_save_statistics(bp);
14086
14087         netif_carrier_off(bp->dev);
14088
14089         return 0;
14090 }
14091
14092 /**
14093  * bnx2x_io_error_detected - called when PCI error is detected
14094  * @pdev: Pointer to PCI device
14095  * @state: The current pci connection state
14096  *
14097  * This function is called after a PCI bus error affecting
14098  * this device has been detected.
14099  */
14100 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
14101                                                 pci_channel_state_t state)
14102 {
14103         struct net_device *dev = pci_get_drvdata(pdev);
14104         struct bnx2x *bp = netdev_priv(dev);
14105
14106         rtnl_lock();
14107
14108         BNX2X_ERR("IO error detected\n");
14109
14110         netif_device_detach(dev);
14111
14112         if (state == pci_channel_io_perm_failure) {
14113                 rtnl_unlock();
14114                 return PCI_ERS_RESULT_DISCONNECT;
14115         }
14116
14117         if (netif_running(dev))
14118                 bnx2x_eeh_nic_unload(bp);
14119
14120         bnx2x_prev_path_mark_eeh(bp);
14121
14122         pci_disable_device(pdev);
14123
14124         rtnl_unlock();
14125
14126         /* Request a slot reset */
14127         return PCI_ERS_RESULT_NEED_RESET;
14128 }
14129
14130 /**
14131  * bnx2x_io_slot_reset - called after the PCI bus has been reset
14132  * @pdev: Pointer to PCI device
14133  *
14134  * Restart the card from scratch, as if from a cold-boot.
14135  */
14136 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
14137 {
14138         struct net_device *dev = pci_get_drvdata(pdev);
14139         struct bnx2x *bp = netdev_priv(dev);
14140         int i;
14141
14142         rtnl_lock();
14143         BNX2X_ERR("IO slot reset initializing...\n");
14144         if (pci_enable_device(pdev)) {
14145                 dev_err(&pdev->dev,
14146                         "Cannot re-enable PCI device after reset\n");
14147                 rtnl_unlock();
14148                 return PCI_ERS_RESULT_DISCONNECT;
14149         }
14150
14151         pci_set_master(pdev);
14152         pci_restore_state(pdev);
14153         pci_save_state(pdev);
14154
14155         if (netif_running(dev))
14156                 bnx2x_set_power_state(bp, PCI_D0);
14157
14158         if (netif_running(dev)) {
14159                 BNX2X_ERR("IO slot reset --> driver unload\n");
14160
14161                 /* MCP should have been reset; need to wait for validity */
14162                 if (bnx2x_init_shmem(bp)) {
14163                         rtnl_unlock();
14164                         return PCI_ERS_RESULT_DISCONNECT;
14165                 }
14166
14167                 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
14168                         u32 v;
14169
14170                         v = SHMEM2_RD(bp,
14171                                       drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
14172                         SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
14173                                   v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
14174                 }
14175                 bnx2x_drain_tx_queues(bp);
14176                 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
14177                 bnx2x_netif_stop(bp, 1);
14178                 bnx2x_free_irq(bp);
14179
14180                 /* Report UNLOAD_DONE to MCP */
14181                 bnx2x_send_unload_done(bp, true);
14182
14183                 bp->sp_state = 0;
14184                 bp->port.pmf = 0;
14185
14186                 bnx2x_prev_unload(bp);
14187
14188                 /* We should have reset the engine, so it's fair to
14189                  * assume the FW will no longer write to the bnx2x driver.
14190                  */
14191                 bnx2x_squeeze_objects(bp);
14192                 bnx2x_free_skbs(bp);
14193                 for_each_rx_queue(bp, i)
14194                         bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
14195                 bnx2x_free_fp_mem(bp);
14196                 bnx2x_free_mem(bp);
14197
14198                 bp->state = BNX2X_STATE_CLOSED;
14199         }
14200
14201         rtnl_unlock();
14202
14203         /* If AER, perform cleanup of the PCIe registers */
14204         if (bp->flags & AER_ENABLED) {
14205                 if (pci_cleanup_aer_uncorrect_error_status(pdev))
14206                         BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
14207                 else
14208                         DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
14209         }
14210
14211         return PCI_ERS_RESULT_RECOVERED;
14212 }
14213
14214 /**
14215  * bnx2x_io_resume - called when traffic can start flowing again
14216  * @pdev: Pointer to PCI device
14217  *
14218  * This callback is called when the error recovery driver tells us that
14219  * it's OK to resume normal operation.
14220  */
14221 static void bnx2x_io_resume(struct pci_dev *pdev)
14222 {
14223         struct net_device *dev = pci_get_drvdata(pdev);
14224         struct bnx2x *bp = netdev_priv(dev);
14225
14226         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
14227                 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
14228                 return;
14229         }
14230
14231         rtnl_lock();
14232
14233         bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
14234                                                         DRV_MSG_SEQ_NUMBER_MASK;
14235
14236         if (netif_running(dev))
14237                 bnx2x_nic_load(bp, LOAD_NORMAL);
14238
14239         netif_device_attach(dev);
14240
14241         rtnl_unlock();
14242 }
14243
14244 static const struct pci_error_handlers bnx2x_err_handler = {
14245         .error_detected = bnx2x_io_error_detected,
14246         .slot_reset     = bnx2x_io_slot_reset,
14247         .resume         = bnx2x_io_resume,
14248 };
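
/* The PCI AER core invokes these callbacks in order: error_detected() first
 * (detach and quiesce), then slot_reset() once the link has been reset
 * (re-enable and re-initialize), and finally resume() when traffic may flow
 * again.
 */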
14249
14250 static void bnx2x_shutdown(struct pci_dev *pdev)
14251 {
14252         struct net_device *dev = pci_get_drvdata(pdev);
14253         struct bnx2x *bp;
14254
14255         if (!dev)
14256                 return;
14257
14258         bp = netdev_priv(dev);
14259         if (!bp)
14260                 return;
14261
14262         rtnl_lock();
14263         netif_device_detach(dev);
14264         rtnl_unlock();
14265
14266         /* Don't remove the netdevice, as there are scenarios which will cause
14267          * the kernel to hang, e.g., when trying to remove bnx2i while the
14268          * rootfs is mounted from SAN.
14269          */
14270         __bnx2x_remove(pdev, dev, bp, false);
14271 }
14272
14273 static struct pci_driver bnx2x_pci_driver = {
14274         .name        = DRV_MODULE_NAME,
14275         .id_table    = bnx2x_pci_tbl,
14276         .probe       = bnx2x_init_one,
14277         .remove      = bnx2x_remove_one,
14278         .suspend     = bnx2x_suspend,
14279         .resume      = bnx2x_resume,
14280         .err_handler = &bnx2x_err_handler,
14281 #ifdef CONFIG_BNX2X_SRIOV
14282         .sriov_configure = bnx2x_sriov_configure,
14283 #endif
14284         .shutdown    = bnx2x_shutdown,
14285 };
14286
14287 static int __init bnx2x_init(void)
14288 {
14289         int ret;
14290
14291         pr_info("%s", version);
14292
14293         bnx2x_wq = create_singlethread_workqueue("bnx2x");
14294         if (bnx2x_wq == NULL) {
14295                 pr_err("Cannot create workqueue\n");
14296                 return -ENOMEM;
14297         }
14298         bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
14299         if (!bnx2x_iov_wq) {
14300                 pr_err("Cannot create iov workqueue\n");
14301                 destroy_workqueue(bnx2x_wq);
14302                 return -ENOMEM;
14303         }
14304
14305         ret = pci_register_driver(&bnx2x_pci_driver);
14306         if (ret) {
14307                 pr_err("Cannot register driver\n");
14308                 destroy_workqueue(bnx2x_wq);
14309                 destroy_workqueue(bnx2x_iov_wq);
14310         }
14311         return ret;
14312 }
14313
14314 static void __exit bnx2x_cleanup(void)
14315 {
14316         struct list_head *pos, *q;
14317
14318         pci_unregister_driver(&bnx2x_pci_driver);
14319
14320         destroy_workqueue(bnx2x_wq);
14321         destroy_workqueue(bnx2x_iov_wq);
14322
14323         /* Free globally allocated resources */
14324         list_for_each_safe(pos, q, &bnx2x_prev_list) {
14325                 struct bnx2x_prev_path_list *tmp =
14326                         list_entry(pos, struct bnx2x_prev_path_list, list);
14327                 list_del(pos);
14328                 kfree(tmp);
14329         }
14330 }
14331
14332 void bnx2x_notify_link_changed(struct bnx2x *bp)
14333 {
14334         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
14335 }
14336
14337 module_init(bnx2x_init);
14338 module_exit(bnx2x_cleanup);
14339
14340 /**
14341  * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
14342  *
14343  * @bp:         driver handle
14344  *
14345  * This function waits until the ramrod completes.
14346  * Returns 0 on success, -ENODEV if the ramrod doesn't complete.
14348  */
14349 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
14350 {
14351         unsigned long ramrod_flags = 0;
14352
14353         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
14354         return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
14355                                  &bp->iscsi_l2_mac_obj, true,
14356                                  BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
14357 }
14358
14359 /* count denotes the number of new completions we have seen */
14360 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
14361 {
14362         struct eth_spe *spe;
14363         int cxt_index, cxt_offset;
14364
14365 #ifdef BNX2X_STOP_ON_ERROR
14366         if (unlikely(bp->panic))
14367                 return;
14368 #endif
14369
14370         spin_lock_bh(&bp->spq_lock);
14371         BUG_ON(bp->cnic_spq_pending < count);
14372         bp->cnic_spq_pending -= count;
14373
14374         for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
14375                 u16 type =  (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
14376                                 & SPE_HDR_CONN_TYPE) >>
14377                                 SPE_HDR_CONN_TYPE_SHIFT;
14378                 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
14379                                 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
14380
14381                 /* Set validation for iSCSI L2 client before sending SETUP
14382                  * ramrod
14383                  */
14384                 if (type == ETH_CONNECTION_TYPE) {
14385                         if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
14386                                 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
14387                                         ILT_PAGE_CIDS;
14388                                 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
14389                                         (cxt_index * ILT_PAGE_CIDS);
14390                                 bnx2x_set_ctx_validation(bp,
14391                                         &bp->context[cxt_index].
14392                                                          vcxt[cxt_offset].eth,
14393                                         BNX2X_ISCSI_ETH_CID(bp));
14394                         }
14395                 }
14396
14397                 /*
14398                  * No more than 8 L2 SPEs and no more than 8 L5 SPEs may be
14399                  * in the air at once. We also check that the number of
14400                  * outstanding COMMON ramrods does not exceed what the EQ and
14401                  * SPQ can accommodate.
14402                  */
14403                 if (type == ETH_CONNECTION_TYPE) {
14404                         if (!atomic_read(&bp->cq_spq_left))
14405                                 break;
14406                         else
14407                                 atomic_dec(&bp->cq_spq_left);
14408                 } else if (type == NONE_CONNECTION_TYPE) {
14409                         if (!atomic_read(&bp->eq_spq_left))
14410                                 break;
14411                         else
14412                                 atomic_dec(&bp->eq_spq_left);
14413                 } else if ((type == ISCSI_CONNECTION_TYPE) ||
14414                            (type == FCOE_CONNECTION_TYPE)) {
14415                         if (bp->cnic_spq_pending >=
14416                             bp->cnic_eth_dev.max_kwqe_pending)
14417                                 break;
14418                         else
14419                                 bp->cnic_spq_pending++;
14420                 } else {
14421                         BNX2X_ERR("Unknown SPE type: %d\n", type);
14422                         bnx2x_panic();
14423                         break;
14424                 }
14425
14426                 spe = bnx2x_sp_get_next(bp);
14427                 *spe = *bp->cnic_kwq_cons;
14428
14429                 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
14430                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
14431
14432                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
14433                         bp->cnic_kwq_cons = bp->cnic_kwq;
14434                 else
14435                         bp->cnic_kwq_cons++;
14436         }
14437         bnx2x_sp_prod_update(bp);
14438         spin_unlock_bh(&bp->spq_lock);
14439 }
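
/* A note on the decode at the top of the loop above: the connection type
 * (L2/iSCSI/FCoE/COMMON) lives in the little-endian hdr.type field and the
 * ramrod command id in hdr.conn_and_cmd_data, with masks and shifts defined
 * by the FW interface headers. The per-type checks then enforce the SPQ/EQ
 * credit scheme before each element is copied from the CNIC kwq ring onto
 * the hardware SPQ.
 */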
14440
14441 static int bnx2x_cnic_sp_queue(struct net_device *dev,
14442                                struct kwqe_16 *kwqes[], u32 count)
14443 {
14444         struct bnx2x *bp = netdev_priv(dev);
14445         int i;
14446
14447 #ifdef BNX2X_STOP_ON_ERROR
14448         if (unlikely(bp->panic)) {
14449                 BNX2X_ERR("Can't post to SP queue while panic\n");
14450                 return -EIO;
14451         }
14452 #endif
14453
14454         if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
14455             (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
14456                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
14457                 return -EAGAIN;
14458         }
14459
14460         spin_lock_bh(&bp->spq_lock);
14461
14462         for (i = 0; i < count; i++) {
14463                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
14464
14465                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
14466                         break;
14467
14468                 *bp->cnic_kwq_prod = *spe;
14469
14470                 bp->cnic_kwq_pending++;
14471
14472                 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
14473                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
14474                    spe->data.update_data_addr.hi,
14475                    spe->data.update_data_addr.lo,
14476                    bp->cnic_kwq_pending);
14477
14478                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
14479                         bp->cnic_kwq_prod = bp->cnic_kwq;
14480                 else
14481                         bp->cnic_kwq_prod++;
14482         }
14483
14484         spin_unlock_bh(&bp->spq_lock);
14485
14486         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
14487                 bnx2x_cnic_sp_post(bp, 0);
14488
14489         return i;
14490 }
14491
14492 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14493 {
14494         struct cnic_ops *c_ops;
14495         int rc = 0;
14496
14497         mutex_lock(&bp->cnic_mutex);
14498         c_ops = rcu_dereference_protected(bp->cnic_ops,
14499                                           lockdep_is_held(&bp->cnic_mutex));
14500         if (c_ops)
14501                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14502         mutex_unlock(&bp->cnic_mutex);
14503
14504         return rc;
14505 }
14506
14507 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14508 {
14509         struct cnic_ops *c_ops;
14510         int rc = 0;
14511
14512         rcu_read_lock();
14513         c_ops = rcu_dereference(bp->cnic_ops);
14514         if (c_ops)
14515                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14516         rcu_read_unlock();
14517
14518         return rc;
14519 }
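
/* Two flavours of the same notification: bnx2x_cnic_ctl_send() is meant for
 * process context and pins cnic_ops under cnic_mutex (it may sleep), while
 * bnx2x_cnic_ctl_send_bh() is safe in BH/softirq context and relies on a
 * plain RCU read-side critical section instead.
 */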
14520
14521 /*
14522  * for commands that have no data
14523  */
14524 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
14525 {
14526         struct cnic_ctl_info ctl = {0};
14527
14528         ctl.cmd = cmd;
14529
14530         return bnx2x_cnic_ctl_send(bp, &ctl);
14531 }
14532
14533 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
14534 {
14535         struct cnic_ctl_info ctl = {0};
14536
14537         /* first we tell CNIC and only then we count this as a completion */
14538         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
14539         ctl.data.comp.cid = cid;
14540         ctl.data.comp.error = err;
14541
14542         bnx2x_cnic_ctl_send_bh(bp, &ctl);
14543         bnx2x_cnic_sp_post(bp, 0);
14544 }
14545
14546 /* Called with netif_addr_lock_bh() taken.
14547  * Sets an rx_mode config for an iSCSI ETH client.
14548  * Doesn't block.
14549  * Completion should be checked outside.
14550  */
14551 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
14552 {
14553         unsigned long accept_flags = 0, ramrod_flags = 0;
14554         u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
14555         int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
14556
14557         if (start) {
14558                 /* Start accepting on the iSCSI L2 ring. Accept all multicasts
14559                  * because that's the only way for the UIO queue to accept them:
14560                  * in non-promiscuous mode only one queue per function (the
14561                  * leading one, in our case) will receive multicast packets.
14563                  */
14564                 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
14565                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
14566                 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
14567                 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
14568
14569                 /* Clear STOP_PENDING bit if START is requested */
14570                 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
14571
14572                 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
14573         } else
14574                 /* Clear START_PENDING bit if STOP is requested */
14575                 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
14576
14577         if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
14578                 set_bit(sched_state, &bp->sp_state);
14579         else {
14580                 __set_bit(RAMROD_RX, &ramrod_flags);
14581                 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
14582                                     ramrod_flags);
14583         }
14584 }
14585
14586 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
14587 {
14588         struct bnx2x *bp = netdev_priv(dev);
14589         int rc = 0;
14590
14591         switch (ctl->cmd) {
14592         case DRV_CTL_CTXTBL_WR_CMD: {
14593                 u32 index = ctl->data.io.offset;
14594                 dma_addr_t addr = ctl->data.io.dma_addr;
14595
14596                 bnx2x_ilt_wr(bp, index, addr);
14597                 break;
14598         }
14599
14600         case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
14601                 int count = ctl->data.credit.credit_count;
14602
14603                 bnx2x_cnic_sp_post(bp, count);
14604                 break;
14605         }
14606
14607         /* rtnl_lock is held.  */
14608         case DRV_CTL_START_L2_CMD: {
14609                 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14610                 unsigned long sp_bits = 0;
14611
14612                 /* Configure the iSCSI classification object */
14613                 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
14614                                    cp->iscsi_l2_client_id,
14615                                    cp->iscsi_l2_cid, BP_FUNC(bp),
14616                                    bnx2x_sp(bp, mac_rdata),
14617                                    bnx2x_sp_mapping(bp, mac_rdata),
14618                                    BNX2X_FILTER_MAC_PENDING,
14619                                    &bp->sp_state, BNX2X_OBJ_TYPE_RX,
14620                                    &bp->macs_pool);
14621
14622                 /* Set iSCSI MAC address */
14623                 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
14624                 if (rc)
14625                         break;
14626
14627                 mmiowb();
14628                 barrier();
14629
14630                 /* Start accepting on iSCSI L2 ring */
14631
14632                 netif_addr_lock_bh(dev);
14633                 bnx2x_set_iscsi_eth_rx_mode(bp, true);
14634                 netif_addr_unlock_bh(dev);
14635
14636                 /* bits to wait on */
14637                 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14638                 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
14639
14640                 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14641                         BNX2X_ERR("rx_mode completion timed out!\n");
14642
14643                 break;
14644         }
14645
14646         /* rtnl_lock is held.  */
14647         case DRV_CTL_STOP_L2_CMD: {
14648                 unsigned long sp_bits = 0;
14649
14650                 /* Stop accepting on iSCSI L2 ring */
14651                 netif_addr_lock_bh(dev);
14652                 bnx2x_set_iscsi_eth_rx_mode(bp, false);
14653                 netif_addr_unlock_bh(dev);
14654
14655                 /* bits to wait on */
14656                 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14657                 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
14658
14659                 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14660                         BNX2X_ERR("rx_mode completion timed out!\n");
14661
14662                 mmiowb();
14663                 barrier();
14664
14665                 /* Unset iSCSI L2 MAC */
14666                 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
14667                                         BNX2X_ISCSI_ETH_MAC, true);
14668                 break;
14669         }
14670         case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
14671                 int count = ctl->data.credit.credit_count;
14672
14673                 smp_mb__before_atomic();
14674                 atomic_add(count, &bp->cq_spq_left);
14675                 smp_mb__after_atomic();
14676                 break;
14677         }
14678         case DRV_CTL_ULP_REGISTER_CMD: {
14679                 int ulp_type = ctl->data.register_data.ulp_type;
14680
14681                 if (CHIP_IS_E3(bp)) {
14682                         int idx = BP_FW_MB_IDX(bp);
14683                         u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14684                         int path = BP_PATH(bp);
14685                         int port = BP_PORT(bp);
14686                         int i;
14687                         u32 scratch_offset;
14688                         u32 *host_addr;
14689
14690                         /* first write capability to shmem2 */
14691                         if (ulp_type == CNIC_ULP_ISCSI)
14692                                 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14693                         else if (ulp_type == CNIC_ULP_FCOE)
14694                                 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14695                         SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14696
14697                         if ((ulp_type != CNIC_ULP_FCOE) ||
14698                             (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
14699                             (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
14700                                 break;
14701
14702                         /* If we got here, write the FCoE capabilities */
14703                         scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
14704                         if (!scratch_offset)
14705                                 break;
14706                         scratch_offset += offsetof(struct glob_ncsi_oem_data,
14707                                                    fcoe_features[path][port]);
14708                         host_addr = (u32 *)
14709                                 &ctl->data.register_data.fcoe_features;
14710                         for (i = 0; i < sizeof(struct fcoe_capabilities);
14711                              i += 4)
14712                                 REG_WR(bp, scratch_offset + i,
14713                                        *(host_addr + i/4));
14714                 }
14715                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14716                 break;
14717         }
14718
14719         case DRV_CTL_ULP_UNREGISTER_CMD: {
14720                 int ulp_type = ctl->data.ulp_type;
14721
14722                 if (CHIP_IS_E3(bp)) {
14723                         int idx = BP_FW_MB_IDX(bp);
14724                         u32 cap;
14725
14726                         cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14727                         if (ulp_type == CNIC_ULP_ISCSI)
14728                                 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14729                         else if (ulp_type == CNIC_ULP_FCOE)
14730                                 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14731                         SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14732                 }
14733                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14734                 break;
14735         }
14736
14737         default:
14738                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
14739                 rc = -EINVAL;
14740         }
14741
14742         /* For storage-only interfaces, change driver state */
14743         if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
14744                 switch (ctl->drv_state) {
14745                 case DRV_NOP:
14746                         break;
14747                 case DRV_ACTIVE:
14748                         bnx2x_set_os_driver_state(bp,
14749                                                   OS_DRIVER_STATE_ACTIVE);
14750                         break;
14751                 case DRV_INACTIVE:
14752                         bnx2x_set_os_driver_state(bp,
14753                                                   OS_DRIVER_STATE_DISABLED);
14754                         break;
14755                 case DRV_UNLOADED:
14756                         bnx2x_set_os_driver_state(bp,
14757                                                   OS_DRIVER_STATE_NOT_LOADED);
14758                         break;
14759                 default:
14760                         BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state);
14761                 }
14762         }
14763
14764         return rc;
14765 }
14766
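/* cnic's drv_get_fc_npiv_tbl hook: read the FC-NPIV (N_Port ID
 * Virtualization) WWPN/WWNN table from NVRAM into the cnic-provided
 * buffer.  Returns 0 on success and -EINVAL when the table is absent,
 * empty or oversized.
 */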
14767 static int bnx2x_get_fc_npiv(struct net_device *dev,
14768                              struct cnic_fc_npiv_tbl *cnic_tbl)
14769 {
14770         struct bnx2x *bp = netdev_priv(dev);
14771         struct bdn_fc_npiv_tbl *tbl = NULL;
14772         u32 offset, entries;
14773         int rc = -EINVAL;
14774         int i;
14775
14776         if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
14777                 goto out;
14778
14779         DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
14780
14781         tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
14782         if (!tbl) {
14783                 BNX2X_ERR("Failed to allocate fc_npiv table\n");
14784                 goto out;
14785         }
14786
14787         offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
14788         if (!offset) {
14789                 DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
14790                 goto out;
14791         }
14792         DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
14793
14794         /* Read the table contents from nvram */
14795         if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
14796                 BNX2X_ERR("Failed to read FC-NPIV table\n");
14797                 goto out;
14798         }
14799
14800         /* Since bnx2x_nvram_read() returns data in be32, we need to convert
14801          * the number of entries back to cpu endianness.
14802          */
14803         entries = tbl->fc_npiv_cfg.num_of_npiv;
14804         entries = (__force u32)be32_to_cpu((__force __be32)entries);
14805         tbl->fc_npiv_cfg.num_of_npiv = entries;
14806
14807         if (!tbl->fc_npiv_cfg.num_of_npiv) {
14808                 DP(BNX2X_MSG_MCP,
14809                    "No FC-NPIV table [valid, simply not present]\n");
14810                 goto out;
14811         } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
14812                 BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
14813                           tbl->fc_npiv_cfg.num_of_npiv);
14814                 goto out;
14815         } else {
14816                 DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
14817                    tbl->fc_npiv_cfg.num_of_npiv);
14818         }
14819
14820         /* Copy the data into cnic-provided struct */
14821         cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
14822         for (i = 0; i < cnic_tbl->count; i++) {
14823                 memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
14824                 memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
14825         }
14826
14827         rc = 0;
14828 out:
14829         kfree(tbl);
14830         return rc;
14831 }
14832
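/* Describe our interrupt resources to cnic.  irq_arr[0] is the dedicated
 * CNIC status block (backed by the second MSI-X vector when MSI-X is in
 * use); irq_arr[1] is the default status block.
 */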
14833 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
14834 {
14835         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14836
14837         if (bp->flags & USING_MSIX_FLAG) {
14838                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
14839                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
14840                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
14841         } else {
14842                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
14843                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
14844         }
14845         if (!CHIP_IS_E1x(bp))
14846                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
14847         else
14848                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
14849
14850         cp->irq_arr[0].status_blk_num =  bnx2x_cnic_fw_sb_id(bp);
14851         cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
14852         cp->irq_arr[1].status_blk = bp->def_status_blk;
14853         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
14854         cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
14855
14856         cp->num_irq = 2;
14857 }
14858
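/* Refresh the CID/ILT layout exported to cnic; the CNIC context table
 * sits in the ILT lines immediately after those consumed by the L2 CIDs.
 */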
14859 void bnx2x_setup_cnic_info(struct bnx2x *bp)
14860 {
14861         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14862
14863         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
14864                              bnx2x_cid_ilt_lines(bp);
14865         cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
14866         cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
14867         cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
14868
14869         DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
14870            BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
14871            cp->iscsi_l2_cid);
14872
14873         if (NO_ISCSI_OOO(bp))
14874                 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
14875 }
14876
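/* cnic attaches here through cp->drv_register_cnic.  Load the CNIC-related
 * resources on first use, allocate a page for the kernel work queue (kwq)
 * ring, set up the IRQ info, and only then RCU-publish the ops pointer so
 * readers never observe a half-initialized state.
 */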
14877 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
14878                                void *data)
14879 {
14880         struct bnx2x *bp = netdev_priv(dev);
14881         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14882         int rc;
14883
14884         DP(NETIF_MSG_IFUP, "Register_cnic called\n");
14885
14886         if (!ops) {
14887                 BNX2X_ERR("NULL ops received\n");
14888                 return -EINVAL;
14889         }
14890
14891         if (!CNIC_SUPPORT(bp)) {
14892                 BNX2X_ERR("Can't register CNIC when not supported\n");
14893                 return -EOPNOTSUPP;
14894         }
14895
14896         if (!CNIC_LOADED(bp)) {
14897                 rc = bnx2x_load_cnic(bp);
14898                 if (rc) {
14899                         BNX2X_ERR("CNIC-related load failed\n");
14900                         return rc;
14901                 }
14902         }
14903
14904         bp->cnic_enabled = true;
14905
14906         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
14907         if (!bp->cnic_kwq)
14908                 return -ENOMEM;
14909
14910         bp->cnic_kwq_cons = bp->cnic_kwq;
14911         bp->cnic_kwq_prod = bp->cnic_kwq;
14912         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
14913
14914         bp->cnic_spq_pending = 0;
14915         bp->cnic_kwq_pending = 0;
14916
14917         bp->cnic_data = data;
14918
14919         cp->num_irq = 0;
14920         cp->drv_state |= CNIC_DRV_STATE_REGD;
14921         cp->iro_arr = bp->iro_arr;
14922
14923         bnx2x_setup_cnic_irq_info(bp);
14924
14925         rcu_assign_pointer(bp->cnic_ops, ops);
14926
14927         /* Schedule driver to read CNIC driver versions */
14928         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14929
14930         return 0;
14931 }
14932
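/* Detach cnic: clear the RCU-protected ops pointer under cnic_mutex, wait
 * out in-flight readers with synchronize_rcu(), and only then free the
 * kwq page.
 */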
14933 static int bnx2x_unregister_cnic(struct net_device *dev)
14934 {
14935         struct bnx2x *bp = netdev_priv(dev);
14936         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14937
14938         mutex_lock(&bp->cnic_mutex);
14939         cp->drv_state = 0;
14940         RCU_INIT_POINTER(bp->cnic_ops, NULL);
14941         mutex_unlock(&bp->cnic_mutex);
14942         synchronize_rcu();
14943         bp->cnic_enabled = false;
14944         kfree(bp->cnic_kwq);
14945         bp->cnic_kwq = NULL;
14946
14947         return 0;
14948 }
14949
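/* Discovery entry point for cnic: return a filled cnic_eth_dev describing
 * the BARs, CID ranges and driver callbacks the iSCSI/FCoE ULPs will use,
 * or NULL when storage offload is entirely disabled.
 */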
14950 static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
14951 {
14952         struct bnx2x *bp = netdev_priv(dev);
14953         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14954
14955         /* If both iSCSI and FCoE are disabled, return NULL to
14956          * indicate to CNIC that it should not try to work with
14957          * this device.
14958          */
14959         if (NO_ISCSI(bp) && NO_FCOE(bp))
14960                 return NULL;
14961
14962         cp->drv_owner = THIS_MODULE;
14963         cp->chip_id = CHIP_ID(bp);
14964         cp->pdev = bp->pdev;
14965         cp->io_base = bp->regview;
14966         cp->io_base2 = bp->doorbells;
14967         cp->max_kwqe_pending = 8;
14968         cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
14969         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
14970                              bnx2x_cid_ilt_lines(bp);
14971         cp->ctx_tbl_len = CNIC_ILT_LINES;
14972         cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
14973         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
14974         cp->drv_ctl = bnx2x_drv_ctl;
14975         cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
14976         cp->drv_register_cnic = bnx2x_register_cnic;
14977         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
14978         cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
14979         cp->iscsi_l2_client_id =
14980                 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
14981         cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
14982
14983         if (NO_ISCSI_OOO(bp))
14984                 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
14985
14986         if (NO_ISCSI(bp))
14987                 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
14988
14989         if (NO_FCOE(bp))
14990                 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
14991
14992         BNX2X_DEV_INFO(
14993                 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
14994                 cp->ctx_blk_size,
14995                 cp->ctx_tbl_offset,
14996                 cp->ctx_tbl_len,
14997                 cp->starting_cid);
14998         return cp;
14999 }
15000
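/* Compute the USTORM internal-memory offset at which this queue's Rx
 * producers are published; VFs use their own mapping, while E1x and E2+
 * chips use different per-queue layouts.
 */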
15001 static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
15002 {
15003         struct bnx2x *bp = fp->bp;
15004         u32 offset = BAR_USTRORM_INTMEM;
15005
15006         if (IS_VF(bp))
15007                 return bnx2x_vf_ustorm_prods_offset(bp, fp);
15008         else if (!CHIP_IS_E1x(bp))
15009                 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
15010         else
15011                 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
15012
15013         return offset;
15014 }
15015
15016 /* Called only on E1H or E2.
15017  * When pretending to be a PF, the pretend value is the function number,
15018  * 0...7.  When pretending to be a VF, the pretend value is the
15019  * PF-num:VF-valid:ABS-VFID combination.
15020  */
15021 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
15022 {
15023         u32 pretend_reg;
15024
15025         if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
15026                 return -1;
15027
15028         /* get my own pretend register */
15029         pretend_reg = bnx2x_get_pretend_reg(bp);
15030         REG_WR(bp, pretend_reg, pretend_func_val);
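        /* Read back to flush the posted write before callers rely on the
         * pretend state.
         */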
15031         REG_RD(bp, pretend_reg);
15032         return 0;
15033 }
15034
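/* Deferred Tx timestamping work.  Bit 16 of the TLLH PTP_BUF_SEQID
 * register signals a captured timestamp: convert it through the
 * timecounter, attach it to the waiting skb and re-arm the capture
 * buffer.  If nothing was captured yet, reschedule ourselves.
 */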
15035 static void bnx2x_ptp_task(struct work_struct *work)
15036 {
15037         struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
15038         int port = BP_PORT(bp);
15039         u32 val_seq;
15040         u64 timestamp, ns;
15041         struct skb_shared_hwtstamps shhwtstamps;
15042
15043         /* Read Tx timestamp registers */
15044         val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15045                          NIG_REG_P0_TLLH_PTP_BUF_SEQID);
15046         if (val_seq & 0x10000) {
15047                 /* There is a valid timestamp value */
15048                 timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
15049                                    NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
15050                 timestamp <<= 32;
15051                 timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
15052                                     NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
15053                 /* Reset timestamp register to allow new timestamp */
15054                 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15055                        NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
15056                 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
15057
15058                 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
15059                 shhwtstamps.hwtstamp = ns_to_ktime(ns);
15060                 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
15061                 dev_kfree_skb_any(bp->ptp_tx_skb);
15062                 bp->ptp_tx_skb = NULL;
15063
15064                 DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
15065                    timestamp, ns);
15066         } else {
15067                 DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
15068                 /* Reschedule to keep checking for a valid timestamp value */
15069                 schedule_work(&bp->ptp_task);
15070         }
15071 }
15072
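/* Rx counterpart of the Tx path above: pull the host-buffer timestamp
 * registers, convert cycles to nanoseconds and stamp the skb in place.
 */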
15073 void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
15074 {
15075         int port = BP_PORT(bp);
15076         u64 timestamp, ns;
15077
15078         timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
15079                             NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
15080         timestamp <<= 32;
15081         timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
15082                             NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);
15083
15084         /* Reset timestamp register to allow new timestamp */
15085         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
15086                NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
15087
15088         ns = timecounter_cyc2time(&bp->timecounter, timestamp);
15089
15090         skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
15091
15092         DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
15093            timestamp, ns);
15094 }
15095
15096 /* Read the PHC (PTP hardware clock) free-running counter via DMAE */
15097 static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc)
15098 {
15099         struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
15100         int port = BP_PORT(bp);
15101         u32 wb_data[2];
15102         u64 phc_cycles;
15103
15104         REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
15105                     NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
15106         phc_cycles = wb_data[1];
15107         phc_cycles = (phc_cycles << 32) + wb_data[0];
15108
15109         DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);
15110
15111         return phc_cycles;
15112 }
15113
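/* With mult = 1 and shift = 1, timecounter math works out to
 * ns = (cycles * mult) >> shift = cycles / 2, i.e. the free-running PHC
 * presumably ticks at 2 GHz so that two cycles make one nanosecond.
 */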
15114 static void bnx2x_init_cyclecounter(struct bnx2x *bp)
15115 {
15116         memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
15117         bp->cyclecounter.read = bnx2x_cyclecounter_read;
15118         bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
15119         bp->cyclecounter.shift = 1;
15120         bp->cyclecounter.mult = 1;
15121 }
15122
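/* Ask FW, via a SET_TIMESYNC function-state ramrod, to reset the PHC
 * drift adjustment while keeping the clock offset; wait (and retry) until
 * the ramrod completes.
 */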
15123 static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
15124 {
15125         struct bnx2x_func_state_params func_params = {NULL};
15126         struct bnx2x_func_set_timesync_params *set_timesync_params =
15127                 &func_params.params.set_timesync;
15128
15129         /* Prepare parameters for function state transitions */
15130         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
15131         __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
15132
15133         func_params.f_obj = &bp->func_obj;
15134         func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
15135
15136         /* Function parameters */
15137         set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
15138         set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
15139
15140         return bnx2x_func_state_change(bp, &func_params);
15141 }
15142
15143 static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
15144 {
15145         struct bnx2x_queue_state_params q_params;
15146         int rc, i;
15147
15148         /* send queue update ramrod to enable PTP packets */
15149         memset(&q_params, 0, sizeof(q_params));
15150         __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
15151         q_params.cmd = BNX2X_Q_CMD_UPDATE;
15152         __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
15153                   &q_params.params.update.update_flags);
15154         __set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
15155                   &q_params.params.update.update_flags);
15156
15157         /* send the ramrod on all the queues of the PF */
15158         for_each_eth_queue(bp, i) {
15159                 struct bnx2x_fastpath *fp = &bp->fp[i];
15160
15161                 /* Set the appropriate Queue object */
15162                 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
15163
15164                 /* Update the Queue state */
15165                 rc = bnx2x_queue_state_change(bp, &q_params);
15166                 if (rc) {
15167                         BNX2X_ERR("Failed to enable PTP packets\n");
15168                         return rc;
15169                 }
15170         }
15171
15172         return 0;
15173 }
15174
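/* Translate the saved hwtstamp_config into NIG PTP PARAM/RULE mask pairs.
 * Each rx_filter class maps to a fixed, hardware-specific constant pair;
 * filters the NIC cannot match exactly are coarsened to the nearest
 * supported class, and the coarsened value is what the ioctl reports back.
 */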
15175 int bnx2x_configure_ptp_filters(struct bnx2x *bp)
15176 {
15177         int port = BP_PORT(bp);
15178         int rc;
15179
15180         if (!bp->hwtstamp_ioctl_called)
15181                 return 0;
15182
15183         switch (bp->tx_type) {
15184         case HWTSTAMP_TX_ON:
15185                 bp->flags |= TX_TIMESTAMPING_EN;
15186                 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
15187                        NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
15188                 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
15189                        NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
15190                 break;
15191         case HWTSTAMP_TX_ONESTEP_SYNC:
15192                 BNX2X_ERR("One-step timestamping is not supported\n");
15193                 return -ERANGE;
15194         }
15195
15196         switch (bp->rx_filter) {
15197         case HWTSTAMP_FILTER_NONE:
15198                 break;
15199         case HWTSTAMP_FILTER_ALL:
15200         case HWTSTAMP_FILTER_SOME:
15201                 bp->rx_filter = HWTSTAMP_FILTER_NONE;
15202                 break;
15203         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
15204         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
15205         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
15206                 bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
15207                 /* Initialize PTP detection for UDP/IPv4 events */
15208                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15209                        NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
15210                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15211                        NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
15212                 break;
15213         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
15214         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
15215         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
15216                 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
15217                 /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
15218                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15219                        NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
15220                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15221                        NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
15222                 break;
15223         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
15224         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
15225         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
15226                 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
15227                 /* Initialize PTP detection L2 events */
15228                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15229                        NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
15230                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15231                        NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
15232
15233                 break;
15234         case HWTSTAMP_FILTER_PTP_V2_EVENT:
15235         case HWTSTAMP_FILTER_PTP_V2_SYNC:
15236         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
15237                 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
15238                 /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
15239                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15240                        NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
15241                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15242                        NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
15243                 break;
15244         }
15245
15246         /* Indicate to FW that this PF expects recorded PTP packets */
15247         rc = bnx2x_enable_ptp_packets(bp);
15248         if (rc)
15249                 return rc;
15250
15251         /* Enable sending PTP packets to host */
15252         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
15253                NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);
15254
15255         return 0;
15256 }
15257
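/* SIOCSHWTSTAMP handler.  Userspace reaches this roughly as follows (an
 * illustrative sketch only; the interface name and fd are examples):
 *
 *        struct hwtstamp_config cfg = {
 *                .tx_type   = HWTSTAMP_TX_ON,
 *                .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *        };
 *        struct ifreq ifr = {0};
 *
 *        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *        ifr.ifr_data = (void *)&cfg;
 *        ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return, cfg.rx_filter holds the (possibly coarsened) filter that was
 * actually programmed.
 */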
15258 static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
15259 {
15260         struct hwtstamp_config config;
15261         int rc;
15262
15263         DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
15264
15265         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
15266                 return -EFAULT;
15267
15268         DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
15269            config.tx_type, config.rx_filter);
15270
15271         if (config.flags) {
15272                 BNX2X_ERR("config.flags is reserved for future use\n");
15273                 return -EINVAL;
15274         }
15275
15276         bp->hwtstamp_ioctl_called = 1;
15277         bp->tx_type = config.tx_type;
15278         bp->rx_filter = config.rx_filter;
15279
15280         rc = bnx2x_configure_ptp_filters(bp);
15281         if (rc)
15282                 return rc;
15283
15284         config.rx_filter = bp->rx_filter;
15285
15286         return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
15287                 -EFAULT : 0;
15288 }
15289
15290 /* Configure HW for PTP: reset the event-detection masks (the hwtstamp
 * ioctl programs the real ones), disable PTP-to-host forwarding until
 * requested, enable the PTP feature and its free-running counter, reset
 * the drift register via ramrod, and clear any stale timestamp captures.
 */
15291 static int bnx2x_configure_ptp(struct bnx2x *bp)
15292 {
15293         int rc, port = BP_PORT(bp);
15294         u32 wb_data[2];
15295
15296         /* Reset PTP event detection rules - will be configured in the IOCTL */
15297         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15298                NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
15299         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15300                NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
15301         REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
15302                NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
15303         REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
15304                NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
15305
15306         /* Disable PTP packets to host - will be configured in the IOCTL */
15307         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
15308                NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
15309
15310         /* Enable the PTP feature */
15311         REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
15312                NIG_REG_P0_PTP_EN, 0x3F);
15313
15314         /* Enable the free-running counter */
15315         wb_data[0] = 0;
15316         wb_data[1] = 0;
15317         REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
15318
15319         /* Reset drift register (offset register is not reset) */
15320         rc = bnx2x_send_reset_timesync_ramrod(bp);
15321         if (rc) {
15322                 BNX2X_ERR("Failed to reset PHC drift register\n");
15323                 return -EFAULT;
15324         }
15325
15326         /* Reset possibly old timestamps */
15327         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
15328                NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
15329         REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15330                NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
15331
15332         return 0;
15333 }
15334
15335 /* Called during load to initialize PTP-related state */
15336 void bnx2x_init_ptp(struct bnx2x *bp)
15337 {
15338         int rc;
15339
15340         /* Configure PTP in HW */
15341         rc = bnx2x_configure_ptp(bp);
15342         if (rc) {
15343                 BNX2X_ERR("Stopping PTP initialization\n");
15344                 return;
15345         }
15346
15347         /* Init work queue for Tx timestamping */
15348         INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);
15349
15350         /* Init cyclecounter and timecounter. This is done only in the first
15351          * load. If done in every load, PTP application will fail when doing
15352          * unload / load (e.g. MTU change) while it is running.
15353          */
15354         if (!bp->timecounter_init_done) {
15355                 bnx2x_init_cyclecounter(bp);
15356                 timecounter_init(&bp->timecounter, &bp->cyclecounter,
15357                                  ktime_to_ns(ktime_get_real()));
15358                 bp->timecounter_init_done = 1;
15359         }
15360
15361         DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
15362 }