Linux-libre 3.2.72-gnu1
[librecmc/linux-libre.git] / drivers / scsi / bfa / bfad.c
1 /*
2  * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3  * All rights reserved
4  * www.brocade.com
5  *
6  * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms of the GNU General Public License (GPL) Version 2 as
10  * published by the Free Software Foundation
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  */
17
18 /*
19  *  bfad.c Linux driver PCI interface module.
20  */
21 #include <linux/module.h>
22 #include <linux/kthread.h>
23 #include <linux/errno.h>
24 #include <linux/sched.h>
25 #include <linux/init.h>
26 #include <linux/fs.h>
27 #include <linux/pci.h>
28 #include <linux/firmware.h>
29 #include <asm/uaccess.h>
30 #include <asm/fcntl.h>
31
32 #include "bfad_drv.h"
33 #include "bfad_im.h"
34 #include "bfa_fcs.h"
35 #include "bfa_defs.h"
36 #include "bfa.h"
37
BFA_TRC_FILE(LDRV, BFAD);
/* NOTE(review): presumably serializes bfad_list/bfad_inst updates — confirm with callers */
DEFINE_MUTEX(bfad_mutex);
LIST_HEAD(bfad_list);

/* Per-module bookkeeping */
static int      bfad_inst;
static int      num_sgpgs_parm;
int             supported_fc4s;

/* Module tunables; registered with module_param() below, defaults in the PARM_DESC strings */
char            *host_name, *os_name, *os_patch;
int             num_rports, num_ios, num_tms;
int             num_fcxps, num_ufbufs;
int             reqq_size, rspq_size, num_sgpgs;
int             rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
int             bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
int             bfa_io_max_sge = BFAD_IO_MAX_SGE;
int             bfa_log_level = 3; /* WARNING log level */
int             ioc_auto_recover = BFA_TRUE;
int             bfa_linkup_delay = -1;
int             fdmi_enable = BFA_TRUE;
int             pcie_max_read_reqsz;
int             bfa_debugfs_enable = 1;
int             msix_disable_cb = 0, msix_disable_ct = 0;
int             max_xfer_size = BFAD_MAX_SECTORS >> 1;

/* Firmware related: per-ASIC-family image buffers and their sizes */
u32     bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
u32     *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
64
/* Firmware file names (blob references removed in this linux-libre tree) */
#define BFAD_FW_FILE_CB         "/*(DEBLOBBED)*/"
#define BFAD_FW_FILE_CT         "/*(DEBLOBBED)*/"
#define BFAD_FW_FILE_CT2        "/*(DEBLOBBED)*/"

/* Firmware image load/free helpers, defined later in this file */
static u32 *bfad_load_fwimg(struct pci_dev *pdev);
static void bfad_free_fwimg(void);
static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
                u32 *bfi_image_size, char *fw_name);

/* MSI-X vector names for CT-family adapters */
static const char *msix_name_ct[] = {
        "ctrl",
        "cpe0", "cpe1", "cpe2", "cpe3",
        "rme0", "rme1", "rme2", "rme3" };

/* MSI-X vector names for CB-family adapters */
static const char *msix_name_cb[] = {
        "cpe0", "cpe1", "cpe2", "cpe3",
        "rme0", "rme1", "rme2", "rme3",
        "eemc", "elpu0", "elpu1", "epss", "mlpu" };

/*(DEBLOBBED)*/
85
/*
 * Module parameter registration. All parameters are world-readable and
 * root-writable via sysfs (S_IRUGO | S_IWUSR); effective ranges are
 * documented in the description strings below.
 */
module_param(os_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
module_param(os_patch, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
module_param(host_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
module_param(num_rports, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_rports, "Max number of rports supported per port "
                                "(physical/logical), default=1024");
module_param(num_ios, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
module_param(num_tms, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128");
module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame "
                                "buffers, default=64");
module_param(reqq_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, "
                                "default=256");
module_param(rspq_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, "
                                "default=64");
module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, "
                                        "Range[>0]");
module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
module_param(bfa_log_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, "
                                "Range[Critical:1|Error:2|Warning:3|Info:4]");
module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
                                "Range[off:0|on:1]");
module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for "
                        "boot port. Otherwise 10 secs in RHEL4 & 0 for "
                        "[RHEL5, SLES10, ESX40] Range[>0]");
module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts "
                        "for Brocade-415/425/815/825 cards, default=0, "
                        " Range[false:0|true:1]");
module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts "
                        "if possible for Brocade-1010/1020/804/1007/902/1741 "
                        "cards, default=0, Range[false:0|true:1]");
module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, "
                                "Range[false:0|true:1]");
module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
                "(use system setting), Range[128|256|512|1024|2048|4096]");
module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
                " Range[false:0|true:1]");
module_param(max_xfer_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_xfer_size, "default=32MB,"
                " Range[64k|128k|256k|512k|1024k|2048k]");
149
/*
 * Forward declarations for the per-instance driver state machine.
 * Normal lifecycle (from the handlers below):
 *   uninit -> created -> initializing -> operational -> fcs_exit ->
 *   stopping -> uninit; "failed" is entered when HAL init fails.
 */
static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
164
165 /*
166  * Beginning state for the driver instance, awaiting the pci_probe event
167  */
168 static void
169 bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
170 {
171         bfa_trc(bfad, event);
172
173         switch (event) {
174         case BFAD_E_CREATE:
175                 bfa_sm_set_state(bfad, bfad_sm_created);
176                 bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad,
177                                                 "%s", "bfad_worker");
178                 if (IS_ERR(bfad->bfad_tsk)) {
179                         printk(KERN_INFO "bfad[%d]: Kernel thread "
180                                 "creation failed!\n", bfad->inst_no);
181                         bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED);
182                 }
183                 bfa_sm_send_event(bfad, BFAD_E_INIT);
184                 break;
185
186         case BFAD_E_STOP:
187                 /* Ignore stop; already in uninit */
188                 break;
189
190         default:
191                 bfa_sm_fault(bfad, event);
192         }
193 }
194
/*
 * Driver Instance is created, awaiting event INIT to initialize the bfad
 */
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
{
	unsigned long flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT:
		bfa_sm_set_state(bfad, bfad_sm_initializing);

		init_completion(&bfad->comp);

		/* Enable Interrupt and wait bfa_init completion */
		if (bfad_setup_intr(bfad)) {
			printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
					bfad->inst_no);
			bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
			break;
		}

		/* Kick off IOC init under the driver lock */
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_iocfc_init(&bfad->bfa);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		/* Set up interrupt handler for each vectors */
		if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
			bfad_install_msix_handler(bfad)) {
			printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
				__func__, bfad->inst_no);
		}

		bfad_init_timer(bfad);

		/* bfa_cb_init() completes bfad->comp when IOC init finishes */
		wait_for_completion(&bfad->comp);

		if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
			bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
		} else {
			printk(KERN_WARNING
				"bfa %s: bfa init failed\n",
				bfad->pci_name);
			bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
			bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
		}

		break;

	case BFAD_E_KTHREAD_CREATE_FAILED:
		/* Worker thread never started; drop back to uninit */
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}
254
/*
 * HAL init in progress; waiting for the outcome reported from
 * bfad_sm_created() (INIT_SUCCESS / INIT_FAILED / INTR_INIT_FAILED).
 */
static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int	retval;
	unsigned long	flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		/* Init worker is no longer needed once HAL init succeeded */
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK)
			break;	/* ops failed to start; no state change */
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_INTR_INIT_FAILED:
		/* Interrupt setup failed: stop the worker and reset to uninit */
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		break;

	case BFAD_E_INIT_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_failed);
		break;
	default:
		bfa_sm_fault(bfad, event);
	}
}
291
/*
 * HAL init failed. Remain here until either a later init reports success
 * (resume normal operation) or the instance is stopped (teardown).
 */
static void
bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int	retval;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK)
			break;	/* stay in failed if ops cannot start */
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_STOP:
		/* Undo whatever partial configuration completed earlier */
		if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
			bfad_uncfg_pport(bfad);
		if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) {
			bfad_im_probe_undo(bfad);
			bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		}
		bfad_stop(bfad);
		break;

	case BFAD_E_EXIT_COMP:
		/* Stop completed: release interrupts and the heartbeat timer */
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}
327
328 static void
329 bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event)
330 {
331         bfa_trc(bfad, event);
332
333         switch (event) {
334         case BFAD_E_STOP:
335                 bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
336                 bfad_fcs_stop(bfad);
337                 break;
338
339         default:
340                 bfa_sm_fault(bfad, event);
341         }
342 }
343
344 static void
345 bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event)
346 {
347         bfa_trc(bfad, event);
348
349         switch (event) {
350         case BFAD_E_FCS_EXIT_COMP:
351                 bfa_sm_set_state(bfad, bfad_sm_stopping);
352                 bfad_stop(bfad);
353                 break;
354
355         default:
356                 bfa_sm_fault(bfad, event);
357         }
358 }
359
360 static void
361 bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
362 {
363         bfa_trc(bfad, event);
364
365         switch (event) {
366         case BFAD_E_EXIT_COMP:
367                 bfa_sm_set_state(bfad, bfad_sm_uninit);
368                 bfad_remove_intr(bfad);
369                 del_timer_sync(&bfad->hal_tmo);
370                 bfad_im_probe_undo(bfad);
371                 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
372                 bfad_uncfg_pport(bfad);
373                 break;
374
375         default:
376                 bfa_sm_fault(bfad, event);
377                 break;
378         }
379 }
380
381 /*
382  *  BFA callbacks
383  */
384 void
385 bfad_hcb_comp(void *arg, bfa_status_t status)
386 {
387         struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;
388
389         fcomp->status = status;
390         complete(&fcomp->comp);
391 }
392
/*
 * bfa_init callback
 *
 * Called when IOC initialization finishes. Records the outcome in
 * bfad_flags and completes bfad->comp, unblocking the waiter in
 * bfad_sm_created().
 */
void
bfa_cb_init(void *drv, bfa_status_t init_status)
{
	struct bfad_s         *bfad = drv;

	if (init_status == BFA_STATUS_OK) {
		bfad->bfad_flags |= BFAD_HAL_INIT_DONE;

		/*
		 * If BFAD_HAL_INIT_FAIL flag is set:
		 * Wake up the kernel thread to start
		 * the bfad operations after HAL init done
		 */
		if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
			bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
			wake_up_process(bfad->bfad_tsk);
		}
	}

	complete(&bfad->comp);
}
417
418 /*
419  *  BFA_FCS callbacks
420  */
421 struct bfad_port_s *
422 bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
423                  enum bfa_lport_role roles, struct bfad_vf_s *vf_drv,
424                  struct bfad_vport_s *vp_drv)
425 {
426         bfa_status_t    rc;
427         struct bfad_port_s    *port_drv;
428
429         if (!vp_drv && !vf_drv) {
430                 port_drv = &bfad->pport;
431                 port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
432         } else if (!vp_drv && vf_drv) {
433                 port_drv = &vf_drv->base_port;
434                 port_drv->pvb_type = BFAD_PORT_VF_BASE;
435         } else if (vp_drv && !vf_drv) {
436                 port_drv = &vp_drv->drv_port;
437                 port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
438         } else {
439                 port_drv = &vp_drv->drv_port;
440                 port_drv->pvb_type = BFAD_PORT_VF_VPORT;
441         }
442
443         port_drv->fcs_port = port;
444         port_drv->roles = roles;
445
446         if (roles & BFA_LPORT_ROLE_FCP_IM) {
447                 rc = bfad_im_port_new(bfad, port_drv);
448                 if (rc != BFA_STATUS_OK) {
449                         bfad_im_port_delete(bfad, port_drv);
450                         port_drv = NULL;
451                 }
452         }
453
454         return port_drv;
455 }
456
457 void
458 bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
459                     struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
460 {
461         struct bfad_port_s    *port_drv;
462
463         /* this will be only called from rmmod context */
464         if (vp_drv && !vp_drv->comp_del) {
465                 port_drv = (vp_drv) ? (&(vp_drv)->drv_port) :
466                                 ((vf_drv) ? (&(vf_drv)->base_port) :
467                                 (&(bfad)->pport));
468                 bfa_trc(bfad, roles);
469                 if (roles & BFA_LPORT_ROLE_FCP_IM)
470                         bfad_im_port_delete(bfad, port_drv);
471         }
472 }
473
474 /*
475  * FCS RPORT alloc callback, after successful PLOGI by FCS
476  */
477 bfa_status_t
478 bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
479                     struct bfad_rport_s **rport_drv)
480 {
481         bfa_status_t    rc = BFA_STATUS_OK;
482
483         *rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
484         if (*rport_drv == NULL) {
485                 rc = BFA_STATUS_ENOMEM;
486                 goto ext;
487         }
488
489         *rport = &(*rport_drv)->fcs_rport;
490
491 ext:
492         return rc;
493 }
494
495 /*
496  * FCS PBC VPORT Create
497  */
498 void
499 bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
500 {
501
502         struct bfa_lport_cfg_s port_cfg = {0};
503         struct bfad_vport_s   *vport;
504         int rc;
505
506         vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
507         if (!vport) {
508                 bfa_trc(bfad, 0);
509                 return;
510         }
511
512         vport->drv_port.bfad = bfad;
513         port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
514         port_cfg.pwwn = pbc_vport.vp_pwwn;
515         port_cfg.nwwn = pbc_vport.vp_nwwn;
516         port_cfg.preboot_vp  = BFA_TRUE;
517
518         rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0,
519                                   &port_cfg, vport);
520
521         if (rc != BFA_STATUS_OK) {
522                 bfa_trc(bfad, 0);
523                 return;
524         }
525
526         list_add_tail(&vport->list_entry, &bfad->pbc_vport_list);
527 }
528
529 void
530 bfad_hal_mem_release(struct bfad_s *bfad)
531 {
532         struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
533         struct bfa_mem_dma_s *dma_info, *dma_elem;
534         struct bfa_mem_kva_s *kva_info, *kva_elem;
535         struct list_head *dm_qe, *km_qe;
536
537         dma_info = &hal_meminfo->dma_info;
538         kva_info = &hal_meminfo->kva_info;
539
540         /* Iterate through the KVA meminfo queue */
541         list_for_each(km_qe, &kva_info->qe) {
542                 kva_elem = (struct bfa_mem_kva_s *) km_qe;
543                 vfree(kva_elem->kva);
544         }
545
546         /* Iterate through the DMA meminfo queue */
547         list_for_each(dm_qe, &dma_info->qe) {
548                 dma_elem = (struct bfa_mem_dma_s *) dm_qe;
549                 dma_free_coherent(&bfad->pcidev->dev,
550                                 dma_elem->mem_len, dma_elem->kva,
551                                 (dma_addr_t) dma_elem->dma);
552         }
553
554         memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
555 }
556
/*
 * Overlay user-supplied module parameters onto the default IOC firmware
 * and driver configuration, then copy the effective values back into the
 * module parameter variables so sysfs reports what is actually in use.
 */
void
bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
{
	/* Only positive (and, where bounded, in-range) values override */
	if (num_rports > 0)
		bfa_cfg->fwcfg.num_rports = num_rports;
	if (num_ios > 0)
		bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
	if (num_tms > 0)
		bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
	if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX)
		bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
	if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX)
		bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
	if (reqq_size > 0)
		bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
	if (rspq_size > 0)
		bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
	if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX)
		bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;

	/*
	 * populate the hal values back to the driver for sysfs use.
	 * otherwise, the default values will be shown as 0 in sysfs
	 */
	num_rports = bfa_cfg->fwcfg.num_rports;
	num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
	num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
	num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
	num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
	reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
	rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
	num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
}
590
591 bfa_status_t
592 bfad_hal_mem_alloc(struct bfad_s *bfad)
593 {
594         struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
595         struct bfa_mem_dma_s *dma_info, *dma_elem;
596         struct bfa_mem_kva_s *kva_info, *kva_elem;
597         struct list_head *dm_qe, *km_qe;
598         bfa_status_t    rc = BFA_STATUS_OK;
599         dma_addr_t      phys_addr;
600
601         bfa_cfg_get_default(&bfad->ioc_cfg);
602         bfad_update_hal_cfg(&bfad->ioc_cfg);
603         bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
604         bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa);
605
606         dma_info = &hal_meminfo->dma_info;
607         kva_info = &hal_meminfo->kva_info;
608
609         /* Iterate through the KVA meminfo queue */
610         list_for_each(km_qe, &kva_info->qe) {
611                 kva_elem = (struct bfa_mem_kva_s *) km_qe;
612                 kva_elem->kva = vmalloc(kva_elem->mem_len);
613                 if (kva_elem->kva == NULL) {
614                         bfad_hal_mem_release(bfad);
615                         rc = BFA_STATUS_ENOMEM;
616                         goto ext;
617                 }
618                 memset(kva_elem->kva, 0, kva_elem->mem_len);
619         }
620
621         /* Iterate through the DMA meminfo queue */
622         list_for_each(dm_qe, &dma_info->qe) {
623                 dma_elem = (struct bfa_mem_dma_s *) dm_qe;
624                 dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev,
625                                                 dma_elem->mem_len,
626                                                 &phys_addr, GFP_KERNEL);
627                 if (dma_elem->kva == NULL) {
628                         bfad_hal_mem_release(bfad);
629                         rc = BFA_STATUS_ENOMEM;
630                         goto ext;
631                 }
632                 dma_elem->dma = phys_addr;
633                 memset(dma_elem->kva, 0, dma_elem->mem_len);
634         }
635 ext:
636         return rc;
637 }
638
/*
 * Create a vport under a vf.
 *
 * Creates the FCS vport under bfad_lock, allocates a SCSI host for
 * FCP-IM roles, and starts the vport. If SCSI host allocation fails,
 * the already-created FCS vport is deleted synchronously (this path
 * waits on comp_del) before the bfad_vport_s memory is freed.
 */
bfa_status_t
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
		  struct bfa_lport_cfg_s *port_cfg, struct device *dev)
{
	struct bfad_vport_s   *vport;
	int		rc = BFA_STATUS_OK;
	unsigned long	flags;
	struct completion fcomp;

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
	if (!vport) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	vport->drv_port.bfad = bfad;
	/* FCS vport creation runs under the driver lock */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
				  port_cfg, vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (rc != BFA_STATUS_OK)
		goto ext_free_vport;

	if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) {
		rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
							dev);
		if (rc != BFA_STATUS_OK)
			goto ext_free_fcs_vport;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_vport_start(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;

ext_free_fcs_vport:
	/* Roll back the FCS vport and wait for its delete to complete */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	vport->comp_del = &fcomp;
	init_completion(vport->comp_del);
	bfa_fcs_vport_delete(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(vport->comp_del);
ext_free_vport:
	kfree(vport);
ext:
	return rc;
}
691
692 void
693 bfad_bfa_tmo(unsigned long data)
694 {
695         struct bfad_s         *bfad = (struct bfad_s *) data;
696         unsigned long   flags;
697         struct list_head               doneq;
698
699         spin_lock_irqsave(&bfad->bfad_lock, flags);
700
701         bfa_timer_beat(&bfad->bfa.timer_mod);
702
703         bfa_comp_deq(&bfad->bfa, &doneq);
704         spin_unlock_irqrestore(&bfad->bfad_lock, flags);
705
706         if (!list_empty(&doneq)) {
707                 bfa_comp_process(&bfad->bfa, &doneq);
708                 spin_lock_irqsave(&bfad->bfad_lock, flags);
709                 bfa_comp_free(&bfad->bfa, &doneq);
710                 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
711         }
712
713         mod_timer(&bfad->hal_tmo,
714                   jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
715 }
716
717 void
718 bfad_init_timer(struct bfad_s *bfad)
719 {
720         init_timer(&bfad->hal_tmo);
721         bfad->hal_tmo.function = bfad_bfa_tmo;
722         bfad->hal_tmo.data = (unsigned long)bfad;
723
724         mod_timer(&bfad->hal_tmo,
725                   jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
726 }
727
728 int
729 bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
730 {
731         int             rc = -ENODEV;
732
733         if (pci_enable_device(pdev)) {
734                 printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
735                 goto out;
736         }
737
738         if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
739                 goto out_disable_device;
740
741         pci_set_master(pdev);
742
743
744         if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
745             (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
746                 if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
747                    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
748                         printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
749                         goto out_release_region;
750                 }
751         }
752
753         bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
754         bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));
755
756         if (bfad->pci_bar0_kva == NULL) {
757                 printk(KERN_ERR "Fail to map bar0\n");
758                 goto out_release_region;
759         }
760
761         bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
762         bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
763         bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
764         bfad->hal_pcidev.device_id = pdev->device;
765         bfad->hal_pcidev.ssid = pdev->subsystem_device;
766         bfad->pci_name = pci_name(pdev);
767
768         bfad->pci_attr.vendor_id = pdev->vendor;
769         bfad->pci_attr.device_id = pdev->device;
770         bfad->pci_attr.ssid = pdev->subsystem_device;
771         bfad->pci_attr.ssvid = pdev->subsystem_vendor;
772         bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);
773
774         bfad->pcidev = pdev;
775
776         /* Adjust PCIe Maximum Read Request Size */
777         if (pcie_max_read_reqsz > 0) {
778                 int pcie_cap_reg;
779                 u16 pcie_dev_ctl;
780                 u16 mask = 0xffff;
781
782                 switch (pcie_max_read_reqsz) {
783                 case 128:
784                         mask = 0x0;
785                         break;
786                 case 256:
787                         mask = 0x1000;
788                         break;
789                 case 512:
790                         mask = 0x2000;
791                         break;
792                 case 1024:
793                         mask = 0x3000;
794                         break;
795                 case 2048:
796                         mask = 0x4000;
797                         break;
798                 case 4096:
799                         mask = 0x5000;
800                         break;
801                 default:
802                         break;
803                 }
804
805                 pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
806                 if (mask != 0xffff && pcie_cap_reg) {
807                         pcie_cap_reg += 0x08;
808                         pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
809                         if ((pcie_dev_ctl & 0x7000) != mask) {
810                                 printk(KERN_WARNING "BFA[%s]: "
811                                 "pcie_max_read_request_size is %d, "
812                                 "reset to %d\n", bfad->pci_name,
813                                 (1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
814                                 pcie_max_read_reqsz);
815
816                                 pcie_dev_ctl &= ~0x7000;
817                                 pci_write_config_word(pdev, pcie_cap_reg,
818                                                 pcie_dev_ctl | mask);
819                         }
820                 }
821         }
822
823         return 0;
824
825 out_release_region:
826         pci_release_regions(pdev);
827 out_disable_device:
828         pci_disable_device(pdev);
829 out:
830         return rc;
831 }
832
/*
 * Undo bfad_pci_init(): unmap both BARs, release the claimed regions,
 * and disable the device. Order is the reverse of the setup path.
 */
void
bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
{
	pci_iounmap(pdev, bfad->pci_bar0_kva);
	pci_iounmap(pdev, bfad->pci_bar2_kva);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	/* clear the driver-private pointer stored on this pci_dev */
	pci_set_drvdata(pdev, NULL);
}
842
/*
 * Allocate HAL memory, attach the BFA and FCS layers and configure the
 * base (physical) port.
 *
 * Returns BFA_STATUS_OK on success.  On failure every step already
 * taken is unwound (FCS exit, bfa detach, HAL memory release) and
 * BFA_STATUS_FAILED is returned.
 */
bfa_status_t
bfad_drv_init(struct bfad_s *bfad)
{
	bfa_status_t	rc;
	unsigned long	flags;

	/* Seed per-instance configuration from the module parameters */
	bfad->cfg_data.rport_del_timeout = rport_del_timeout;
	bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
	bfad->cfg_data.io_max_sge = bfa_io_max_sge;
	bfad->cfg_data.binding_method = FCP_PWWN_BINDING;

	rc = bfad_hal_mem_alloc(bfad);
	if (rc != BFA_STATUS_OK) {
		printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
		       bfad->inst_no);
		printk(KERN_WARNING
			"Not enough memory to attach all Brocade HBA ports, %s",
			"System may need more memory.\n");
		goto out_hal_mem_alloc_failure;
	}

	/* Wire the trace module and port log into the HAL before attach */
	bfad->bfa.trcmod = bfad->trcmod;
	bfad->bfa.plog = &bfad->plog_buf;
	bfa_plog_init(&bfad->plog_buf);
	bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
		     0, "Driver Attach");

	bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
		   &bfad->hal_pcidev);

	/* FCS INIT - attach and initialize FCS under the driver lock */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfad->bfa_fcs.trcmod = bfad->trcmod;
	bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
	bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
	bfa_fcs_init(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	bfad->bfad_flags |= BFAD_DRV_INIT_DONE;

	/* configure base port */
	rc = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
	if (rc != BFA_STATUS_OK)
		goto out_cfg_pport_fail;

	return BFA_STATUS_OK;

out_cfg_pport_fail:
	/* fcs exit - on cfg pport failure */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfad->pport.flags |= BFAD_PORT_DELETE;
	bfa_fcs_exit(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	/* the FCS delete callback posts bfad->comp; wait for it unlocked */
	wait_for_completion(&bfad->comp);
	/* bfa detach - free hal memory */
	bfa_detach(&bfad->bfa);
	bfad_hal_mem_release(bfad);
out_hal_mem_alloc_failure:
	return BFA_STATUS_FAILED;
}
904
/*
 * Tear down what bfad_drv_init() set up: stop the IOC (waiting for the
 * completion posted from its stop callback), then disable interrupts
 * and release timers, vectors and HAL memory.
 */
void
bfad_drv_uninit(struct bfad_s *bfad)
{
	unsigned long	flags;

	/* Request IOC stop under the lock; completion fires from callback */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_iocfc_stop(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	del_timer_sync(&bfad->hal_tmo);	/* stop the periodic HAL timer */
	bfa_isr_disable(&bfad->bfa);
	bfa_detach(&bfad->bfa);
	bfad_remove_intr(bfad);		/* free MSIX/INTx vectors */
	bfad_hal_mem_release(bfad);

	bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
}
924
/*
 * Start the IOC, PBC vports and the FCS fabric under the driver lock,
 * then flush the IM workqueue so queued port work finishes before we
 * return.
 */
void
bfad_drv_start(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_iocfc_start(&bfad->bfa);
	bfa_fcs_pbc_vport_init(&bfad->bfa_fcs);
	bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
	bfad->bfad_flags |= BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* bfad->im may be NULL if IM probe has not run yet */
	if (bfad->im)
		flush_workqueue(bfad->im->drv_workq);
}
940
/*
 * Stop the FCS: mark the base port for delete, exit FCS and wait for
 * the delete completion, then advance the bfad state machine.
 */
void
bfad_fcs_stop(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfad->pport.flags |= BFAD_PORT_DELETE;
	bfa_fcs_exit(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
}
955
/*
 * Stop the IOC, wait for the stop completion, then advance the bfad
 * state machine with BFAD_E_EXIT_COMP.
 */
void
bfad_stop(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_iocfc_stop(&bfad->bfa);
	bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP);
}
970
971 bfa_status_t
972 bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
973 {
974         int             rc = BFA_STATUS_OK;
975
976         /* Allocate scsi_host for the physical port */
977         if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
978             (role & BFA_LPORT_ROLE_FCP_IM)) {
979                 if (bfad->pport.im_port == NULL) {
980                         rc = BFA_STATUS_FAILED;
981                         goto out;
982                 }
983
984                 rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
985                                                 &bfad->pcidev->dev);
986                 if (rc != BFA_STATUS_OK)
987                         goto out;
988
989                 bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
990         }
991
992         bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;
993
994 out:
995         return rc;
996 }
997
/*
 * Undo bfad_cfg_pport(): when the FCP initiator role was configured on
 * the base port, free its SCSI host and im_port, clear the role, and
 * finally drop the cfg-done flag.
 */
void
bfad_uncfg_pport(struct bfad_s *bfad)
{
	if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
	    (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
		bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
		bfad_im_port_clean(bfad->pport.im_port);
		kfree(bfad->pport.im_port);
		bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM;
	}

	bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
}
1011
1012 bfa_status_t
1013 bfad_start_ops(struct bfad_s *bfad) {
1014
1015         int     retval;
1016         unsigned long   flags;
1017         struct bfad_vport_s *vport, *vport_new;
1018         struct bfa_fcs_driver_info_s driver_info;
1019
1020         /* Limit min/max. xfer size to [64k-32MB] */
1021         if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
1022                 max_xfer_size = BFAD_MIN_SECTORS >> 1;
1023         if (max_xfer_size > BFAD_MAX_SECTORS >> 1)
1024                 max_xfer_size = BFAD_MAX_SECTORS >> 1;
1025
1026         /* Fill the driver_info info to fcs*/
1027         memset(&driver_info, 0, sizeof(driver_info));
1028         strncpy(driver_info.version, BFAD_DRIVER_VERSION,
1029                 sizeof(driver_info.version) - 1);
1030         if (host_name)
1031                 strncpy(driver_info.host_machine_name, host_name,
1032                         sizeof(driver_info.host_machine_name) - 1);
1033         if (os_name)
1034                 strncpy(driver_info.host_os_name, os_name,
1035                         sizeof(driver_info.host_os_name) - 1);
1036         if (os_patch)
1037                 strncpy(driver_info.host_os_patch, os_patch,
1038                         sizeof(driver_info.host_os_patch) - 1);
1039
1040         strncpy(driver_info.os_device_name, bfad->pci_name,
1041                 sizeof(driver_info.os_device_name - 1));
1042
1043         /* FCS driver info init */
1044         spin_lock_irqsave(&bfad->bfad_lock, flags);
1045         bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
1046         spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1047
1048         /*
1049          * FCS update cfg - reset the pwwn/nwwn of fabric base logical port
1050          * with values learned during bfa_init firmware GETATTR REQ.
1051          */
1052         bfa_fcs_update_cfg(&bfad->bfa_fcs);
1053
1054         /* Setup fc host fixed attribute if the lk supports */
1055         bfad_fc_host_init(bfad->pport.im_port);
1056
1057         /* BFAD level FC4 IM specific resource allocation */
1058         retval = bfad_im_probe(bfad);
1059         if (retval != BFA_STATUS_OK) {
1060                 printk(KERN_WARNING "bfad_im_probe failed\n");
1061                 if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
1062                         bfa_sm_set_state(bfad, bfad_sm_failed);
1063                 bfad_im_probe_undo(bfad);
1064                 bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
1065                 bfad_uncfg_pport(bfad);
1066                 bfad_stop(bfad);
1067                 return BFA_STATUS_FAILED;
1068         } else
1069                 bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
1070
1071         bfad_drv_start(bfad);
1072
1073         /* Complete pbc vport create */
1074         list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list,
1075                                 list_entry) {
1076                 struct fc_vport_identifiers vid;
1077                 struct fc_vport *fc_vport;
1078                 char pwwn_buf[BFA_STRING_32];
1079
1080                 memset(&vid, 0, sizeof(vid));
1081                 vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
1082                 vid.vport_type = FC_PORTTYPE_NPIV;
1083                 vid.disable = false;
1084                 vid.node_name = wwn_to_u64((u8 *)
1085                                 (&((vport->fcs_vport).lport.port_cfg.nwwn)));
1086                 vid.port_name = wwn_to_u64((u8 *)
1087                                 (&((vport->fcs_vport).lport.port_cfg.pwwn)));
1088                 fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
1089                 if (!fc_vport) {
1090                         wwn2str(pwwn_buf, vid.port_name);
1091                         printk(KERN_WARNING "bfad%d: failed to create pbc vport"
1092                                 " %s\n", bfad->inst_no, pwwn_buf);
1093                 }
1094                 list_del(&vport->list_entry);
1095                 kfree(vport);
1096         }
1097
1098         /*
1099          * If bfa_linkup_delay is set to -1 default; try to retrive the
1100          * value using the bfad_get_linkup_delay(); else use the
1101          * passed in module param value as the bfa_linkup_delay.
1102          */
1103         if (bfa_linkup_delay < 0) {
1104                 bfa_linkup_delay = bfad_get_linkup_delay(bfad);
1105                 bfad_rport_online_wait(bfad);
1106                 bfa_linkup_delay = -1;
1107         } else
1108                 bfad_rport_online_wait(bfad);
1109
1110         BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");
1111
1112         return BFA_STATUS_OK;
1113 }
1114
1115 int
1116 bfad_worker(void *ptr)
1117 {
1118         struct bfad_s *bfad;
1119         unsigned long   flags;
1120
1121         bfad = (struct bfad_s *)ptr;
1122
1123         while (!kthread_should_stop()) {
1124
1125                 /* Send event BFAD_E_INIT_SUCCESS */
1126                 bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
1127
1128                 spin_lock_irqsave(&bfad->bfad_lock, flags);
1129                 bfad->bfad_tsk = NULL;
1130                 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1131
1132                 break;
1133         }
1134
1135         return 0;
1136 }
1137
1138 /*
1139  *  BFA driver interrupt functions
1140  */
/*
 * INTx (legacy line) interrupt handler.  Claims the interrupt only if
 * bfa_intx() says it was ours (the line may be shared).  Completions
 * are dequeued under the driver lock but processed with it dropped,
 * then freed back under the lock.
 */
irqreturn_t
bfad_intx(int irq, void *dev_id)
{
	struct bfad_s	*bfad = dev_id;
	struct list_head	doneq;
	unsigned long	flags;
	bfa_boolean_t rc;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_intx(&bfad->bfa);
	if (!rc) {
		/* not our interrupt - let other handlers on the line run */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return IRQ_NONE;
	}

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		/* run completion callbacks unlocked */
		bfa_comp_process(&bfad->bfa, &doneq);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	return IRQ_HANDLED;

}
1170
/*
 * MSI-X interrupt handler for a single vector.  dev_id is the per-
 * vector bfad_msix_s installed by bfad_install_msix_handler().  Same
 * lock discipline as bfad_intx(): dequeue under the lock, process
 * unlocked, free under the lock.
 */
static irqreturn_t
bfad_msix(int irq, void *dev_id)
{
	struct bfad_msix_s *vec = dev_id;
	struct bfad_s *bfad = vec->bfad;
	struct list_head doneq;
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	bfa_msix(&bfad->bfa, vec->msix.entry);
	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		/* run completion callbacks unlocked */
		bfa_comp_process(&bfad->bfa, &doneq);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	return IRQ_HANDLED;
}
1195
1196 /*
1197  * Initialize the MSIX entry table.
1198  */
1199 static void
1200 bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
1201                          int mask, int max_bit)
1202 {
1203         int     i;
1204         int     match = 0x00000001;
1205
1206         for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
1207                 if (mask & match) {
1208                         bfad->msix_tab[bfad->nvec].msix.entry = i;
1209                         bfad->msix_tab[bfad->nvec].bfad = bfad;
1210                         msix_entries[bfad->nvec].entry = i;
1211                         bfad->nvec++;
1212                 }
1213
1214                 match <<= 1;
1215         }
1216
1217 }
1218
/*
 * Request an IRQ for every MSI-X vector recorded in msix_tab.  On any
 * failure the vectors already installed are freed, MSI-X is disabled
 * and 1 is returned; returns 0 when all handlers are installed.
 */
int
bfad_install_msix_handler(struct bfad_s *bfad)
{
	int i, error = 0;

	for (i = 0; i < bfad->nvec; i++) {
		/* vector name depends on ASIC family (Crossbow vs Catapult) */
		sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
				bfad->pci_name,
				((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ?
				msix_name_cb[i] : msix_name_ct[i]));

		error = request_irq(bfad->msix_tab[i].msix.vector,
				    (irq_handler_t) bfad_msix, 0,
				    bfad->msix_tab[i].name, &bfad->msix_tab[i]);
		bfa_trc(bfad, i);
		bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
		if (error) {
			int	j;

			/* roll back the vectors installed so far */
			for (j = 0; j < i; j++)
				free_irq(bfad->msix_tab[j].msix.vector,
						&bfad->msix_tab[j]);

			bfad->bfad_flags &= ~BFAD_MSIX_ON;
			pci_disable_msix(bfad->pcidev);

			return 1;
		}
	}

	return 0;
}
1251
1252 /*
1253  * Setup MSIX based interrupt.
1254  */
/*
 * Set up interrupts: try MSI-X when the ASIC supports it and it is not
 * disabled by module parameter, otherwise (or on MSI-X enable failure)
 * fall back to a legacy INTx line.  Returns 0 on success, 1 when even
 * the line-based request_irq() fails.
 */
int
bfad_setup_intr(struct bfad_s *bfad)
{
	int error = 0;
	u32 mask = 0, i, num_bit = 0, max_bit = 0;
	struct msix_entry msix_entries[MAX_MSIX_ENTRY];
	struct pci_dev *pdev = bfad->pcidev;
	u16	reg;

	/* Call BFA to get the msix map for this PCI function.  */
	bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);

	/* Set up the msix entry table */
	bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);

	if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) ||
	   (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) {

		error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
		if (error) {
			/*
			 * Only error number of vector is available.
			 * We don't have a mechanism to map multiple
			 * interrupts into one vector, so even if we
			 * can try to request less vectors, we don't
			 * know how to associate interrupt events to
			 *  vectors. Linux doesn't duplicate vectors
			 * in the MSIX table for this case.
			 */

			printk(KERN_WARNING "bfad%d: "
				"pci_enable_msix failed (%d),"
				" use line based.\n", bfad->inst_no, error);

			goto line_based;
		}

		/* Disable INTX in MSI-X mode */
		pci_read_config_word(pdev, PCI_COMMAND, &reg);

		if (!(reg & PCI_COMMAND_INTX_DISABLE))
			pci_write_config_word(pdev, PCI_COMMAND,
				reg | PCI_COMMAND_INTX_DISABLE);

		/* Save the vectors assigned by pci_enable_msix() */
		for (i = 0; i < bfad->nvec; i++) {
			bfa_trc(bfad, msix_entries[i].vector);
			bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
		}

		bfa_msix_init(&bfad->bfa, bfad->nvec);

		bfad->bfad_flags |= BFAD_MSIX_ON;

		return error;	/* error == 0 here */
	}

line_based:
	error = 0;
	if (request_irq
	    (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
	     BFAD_DRIVER_NAME, bfad) != 0) {
		/* Enable interrupt handler failed */
		return 1;
	}
	bfad->bfad_flags |= BFAD_INTX_ON;

	return error;
}
1324
1325 void
1326 bfad_remove_intr(struct bfad_s *bfad)
1327 {
1328         int     i;
1329
1330         if (bfad->bfad_flags & BFAD_MSIX_ON) {
1331                 for (i = 0; i < bfad->nvec; i++)
1332                         free_irq(bfad->msix_tab[i].msix.vector,
1333                                         &bfad->msix_tab[i]);
1334
1335                 pci_disable_msix(bfad->pcidev);
1336                 bfad->bfad_flags &= ~BFAD_MSIX_ON;
1337         } else if (bfad->bfad_flags & BFAD_INTX_ON) {
1338                 free_irq(bfad->pcidev->irq, bfad);
1339         }
1340 }
1341
1342 /*
1343  * PCI probe entry.
1344  */
/*
 * PCI probe: allocate and initialize a bfad instance for the device,
 * wire up tracing/AEN queues, load firmware, set up PCI resources,
 * run driver init and kick the bfad state machine with BFAD_E_CREATE.
 * Returns 0 on success or a negative errno, unwinding in reverse
 * order of initialization on every failure path.
 */
int
bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct bfad_s	*bfad;
	int		error = -ENODEV, retval, i;

	/* For single port cards - only claim function 0 */
	if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
		(PCI_FUNC(pdev->devfn) != 0))
		return -ENODEV;

	bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
	if (!bfad) {
		error = -ENOMEM;
		goto out;
	}

	bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
	if (!bfad->trcmod) {
		printk(KERN_WARNING "Error alloc trace buffer!\n");
		error = -ENOMEM;
		goto out_alloc_trace_failure;
	}

	/* TRACE INIT */
	bfa_trc_init(bfad->trcmod);
	bfa_trc(bfad, bfad_inst);

	/* AEN INIT - seed the free queue with all AEN entries */
	INIT_LIST_HEAD(&bfad->free_aen_q);
	INIT_LIST_HEAD(&bfad->active_aen_q);
	for (i = 0; i < BFA_AEN_MAX_ENTRY; i++)
		list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q);

	/* firmware image is required before touching the hardware */
	if (!(bfad_load_fwimg(pdev))) {
		kfree(bfad->trcmod);
		goto out_alloc_trace_failure;
	}

	retval = bfad_pci_init(pdev, bfad);
	if (retval) {
		printk(KERN_WARNING "bfad_pci_init failure!\n");
		error = retval;
		goto out_pci_init_failure;
	}

	/* register this instance in the global list under the mutex */
	mutex_lock(&bfad_mutex);
	bfad->inst_no = bfad_inst++;
	list_add_tail(&bfad->list_entry, &bfad_list);
	mutex_unlock(&bfad_mutex);

	/* Initializing the state machine: State set to uninit */
	bfa_sm_set_state(bfad, bfad_sm_uninit);

	spin_lock_init(&bfad->bfad_lock);
	pci_set_drvdata(pdev, bfad);

	bfad->ref_count = 0;
	bfad->pport.bfad = bfad;
	INIT_LIST_HEAD(&bfad->pbc_vport_list);

	/* Setup the debugfs node for this bfad */
	if (bfa_debugfs_enable)
		bfad_debugfs_init(&bfad->pport);

	retval = bfad_drv_init(bfad);
	if (retval != BFA_STATUS_OK)
		goto out_drv_init_failure;

	bfa_sm_send_event(bfad, BFAD_E_CREATE);

	/* still in uninit means the create event failed to progress */
	if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
		goto out_bfad_sm_failure;

	return 0;

out_bfad_sm_failure:
	bfa_detach(&bfad->bfa);
	bfad_hal_mem_release(bfad);
out_drv_init_failure:
	/* Remove the debugfs node for this bfad */
	kfree(bfad->regdata);
	bfad_debugfs_exit(&bfad->pport);
	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);
out_pci_init_failure:
	kfree(bfad->trcmod);
out_alloc_trace_failure:
	kfree(bfad);
out:
	return error;
}
1440
1441 /*
1442  * PCI remove entry.
1443  */
/*
 * PCI remove: stop the worker thread if it is still running, push the
 * state machine through BFAD_E_STOP, detach the HAL, tear down debugfs
 * and PCI resources, and free the instance.
 */
void
bfad_pci_remove(struct pci_dev *pdev)
{
	struct bfad_s         *bfad = pci_get_drvdata(pdev);
	unsigned long	flags;

	bfa_trc(bfad, bfad->inst_no);

	/* check for a live worker thread under the lock, stop it unlocked */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfad->bfad_tsk != NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		kthread_stop(bfad->bfad_tsk);
	} else {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	/* Send Event BFAD_E_STOP */
	bfa_sm_send_event(bfad, BFAD_E_STOP);

	/* Driver detach and dealloc mem */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_detach(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfad_hal_mem_release(bfad);

	/* Remove the debugfs node for this bfad */
	kfree(bfad->regdata);
	bfad_debugfs_exit(&bfad->pport);

	/* Cleaning the BFAD instance */
	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);

	kfree(bfad->trcmod);
	kfree(bfad);
}
1483
/*
 * PCI IDs claimed by this driver.  The CT/CT2 entries additionally
 * match on the Fibre Channel serial class because those ASICs expose
 * multiple functions (FC and FCoE/network) on the same device ID.
 */
struct pci_device_id bfad_id_table[] = {
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G2P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G1P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT_FC,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT2,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},

	{0, 0},		/* terminator */
};

MODULE_DEVICE_TABLE(pci, bfad_id_table);
1526
/* PCI driver glue: probe/remove entry points for the IDs above */
static struct pci_driver bfad_pci_driver = {
	.name = BFAD_DRIVER_NAME,
	.id_table = bfad_id_table,
	.probe = bfad_pci_probe,
	.remove = __devexit_p(bfad_pci_remove),
};
1533
1534 /*
1535  * Driver module init.
1536  */
/*
 * Module init: register the IM (SCSI midlayer) support, derive the
 * supported FC4 roles, propagate module parameters into the BFA layer
 * and register the PCI driver.  Returns 0 or a negative errno; on any
 * failure the IM module registration is rolled back.
 */
static int __init
bfad_init(void)
{
	int		error = 0;

	printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
			BFAD_DRIVER_VERSION);

	if (num_sgpgs > 0)
		num_sgpgs_parm = num_sgpgs;

	error = bfad_im_module_init();
	if (error) {
		error = -ENOMEM;
		printk(KERN_WARNING "bfad_im_module_init failure\n");
		goto ext;
	}

	/* NOTE(review): FCPI_NAME presumably expands to " fcpim" when
	 * initiator-mode support is compiled in -- confirm in bfad_im.h */
	if (strcmp(FCPI_NAME, " fcpim") == 0)
		supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;

	/* hand module parameters down to the BFA/FCS layers */
	bfa_auto_recover = ioc_auto_recover;
	bfa_fcs_rport_set_del_timeout(rport_del_timeout);

	error = pci_register_driver(&bfad_pci_driver);
	if (error) {
		printk(KERN_WARNING "pci_register_driver failure\n");
		goto ext;
	}

	return 0;

ext:
	bfad_im_module_exit();
	return error;
}
1573
1574 /*
1575  * Driver module exit.
1576  */
/*
 * Module exit: unregister the PCI driver first (quiesces all devices),
 * then tear down the IM module and free any cached firmware images.
 */
static void __exit
bfad_exit(void)
{
	pci_unregister_driver(&bfad_pci_driver);
	bfad_im_module_exit();
	bfad_free_fwimg();
}
1584
1585 /* Firmware handling */
/*
 * Fetch the firmware blob fw_name and copy it into a vmalloc'ed
 * buffer.  On success *bfi_image points at the copy and
 * *bfi_image_size holds the size in 32-bit words; on lookup failure
 * *bfi_image is set to NULL.
 *
 * NOTE(review): the out path calls release_firmware(fw) even when the
 * lookup failed -- this relies on fw being NULL'ed on failure and on
 * release_firmware(NULL) being a no-op; confirm against the firmware
 * loader API for this kernel version.
 */
static void
bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
		u32 *bfi_image_size, char *fw_name)
{
	const struct firmware *fw;

	if (reject_firmware(&fw, fw_name, &pdev->dev)) {
		printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
		*bfi_image = NULL;
		goto out;
	}

	*bfi_image = vmalloc(fw->size);
	if (NULL == *bfi_image) {
		printk(KERN_ALERT "Fail to allocate buffer for fw image "
			"size=%x!\n", (u32) fw->size);
		goto out;
	}

	memcpy(*bfi_image, fw->data, fw->size);
	*bfi_image_size = fw->size/sizeof(u32);
out:
	release_firmware(fw);
}
1610
1611 static u32 *
1612 bfad_load_fwimg(struct pci_dev *pdev)
1613 {
1614         if (bfa_asic_id_ct2(pdev->device)) {
1615                 if (bfi_image_ct2_size == 0)
1616                         bfad_read_firmware(pdev, &bfi_image_ct2,
1617                                 &bfi_image_ct2_size, BFAD_FW_FILE_CT2);
1618                 return bfi_image_ct2;
1619         } else if (bfa_asic_id_ct(pdev->device)) {
1620                 if (bfi_image_ct_size == 0)
1621                         bfad_read_firmware(pdev, &bfi_image_ct,
1622                                 &bfi_image_ct_size, BFAD_FW_FILE_CT);
1623                 return bfi_image_ct;
1624         } else if (bfa_asic_id_cb(pdev->device)) {
1625                 if (bfi_image_cb_size == 0)
1626                         bfad_read_firmware(pdev, &bfi_image_cb,
1627                                 &bfi_image_cb_size, BFAD_FW_FILE_CB);
1628                 return bfi_image_cb;
1629         }
1630
1631         return NULL;
1632 }
1633
1634 static void
1635 bfad_free_fwimg(void)
1636 {
1637         if (bfi_image_ct2_size && bfi_image_ct2)
1638                 vfree(bfi_image_ct2);
1639         if (bfi_image_ct_size && bfi_image_ct)
1640                 vfree(bfi_image_ct);
1641         if (bfi_image_cb_size && bfi_image_cb)
1642                 vfree(bfi_image_cb);
1643 }
1644
1645 module_init(bfad_init);
1646 module_exit(bfad_exit);
1647 MODULE_LICENSE("GPL");
1648 MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
1649 MODULE_AUTHOR("Brocade Communications Systems, Inc.");
1650 MODULE_VERSION(BFAD_DRIVER_VERSION);