2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
/*
 * Human-readable short names for selected mailbox command opcodes, used by
 * mb_to_str() when logging.
 * NOTE(review): the struct member list and the array terminator are not
 * visible in this excerpt — confirm against the canonical source.
 */
static struct mb_cmd_name {
	{MBC_GET_PORT_DATABASE, "GPDB"},
	{MBC_GET_ID_LIST, "GIDList"},
	{MBC_GET_LINK_PRIV_STATS, "Stats"},
	{MBC_GET_RESOURCE_COUNTS, "ResCnt"},
/*
 * mb_to_str() - map a mailbox command opcode to a short name for log output
 * by scanning the mb_str[] table.
 * NOTE(review): the loop body and the fallback return for unknown opcodes
 * are elided in this excerpt.
 */
static const char *mb_to_str(uint16_t cmd)
	struct mb_cmd_name *e;

	for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
/*
 * Mailbox commands that may be issued while the ISP is still executing from
 * ROM (e.g. during ISP abort/reset); consulted by is_rom_cmd() so these are
 * not rejected while an abort is pending.
 */
static struct rom_cmd {
	{ MBC_EXECUTE_FIRMWARE },
	{ MBC_READ_RAM_WORD },
	{ MBC_MAILBOX_REGISTER_TEST },
	{ MBC_VERIFY_CHECKSUM },
	{ MBC_GET_FIRMWARE_VERSION },
	{ MBC_LOAD_RISC_RAM },
	{ MBC_DUMP_RISC_RAM },
	{ MBC_LOAD_RISC_RAM_EXTENDED },
	{ MBC_DUMP_RISC_RAM_EXTENDED },
	{ MBC_WRITE_RAM_WORD_EXTENDED },
	{ MBC_READ_RAM_EXTENDED },
	{ MBC_GET_RESOURCE_COUNTS },
	{ MBC_SET_FIRMWARE_OPTION },
	{ MBC_MID_INITIALIZE_FIRMWARE },
	{ MBC_GET_FIRMWARE_STATE },
	{ MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
	{ MBC_GET_RETRY_COUNT },
	{ MBC_TRACE_CONTROL },
	{ MBC_INITIALIZE_MULTIQ },
	{ MBC_IOCB_COMMAND_A64 },
	{ MBC_GET_ADAPTER_LOOP_ID },
	{ MBC_GET_RNID_PARAMS },
	{ MBC_GET_SET_ZIO_THRESHOLD },
/*
 * is_rom_cmd() - return nonzero if @cmd appears in rom_cmds[], i.e. it is
 * safe to issue while an ISP abort is active/pending.
 * NOTE(review): loop body and return statements are elided in this excerpt.
 */
static int is_rom_cmd(uint16_t cmd)
	for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
/*
 * qla2x00_mailbox_command
 *	Issue mailbox command and wait for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS (command performed successfully)
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * NOTE(review): this excerpt has lines elided (the return-type line, local
 * declarations such as rval/io_lock_on/chip_reset/reg/iptr/iptr2/mboxes/cnt,
 * and several closing braces are not visible), and "&reg" has been
 * mis-encoded as "®" throughout — restore from the canonical source before
 * building.
 */
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
	unsigned long flags = 0;
	uint8_t abort_active;
	uint16_t command = 0;
	uint16_t __iomem *optr;
	unsigned long wait_time;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);

	/* Bail out early if the PCI channel is in a fatal error state. */
	if (ha->pdev->error_state > pci_channel_io_frozen) {
		ql_log(ql_log_warn, vha, 0x1001,
		    "error_state is greater than pci_channel_io_frozen, "
		return QLA_FUNCTION_TIMEOUT;

	if (vha->device_flags & DFLG_DEV_FAILED) {
		ql_log(ql_log_warn, vha, 0x1002,
		    "Device in failed state, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;

	/* if PCI error, then avoid mbx processing.*/
	if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
	    test_bit(UNLOADING, &base_vha->dpc_flags)) {
		ql_log(ql_log_warn, vha, 0xd04e,
		    "PCI error, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;

	io_lock_on = base_vha->flags.init_done;

	abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
	/* Snapshot the reset generation so a chip reset during the command
	 * can be detected later. */
	chip_reset = ha->chip_reset;

	if (ha->flags.pci_channel_io_perm_failure) {
		ql_log(ql_log_warn, vha, 0x1003,
		    "Perm failure on EEH timeout MBX, exiting.\n");
		return QLA_FUNCTION_TIMEOUT;

	if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
		/* Setting Link-Down error */
		mcp->mb[0] = MBS_LINK_DOWN_ERROR;
		ql_log(ql_log_warn, vha, 0x1004,
		    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
		return QLA_FUNCTION_TIMEOUT;

	/* check if ISP abort is active and return cmd with timeout */
	if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
	    !is_rom_cmd(mcp->mb[0])) {
		ql_log(ql_log_info, vha, 0x1005,
		    "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
		return QLA_FUNCTION_TIMEOUT;

	atomic_inc(&ha->num_pend_mbx_stage1);
	/*
	 * Wait for active mailbox commands to finish by waiting at most tov
	 * seconds. This is to serialize actual issuing of mailbox cmds during
	 * non ISP abort time.
	 */
	if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
		/* Timeout occurred. Return error. */
		ql_log(ql_log_warn, vha, 0xd035,
		    "Cmd access timeout, cmd=0x%x, Exiting.\n",
		atomic_dec(&ha->num_pend_mbx_stage1);
		return QLA_FUNCTION_TIMEOUT;
	atomic_dec(&ha->num_pend_mbx_stage1);
	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {

	/* Save mailbox command for debug */

	ql_dbg(ql_dbg_mbx, vha, 0x1006,
	    "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
	    ha->flags.mbox_busy) {
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ha->flags.mbox_busy = 1;

	/* Load mailbox registers. */
	/* Select the outgoing mailbox window per chip family. */
		optr = (uint16_t __iomem *)®->isp82.mailbox_in[0];
	else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
		optr = (uint16_t __iomem *)®->isp24.mailbox0;
		optr = (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 0);

	command = mcp->mb[0];
	mboxes = mcp->out_mb;

	ql_dbg(ql_dbg_mbx, vha, 0x1111,
	    "Mailbox registers (OUT):\n");
	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
		/* QLA2200 splits its mailbox registers into two banks. */
		if (IS_QLA2200(ha) && cnt == 8)
			    (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 8);
		if (mboxes & BIT_0) {
			ql_dbg(ql_dbg_mbx, vha, 0x1112,
			    "mbox[%d]<-0x%04x\n", cnt, *iptr);
			WRT_REG_WORD(optr, *iptr);

	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
	    "I/O Address = %p.\n", optr);

	/* Issue set host interrupt command to send cmd out. */
	ha->flags.mbox_int = 0;
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Unlock mbx registers and wait for interrupt */
	ql_dbg(ql_dbg_mbx, vha, 0x100f,
	    "Going to unlock irq & waiting for interrupts. "
	    "jiffies=%lx.\n", jiffies);

	/* Wait for mbx cmd completion until timeout */
	atomic_inc(&ha->num_pend_mbx_stage2);
	if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
		/* Interrupt-driven path. */
		set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha)) {
			if (RD_REG_DWORD(®->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,

				atomic_dec(&ha->num_pend_mbx_stage2);
				ql_dbg(ql_dbg_mbx, vha, 0x1010,
				    "Pending mailbox timeout, exiting.\n");
				rval = QLA_FUNCTION_TIMEOUT;
			WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
			WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		atomic_inc(&ha->num_pend_mbx_stage3);
		if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
			if (chip_reset != ha->chip_reset) {
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				atomic_dec(&ha->num_pend_mbx_stage2);
				atomic_dec(&ha->num_pend_mbx_stage3);
			ql_dbg(ql_dbg_mbx, vha, 0x117a,
			    "cmd=%x Timeout.\n", command);
			spin_lock_irqsave(&ha->hardware_lock, flags);
			clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

		} else if (ha->flags.purge_mbox ||
		    chip_reset != ha->chip_reset) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);
			atomic_dec(&ha->num_pend_mbx_stage2);
			atomic_dec(&ha->num_pend_mbx_stage3);
		atomic_dec(&ha->num_pend_mbx_stage3);

		if (time_after(jiffies, wait_time + 5 * HZ))
			ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
			    command, jiffies_to_msecs(jiffies - wait_time));
		/* Polling path: no interrupts available. */
		ql_dbg(ql_dbg_mbx, vha, 0x1011,
		    "Cmd=%x Polling Mode.\n", command);

		if (IS_P3P_TYPE(ha)) {
			if (RD_REG_DWORD(®->isp82.hint) &
			    HINT_MBX_INT_PENDING) {
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				atomic_dec(&ha->num_pend_mbx_stage2);
				ql_dbg(ql_dbg_mbx, vha, 0x1012,
				    "Pending mailbox timeout, exiting.\n");
				rval = QLA_FUNCTION_TIMEOUT;
			WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING);
		} else if (IS_FWI2_CAPABLE(ha))
			WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT);
			WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
		while (!ha->flags.mbox_int) {
			if (ha->flags.purge_mbox ||
			    chip_reset != ha->chip_reset) {
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				atomic_dec(&ha->num_pend_mbx_stage2);

			if (time_after(jiffies, wait_time))

			/*
			 * Check if it's UNLOADING, cause we cannot poll in
			 * this case, or else a NULL pointer dereference
			 */
			if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
				return QLA_FUNCTION_TIMEOUT;

			/* Check for pending interrupts. */
			qla2x00_poll(ha->rsp_q_map[0]);

			if (!ha->flags.mbox_int &&
			    command == MBC_LOAD_RISC_RAM_EXTENDED))
		ql_dbg(ql_dbg_mbx, vha, 0x1013,
		    (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
	atomic_dec(&ha->num_pend_mbx_stage2);

	/* Check whether we timed out */
	if (ha->flags.mbox_int) {

		ql_dbg(ql_dbg_mbx, vha, 0x1014,
		    "Cmd=%x completed.\n", command);

		/* Got interrupt. Clear the flag. */
		ha->flags.mbox_int = 0;
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
			spin_lock_irqsave(&ha->hardware_lock, flags);
			ha->flags.mbox_busy = 0;
			spin_unlock_irqrestore(&ha->hardware_lock, flags);

			/* Setting Link-Down error */
			mcp->mb[0] = MBS_LINK_DOWN_ERROR;
			rval = QLA_FUNCTION_FAILED;
			ql_log(ql_log_warn, vha, 0xd048,
			    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);

		if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
			rval = QLA_FUNCTION_FAILED;

		/* Load return mailbox registers. */
		iptr = (uint16_t *)&ha->mailbox_out[0];

		ql_dbg(ql_dbg_mbx, vha, 0x1113,
		    "Mailbox registers (IN):\n");
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (mboxes & BIT_0) {
				ql_dbg(ql_dbg_mbx, vha, 0x1114,
				    "mbox[%d]->0x%04x\n", cnt, *iptr2);
		/* Timed out: dump chip state to aid debugging. */
		uint32_t ictrl, host_status, hccr;

		if (IS_FWI2_CAPABLE(ha)) {
			mb[0] = RD_REG_WORD(®->isp24.mailbox0);
			mb[1] = RD_REG_WORD(®->isp24.mailbox1);
			mb[2] = RD_REG_WORD(®->isp24.mailbox2);
			mb[3] = RD_REG_WORD(®->isp24.mailbox3);
			mb[7] = RD_REG_WORD(®->isp24.mailbox7);
			ictrl = RD_REG_DWORD(®->isp24.ictrl);
			host_status = RD_REG_DWORD(®->isp24.host_status);
			hccr = RD_REG_DWORD(®->isp24.hccr);

			ql_log(ql_log_warn, vha, 0xd04c,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
			    command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
			    mb[7], host_status, hccr);
			mb[0] = RD_MAILBOX_REG(ha, ®->isp, 0);
			ictrl = RD_REG_WORD(®->isp.ictrl);
			ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
			    "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
			    "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
		ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);

		/* Capture FW dump only, if PCI device active */
		if (!pci_channel_offline(vha->hw->pdev)) {
			pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
			if (w == 0xffff || ictrl == 0xffffffff ||
			    (chip_reset != ha->chip_reset)) {
				/* This is special case if there is unload
				 * of driver happening and if PCI device go
				 * into bad state due to PCI error condition
				 * then only PCI ERR flag would be set.
				 * we will do premature exit for above case.
				 */
				spin_lock_irqsave(&ha->hardware_lock, flags);
				ha->flags.mbox_busy = 0;
				spin_unlock_irqrestore(&ha->hardware_lock,
				rval = QLA_FUNCTION_TIMEOUT;

			/* Attempt to capture firmware dump for further
			 * analysis of the current firmware state. we do not
			 * need to do this if we are intentionally generating
			 */
			if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
				ha->isp_ops->fw_dump(vha, 0);
			rval = QLA_FUNCTION_TIMEOUT;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	ha->flags.mbox_busy = 0;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
		ql_dbg(ql_dbg_mbx, vha, 0x101a,
		    "Checking for additional resp interrupt.\n");

		/* polling mode for non isp_abort commands. */
		qla2x00_poll(ha->rsp_q_map[0]);

	if (rval == QLA_FUNCTION_TIMEOUT &&
	    mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
		if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
		    ha->flags.eeh_busy) {
			/* not in dpc. schedule it for dpc to take over. */
			ql_dbg(ql_dbg_mbx, vha, 0x101b,
			    "Timeout, schedule isp_abort_needed.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112a,
					    "disabling pause transmit on port "
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				ql_log(ql_log_info, base_vha, 0x101c,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
				    "abort.\n", command, mcp->mb[0],
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				qla2xxx_wake_dpc(vha);
		} else if (current == ha->dpc_thread) {
			/* call abort directly since we are in the DPC thread */
			ql_dbg(ql_dbg_mbx, vha, 0x101d,
			    "Timeout, calling abort_isp.\n");

			if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
			    !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
			    !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
				if (IS_QLA82XX(ha)) {
					ql_dbg(ql_dbg_mbx, vha, 0x112b,
					    "disabling pause transmit on port "
					    QLA82XX_CRB_NIU + 0x98,
					    CRB_NIU_XG_PAUSE_CTL_P0|
					    CRB_NIU_XG_PAUSE_CTL_P1);
				ql_log(ql_log_info, base_vha, 0x101e,
				    "Mailbox cmd timeout occurred, cmd=0x%x, "
				    "mb[0]=0x%x. Scheduling ISP abort ",
				    command, mcp->mb[0]);
				set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
				/* Allow next mbx cmd to come in. */
				complete(&ha->mbx_cmd_comp);
				if (ha->isp_ops->abort_isp(vha)) {
					/* Failed. retry later. */
					set_bit(ISP_ABORT_NEEDED,
				clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
				ql_dbg(ql_dbg_mbx, vha, 0x101f,
				    "Finished abort_isp.\n");

	/* Allow next mbx cmd to come in. */
	complete(&ha->mbx_cmd_comp);

	if (rval == QLA_ABORTED) {
		ql_log(ql_log_info, vha, 0xd035,
		    "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
		if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
			pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
			    dev_name(&ha->pdev->dev), 0x1020+0x800,
			for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
				if (mboxes & BIT_0) {
					printk(" mb[%u]=%x", i, mcp->mb[i]);
			pr_warn(" cmd=%x ****\n", command);
		if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
			ql_dbg(ql_dbg_mbx, vha, 0x1198,
			    "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
			    RD_REG_DWORD(®->isp24.host_status),
			    RD_REG_DWORD(®->isp24.ictrl),
			    RD_REG_DWORD(®->isp24.istatus));
			ql_dbg(ql_dbg_mbx, vha, 0x1206,
			    "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
			    RD_REG_WORD(®->isp.ctrl_status),
			    RD_REG_WORD(®->isp.ictrl),
			    RD_REG_WORD(®->isp.istatus));
		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
/*
 * qla2x00_load_ram() - load a firmware segment into RISC RAM via the
 * LOAD_RISC_RAM (or extended) mailbox command.
 * @vha: virtual host adapter
 * @req_dma: DMA address of the buffer holding the segment
 * @risc_addr: destination RISC address
 * @risc_code_size: segment size
 *
 * Returns a qla2x00 local status code from qla2x00_mailbox_command().
 * NOTE(review): the return-type line, declarations of rval/mc, and several
 * braces are elided in this excerpt.
 */
qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
    uint32_t risc_code_size)
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
	    "Entered %s.\n", __func__);

	/* Extended form is needed for addresses above 16 bits or FWI2 parts. */
	if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
		mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(risc_addr);
		mcp->out_mb = MBX_8|MBX_0;
		mcp->mb[0] = MBC_LOAD_RISC_RAM;

	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	if (IS_FWI2_CAPABLE(ha)) {
		/* FWI2 parts take a 32-bit size split across mb[4]/mb[5]. */
		mcp->mb[4] = MSW(risc_code_size);
		mcp->mb[5] = LSW(risc_code_size);
		mcp->out_mb |= MBX_5|MBX_4;
		mcp->mb[4] = LSW(risc_code_size);
		mcp->out_mb |= MBX_4;

	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1023,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
		    "Done %s.\n", __func__);
/* Flags OR-ed into EXECUTE_FIRMWARE mailbox 4. */
#define EXTENDED_BB_CREDITS BIT_0
#define NVME_ENABLE_FLAG BIT_3
/*
 * Build the EXECUTE_FIRMWARE mb[4] long-range setting from the distance
 * detected on the SFP (ha->long_range_distance).
 * NOTE(review): braces and the return statement are elided in this excerpt.
 */
static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
	uint16_t mb4 = BIT_0;

	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
/*
 * Build the EXECUTE_FIRMWARE mb[4] long-range setting from the distance
 * configured in NVRAM (nv->enhanced_features).
 * NOTE(review): braces and the return statement are elided in this excerpt.
 */
static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
	uint16_t mb4 = BIT_0;

	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		struct nvram_81xx *nv = ha->nvram;

		mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
/*
 * qla2x00_execute_fw
 *	Start adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * NOTE(review): the return-type line, declarations of rval/mc, and several
 * braces/else-arms are elided in this excerpt.
 */
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
	struct qla_hw_data *ha = vha->hw;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_EXECUTE_FIRMWARE;

	if (IS_FWI2_CAPABLE(ha)) {
		mcp->mb[1] = MSW(risc_addr);
		mcp->mb[2] = LSW(risc_addr);

		/* Long-range (LR) SFP handling: prefer the auto-detected SFP
		 * distance, else fall back to the NVRAM-configured one. */
		ha->flags.using_lr_setting = 0;
		if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
		    IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (ql2xautodetectsfp) {
				if (ha->flags.detected_lr_sfp) {
					    qla25xx_set_sfp_lr_dist(ha);
					ha->flags.using_lr_setting = 1;
				struct nvram_81xx *nv = ha->nvram;
				/* set LR distance if specified in nvram */
				if (nv->enhanced_features &
				    NEF_LR_DIST_ENABLE) {
					    qla25xx_set_nvr_lr_dist(ha);
					ha->flags.using_lr_setting = 1;

		if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
			mcp->mb[4] |= NVME_ENABLE_FLAG;

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			struct nvram_81xx *nv = ha->nvram;
			/* set minimum speed if specified in nvram */
			if (nv->min_supported_speed >= 2 &&
			    nv->min_supported_speed <= 5) {
				mcp->mb[11] |= nv->min_supported_speed & 0xF;
				mcp->out_mb |= MBX_11;
				vha->min_supported_speed =
				    nv->min_supported_speed;

		if (ha->flags.exlogins_enabled)
			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;

		if (ha->flags.exchoffld_enabled)
			mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;

		mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
		mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
		mcp->mb[1] = LSW(risc_addr);
		mcp->out_mb |= MBX_1;
		if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
			mcp->out_mb |= MBX_2;

	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1026,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);

	if (!IS_FWI2_CAPABLE(ha))

	/* Report firmware capabilities returned in mb[2]/mb[3]. */
	ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
	ql_dbg(ql_dbg_mbx, vha, 0x119a,
	    "fw_ability_mask=%x.\n", ha->fw_ability_mask);
	ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
		ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
		    ha->max_supported_speed == 0 ? "16Gps" :
		    ha->max_supported_speed == 1 ? "32Gps" :
		    ha->max_supported_speed == 2 ? "64Gps" : "unknown");
		if (vha->min_supported_speed) {
			ha->min_supported_speed = mcp->mb[5] &
			    (BIT_0 | BIT_1 | BIT_2);
			ql_dbg(ql_dbg_mbx, vha, 0x119c,
			    "min_supported_speed=%s.\n",
			    ha->min_supported_speed == 6 ? "64Gps" :
			    ha->min_supported_speed == 5 ? "32Gps" :
			    ha->min_supported_speed == 4 ? "16Gps" :
			    ha->min_supported_speed == 3 ? "8Gps" :
			    ha->min_supported_speed == 2 ? "4Gps" : "unknown");

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
	    "Done %s.\n", __func__);
/*
 * qla_get_exlogin_status
 *	Get extended login status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	vha:           adapter state pointer.
 *	buf_sz:        out - required buffer size.
 *	ex_logins_cnt: out - supported extended login count.
 *
 * Returns:
 *	qla2x00 local function status
 *
 * NOTE(review): the return-type line and declarations of rval/mc are elided
 * in this excerpt.
 */
#define FETCH_XLOGINS_STAT 0x8
qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
    uint16_t *ex_logins_cnt)
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XLOGINS_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x1190,
		    "buffer size 0x%x, exchange login count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
		    "Done %s.\n", __func__);
/*
 * qla_set_exlogin_mem_cfg
 *	set extended login memory configuration
 *	Mbx needs to be issued before init_cb is set
 *
 * Input:
 *	vha:       adapter state pointer.
 *	phys_addr: physical address of the extended-login buffer
 *	           (size comes from ha->exlogin_size).
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * NOTE(review): the return-type line and declarations of rval/mc are elided
 * in this excerpt.
 */
#define CONFIG_XLOGINS_MEM 0x3
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XLOGINS_MEM;
	/* 64-bit DMA address split across mb[2,3,6,7]. */
	mcp->mb[2] = MSW(phys_addr);
	mcp->mb[3] = LSW(phys_addr);
	mcp->mb[6] = MSW(MSD(phys_addr));
	mcp->mb[7] = LSW(MSD(phys_addr));
	mcp->mb[8] = MSW(ha->exlogin_size);
	mcp->mb[9] = LSW(ha->exlogin_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
		    "Done %s.\n", __func__);
/*
 * qla_get_exchoffld_status
 *	Get exchange offload status
 *	uses the memory offload control/status Mailbox
 *
 * Input:
 *	vha:           adapter state pointer.
 *	buf_sz:        out - required buffer size.
 *	ex_logins_cnt: out - supported exchange offload count.
 *
 * Returns:
 *	qla2x00 local function status
 *
 * NOTE(review): the return-type line and declarations of rval/mc are elided
 * in this excerpt.
 */
#define FETCH_XCHOFFLD_STAT 0x2
qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
    uint16_t *ex_logins_cnt)
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = FETCH_XCHOFFLD_STAT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_10|MBX_4|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
		*buf_sz = mcp->mb[4];
		*ex_logins_cnt = mcp->mb[10];

		ql_log(ql_log_info, vha, 0x118e,
		    "buffer size 0x%x, exchange offload count=%d\n",
		    mcp->mb[4], mcp->mb[10]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
		    "Done %s.\n", __func__);
/*
 * qla_set_exchoffld_mem_cfg
 *	Set exchange offload memory configuration
 *	Mbx needs to be issued before init_cb is set
 *
 * Input:
 *	vha: adapter state pointer (buffer address/size come from
 *	     ha->exchoffld_buf_dma / ha->exchoffld_size).
 *	TARGET_QUEUE_LOCK must be released
 *	ADAPTER_STATE_LOCK must be released
 *
 * Returns:
 *	qla2x00 local function status code.
 *
 * NOTE(review): the return-type line and declarations of rval/mc are elided
 * in this excerpt.
 */
#define CONFIG_XCHOFFLD_MEM 0x3
qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
	mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
	/* 64-bit DMA address split across mb[2,3,6,7]. */
	mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
	mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
	mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
	mcp->mb[8] = MSW(ha->exchoffld_size);
	mcp->mb[9] = LSW(ha->exchoffld_size);
	mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_11|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
		    "Done %s.\n", __func__);
/*
 * qla2x00_get_fw_version
 *	Get firmware version and capability information; results are stored
 *	in the qla_hw_data fields (fw_*_version, fw_attributes, mpi/phy/
 *	serdes/pep versions, etc.).
 *
 * Input:
 *	vha: adapter state pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * NOTE(review): the return-type line, declarations of rval/mc, and several
 * braces/else-arms are elided in this excerpt.
 */
qla2x00_get_fw_version(scsi_qla_host_t *vha)
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
	if (IS_FWI2_CAPABLE(ha))
		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
	if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
		    MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
		    MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;

	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS)

	/* Return mailbox data. */
	ha->fw_major_version = mcp->mb[1];
	ha->fw_minor_version = mcp->mb[2];
	ha->fw_subminor_version = mcp->mb[3];
	ha->fw_attributes = mcp->mb[6];
	if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];

	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
		ha->phy_version[0] = mcp->mb[8] & 0xff;
		ha->phy_version[1] = mcp->mb[9] >> 8;
		ha->phy_version[2] = mcp->mb[9] & 0xff;

	if (IS_FWI2_CAPABLE(ha)) {
		ha->fw_attributes_h = mcp->mb[15];
		ha->fw_attributes_ext[0] = mcp->mb[16];
		ha->fw_attributes_ext[1] = mcp->mb[17];
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
		    "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[15], mcp->mb[6]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
		    __func__, mcp->mb[17], mcp->mb[16]);

		if (ha->fw_attributes_h & 0x4)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
			    "%s: Firmware supports Extended Login 0x%x\n",
			    __func__, ha->fw_attributes_h);

		if (ha->fw_attributes_h & 0x8)
			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
			    "%s: Firmware supports Exchange Offload 0x%x\n",
			    __func__, ha->fw_attributes_h);

		/*
		 * FW supports nvme and driver load parameter requested nvme.
		 * BIT 26 of fw_attributes indicates NVMe support.
		 */
		if ((ha->fw_attributes_h &
		    (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
			if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
				vha->flags.nvme_first_burst = 1;

			vha->flags.nvme_enabled = 1;
			ql_log(ql_log_info, vha, 0xd302,
			    "%s: FC-NVMe is Enabled (0x%x)\n",
			    __func__, ha->fw_attributes_h);

	if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		ha->serdes_version[0] = mcp->mb[7] & 0xff;
		ha->serdes_version[1] = mcp->mb[8] >> 8;
		ha->serdes_version[2] = mcp->mb[8] & 0xff;
		ha->mpi_version[0] = mcp->mb[10] & 0xff;
		ha->mpi_version[1] = mcp->mb[11] >> 8;
		ha->mpi_version[2] = mcp->mb[11] & 0xff;
		ha->pep_version[0] = mcp->mb[13] & 0xff;
		ha->pep_version[1] = mcp->mb[14] >> 8;
		ha->pep_version[2] = mcp->mb[14] & 0xff;
		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
		ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
		ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
		if (IS_QLA28XX(ha)) {
			if (mcp->mb[16] & BIT_10) {
				ql_log(ql_log_info, vha, 0xffff,
				    "FW support secure flash updates\n");
				ha->flags.secure_fw = 1;

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
		    "Done %s.\n", __func__);
/*
 * qla2x00_get_fw_options
 *	Get firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopts = pointer for firmware options (mb[0..3] are copied out).
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * NOTE(review): the return-type line and declarations of rval/mc are elided
 * in this excerpt.
 */
qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
		fwopts[0] = mcp->mb[0];
		fwopts[1] = mcp->mb[1];
		fwopts[2] = mcp->mb[2];
		fwopts[3] = mcp->mb[3];

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
		    "Done %s.\n", __func__);
/*
 * qla2x00_set_fw_options
 *	Set firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopts = pointer for firmware options; fwopts[0] receives the
 *	         completion status (mb[0]) on return.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * NOTE(review): the return-type line, declarations of rval/mc, and the
 * else-branch brace lines are elided in this excerpt.
 */
qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
	mcp->mb[1] = fwopts[1];
	mcp->mb[2] = fwopts[2];
	mcp->mb[3] = fwopts[3];
	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->in_mb |= MBX_1;
		mcp->mb[10] = fwopts[10];
		mcp->out_mb |= MBX_10;
		mcp->mb[10] = fwopts[10];
		mcp->mb[11] = fwopts[11];
		mcp->mb[12] = 0;	/* Undocumented, but used */
		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	fwopts[0] = mcp->mb[0];

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1030,
		    "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
		    "Done %s.\n", __func__);
/*
 * qla2x00_mbx_reg_test
 *	Mailbox register wrap test: writes known patterns to mb[1..7] and
 *	verifies the firmware echoes them back unchanged.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * NOTE(review): the return-type line and declarations of rval/mc are elided
 * in this excerpt.
 */
qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
	mcp->mb[1] = 0xAAAA;
	mcp->mb[2] = 0x5555;
	mcp->mb[3] = 0xAA55;
	mcp->mb[4] = 0x55AA;
	mcp->mb[5] = 0xA5A5;
	mcp->mb[6] = 0x5A5A;
	mcp->mb[7] = 0x2525;
	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval == QLA_SUCCESS) {
		/* Any mismatch in the echoed patterns means the test failed. */
		if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
		    mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
			rval = QLA_FUNCTION_FAILED;
		if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
		    mcp->mb[7] != 0x2525)
			rval = QLA_FUNCTION_FAILED;

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
		    "Done %s.\n", __func__);
/*
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines throughout; code left byte-identical, comments only.
 */
1336 * qla2x00_verify_checksum
1337 * Verify firmware checksum.
1340 * ha = adapter block pointer.
1341 * TARGET_QUEUE_LOCK must be released.
1342 * ADAPTER_STATE_LOCK must be released.
1345 * qla2x00 local function return status code.
1351 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1355 mbx_cmd_t *mcp = &mc;
1357 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1358 "Entered %s.\n", __func__);
1360 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1361 mcp->out_mb = MBX_0;
/*
 * FWI2 parts take a 32-bit RISC address split across mb1 (MSW) and
 * mb2 (LSW); the other branch passes only the low word in mb1.
 */
1363 if (IS_FWI2_CAPABLE(vha->hw)) {
1364 mcp->mb[1] = MSW(risc_addr);
1365 mcp->mb[2] = LSW(risc_addr);
1366 mcp->out_mb |= MBX_2|MBX_1;
1367 mcp->in_mb |= MBX_2|MBX_1;
1369 mcp->mb[1] = LSW(risc_addr);
1370 mcp->out_mb |= MBX_1;
1371 mcp->in_mb |= MBX_1;
1374 mcp->tov = MBX_TOV_SECONDS;
1376 rval = qla2x00_mailbox_command(vha, mcp);
/* On failure, log the checksum value returned by firmware. */
1378 if (rval != QLA_SUCCESS) {
1379 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1380 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1381 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1383 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1384 "Done %s.\n", __func__);
/*
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines throughout; code left byte-identical, comments only.
 */
1391 * qla2x00_issue_iocb
1392 * Issue IOCB using mailbox command
1395 * ha = adapter state pointer.
1396 * buffer = buffer pointer.
1397 * phys_addr = physical address of buffer.
1398 * size = size of buffer.
1399 * TARGET_QUEUE_LOCK must be released.
1400 * ADAPTER_STATE_LOCK must be released.
1403 * qla2x00 local function return status code.
1409 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1410 dma_addr_t phys_addr, size_t size, uint32_t tov)
1414 mbx_cmd_t *mcp = &mc;
1416 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1417 "Entered %s.\n", __func__);
/* 64-bit DMA address of the IOCB split across mb2/mb3 (low) and mb6/mb7 (high). */
1419 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1421 mcp->mb[2] = MSW(phys_addr);
1422 mcp->mb[3] = LSW(phys_addr);
1423 mcp->mb[6] = MSW(MSD(phys_addr));
1424 mcp->mb[7] = LSW(MSD(phys_addr));
1425 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1426 mcp->in_mb = MBX_2|MBX_0;
1429 rval = qla2x00_mailbox_command(vha, mcp);
1431 if (rval != QLA_SUCCESS) {
1433 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
/* On success the buffer holds a status entry; mask its reserved bits. */
1435 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
1437 /* Mask reserved bits. */
1438 sts_entry->entry_status &=
1439 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1440 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1441 "Done %s.\n", __func__);
/* Convenience wrapper: issue an IOCB with the default mailbox timeout
 * (continuation line with the timeout argument is missing from this chunk). */
1448 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1451 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
/*
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines throughout; code left byte-identical, comments only.
 */
1456 * qla2x00_abort_command
1457 * Abort command aborts a specified IOCB.
1460 * ha = adapter block pointer.
1461 * sp = SB structure pointer.
1464 * qla2x00 local function return status code.
1470 qla2x00_abort_command(srb_t *sp)
1472 unsigned long flags = 0;
1474 uint32_t handle = 0;
1476 mbx_cmd_t *mcp = &mc;
1477 fc_port_t *fcport = sp->fcport;
1478 scsi_qla_host_t *vha = fcport->vha;
1479 struct qla_hw_data *ha = vha->hw;
1480 struct req_que *req;
1481 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1483 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1484 "Entered %s.\n", __func__);
/* Pick the qpair's request queue when available (else-branch not visible). */
1486 if (vha->flags.qpairs_available && sp->qpair)
1487 req = sp->qpair->req;
/* Locate the outstanding-command handle for sp under the hardware lock. */
1491 spin_lock_irqsave(&ha->hardware_lock, flags);
1492 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1493 if (req->outstanding_cmds[handle] == sp)
1496 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1498 if (handle == req->num_outstanding_cmds) {
1499 /* command not found */
1500 return QLA_FUNCTION_FAILED;
/* Extended-ID parts place the loop id in the full mb1 word; legacy in the high byte. */
1503 mcp->mb[0] = MBC_ABORT_COMMAND;
1504 if (HAS_EXTENDED_IDS(ha))
1505 mcp->mb[1] = fcport->loop_id;
1507 mcp->mb[1] = fcport->loop_id << 8;
1508 mcp->mb[2] = (uint16_t)handle;
1509 mcp->mb[3] = (uint16_t)(handle >> 16);
1510 mcp->mb[6] = (uint16_t)cmd->device->lun;
1511 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1513 mcp->tov = MBX_TOV_SECONDS;
1515 rval = qla2x00_mailbox_command(vha, mcp);
1517 if (rval != QLA_SUCCESS) {
1518 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1520 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1521 "Done %s.\n", __func__);
/*
 * Issue MBC_ABORT_TARGET for fcport, then a marker IOCB.
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines (vha assignment, returns); code left byte-identical.
 */
1528 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1532 mbx_cmd_t *mcp = &mc;
1533 scsi_qla_host_t *vha;
1537 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1538 "Entered %s.\n", __func__);
1540 mcp->mb[0] = MBC_ABORT_TARGET;
1541 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1542 if (HAS_EXTENDED_IDS(vha->hw)) {
1543 mcp->mb[1] = fcport->loop_id;
1545 mcp->out_mb |= MBX_10;
1547 mcp->mb[1] = fcport->loop_id << 8;
1549 mcp->mb[2] = vha->hw->loop_reset_delay;
1550 mcp->mb[9] = vha->vp_idx;
1553 mcp->tov = MBX_TOV_SECONDS;
1555 rval = qla2x00_mailbox_command(vha, mcp);
1556 if (rval != QLA_SUCCESS) {
1557 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1558 "Failed=%x.\n", rval);
1561 /* Issue marker IOCB. */
1562 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
1564 if (rval2 != QLA_SUCCESS) {
1565 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1566 "Failed to issue marker IOCB (%x).\n", rval2);
1568 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1569 "Done %s.\n", __func__);
/*
 * Issue MBC_LUN_RESET for (fcport, lun l), then a marker IOCB.
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines (vha assignment, returns); code left byte-identical.
 */
1576 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1580 mbx_cmd_t *mcp = &mc;
1581 scsi_qla_host_t *vha;
1585 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1586 "Entered %s.\n", __func__);
1588 mcp->mb[0] = MBC_LUN_RESET;
1589 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1590 if (HAS_EXTENDED_IDS(vha->hw))
1591 mcp->mb[1] = fcport->loop_id;
1593 mcp->mb[1] = fcport->loop_id << 8;
/* LUN truncated to 32 bits for the mailbox interface. */
1594 mcp->mb[2] = (u32)l;
1596 mcp->mb[9] = vha->vp_idx;
1599 mcp->tov = MBX_TOV_SECONDS;
1601 rval = qla2x00_mailbox_command(vha, mcp);
1602 if (rval != QLA_SUCCESS) {
1603 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1606 /* Issue marker IOCB. */
1607 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
1609 if (rval2 != QLA_SUCCESS) {
1610 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1611 "Failed to issue marker IOCB (%x).\n", rval2);
1613 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1614 "Done %s.\n", __func__);
/*
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines throughout; code left byte-identical, comments only.
 */
1621 * qla2x00_get_adapter_id
1622 * Get adapter ID and topology.
1625 * ha = adapter block pointer.
1626 * id = pointer for loop ID.
1627 * al_pa = pointer for AL_PA.
1628 * area = pointer for area.
1629 * domain = pointer for domain.
1630 * top = pointer for topology.
1631 * TARGET_QUEUE_LOCK must be released.
1632 * ADAPTER_STATE_LOCK must be released.
1635 * qla2x00 local function return status code.
1641 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1642 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1646 mbx_cmd_t *mcp = &mc;
1648 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1649 "Entered %s.\n", __func__);
1651 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1652 mcp->mb[9] = vha->vp_idx;
1653 mcp->out_mb = MBX_9|MBX_0;
/* Extra input registers depend on chip capabilities (CNA, FWI2, 27xx/28xx). */
1654 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1655 if (IS_CNA_CAPABLE(vha->hw))
1656 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1657 if (IS_FWI2_CAPABLE(vha->hw))
1658 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1659 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
1660 mcp->in_mb |= MBX_15;
1661 mcp->tov = MBX_TOV_SECONDS;
1663 rval = qla2x00_mailbox_command(vha, mcp);
/* Map firmware completion codes to driver status codes. */
1664 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1665 rval = QLA_COMMAND_ERROR;
1666 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1667 rval = QLA_INVALID_COMMAND;
/* Unpack port address bytes from mb2/mb3 (id/top assignments not visible). */
1671 *al_pa = LSB(mcp->mb[2]);
1672 *area = MSB(mcp->mb[2]);
1673 *domain = LSB(mcp->mb[3]);
1675 *sw_cap = mcp->mb[7];
1677 if (rval != QLA_SUCCESS) {
1679 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1681 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1682 "Done %s.\n", __func__);
/* CNA parts: harvest FCoE VLAN/FCF and VN_Port MAC from mb9..mb13. */
1684 if (IS_CNA_CAPABLE(vha->hw)) {
1685 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1686 vha->fcoe_fcf_idx = mcp->mb[10];
1687 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1688 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1689 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1690 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1691 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1692 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1694 /* If FA-WWN supported */
1695 if (IS_FAWWN_CAPABLE(vha->hw)) {
1696 if (mcp->mb[7] & BIT_14) {
/* Fabric-assigned WWN delivered big-endian in mb16..mb19. */
1697 vha->port_name[0] = MSB(mcp->mb[16]);
1698 vha->port_name[1] = LSB(mcp->mb[16]);
1699 vha->port_name[2] = MSB(mcp->mb[17]);
1700 vha->port_name[3] = LSB(mcp->mb[17]);
1701 vha->port_name[4] = MSB(mcp->mb[18]);
1702 vha->port_name[5] = LSB(mcp->mb[18]);
1703 vha->port_name[6] = MSB(mcp->mb[19]);
1704 vha->port_name[7] = LSB(mcp->mb[19]);
1705 fc_host_port_name(vha->host) =
1706 wwn_to_u64(vha->port_name);
1707 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1708 "FA-WWN acquired %016llx\n",
1709 wwn_to_u64(vha->port_name));
/* 27xx/28xx: buffer-to-buffer credit recovery value in mb15. */
1713 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
1714 vha->bbcr = mcp->mb[15];
/*
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines throughout; code left byte-identical, comments only.
 */
1721 * qla2x00_get_retry_cnt
1722 * Get current firmware login retry count and delay.
1725 * ha = adapter block pointer.
1726 * retry_cnt = pointer to login retry count.
1727 * tov = pointer to login timeout value.
1730 * qla2x00 local function return status code.
1736 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1742 mbx_cmd_t *mcp = &mc;
1744 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1745 "Entered %s.\n", __func__);
1747 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1748 mcp->out_mb = MBX_0;
1749 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1750 mcp->tov = MBX_TOV_SECONDS;
1752 rval = qla2x00_mailbox_command(vha, mcp);
1754 if (rval != QLA_SUCCESS) {
1756 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1757 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1759 /* Convert returned data and check our values. */
/* mb3 is R_A_TOV in 100ms units; halve to get the reported value. */
1760 *r_a_tov = mcp->mb[3] / 2;
1761 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
/* Only adopt the firmware's retry/timeout if its product is larger. */
1762 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1763 /* Update to the larger values */
1764 *retry_cnt = (uint8_t)mcp->mb[1];
1768 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1769 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
/*
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines throughout; code left byte-identical, comments only.
 */
1776 * qla2x00_init_firmware
1777 * Initialize adapter firmware.
1780 * ha = adapter block pointer.
1781 * dptr = Initialization control block pointer.
1782 * size = size of initialization control block.
1783 * TARGET_QUEUE_LOCK must be released.
1784 * ADAPTER_STATE_LOCK must be released.
1787 * qla2x00 local function return status code.
1793 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1797 mbx_cmd_t *mcp = &mc;
1798 struct qla_hw_data *ha = vha->hw;
1800 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1801 "Entered %s.\n", __func__);
/* P3P (82xx) parts need a doorbell write before firmware init. */
1803 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1804 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1805 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
/* NPIV-capable firmware uses the MID variant of the init command. */
1807 if (ha->flags.npiv_supported)
1808 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1810 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
/* 64-bit DMA address of the init control block in mb2/mb3 + mb6/mb7. */
1813 mcp->mb[2] = MSW(ha->init_cb_dma);
1814 mcp->mb[3] = LSW(ha->init_cb_dma);
1815 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1816 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1817 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
/* Optional extended init control block in mb10..mb14. */
1818 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1820 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1821 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1822 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1823 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1824 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1825 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1827 /* 1 and 2 should normally be captured. */
1828 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1829 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
1830 /* mb3 is additional info about the installed SFP. */
1831 mcp->in_mb |= MBX_3;
1832 mcp->buf_size = size;
1833 mcp->flags = MBX_DMA_OUT;
1834 mcp->tov = MBX_TOV_SECONDS;
1835 rval = qla2x00_mailbox_command(vha, mcp);
1837 if (rval != QLA_SUCCESS) {
1839 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1840 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
1841 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
/* On failure, dump the (extended) init control block for diagnosis. */
1843 ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
1844 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1845 0x0104d, ha->init_cb, sizeof(*ha->init_cb));
1847 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1848 ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
1849 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1850 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
/* 27xx/28xx report SFP validation problems via mb2/mb3. */
1853 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1854 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1855 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1856 "Invalid SFP/Validation Failed\n");
1858 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1859 "Done %s.\n", __func__);
/*
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines throughout (declarations, gotos, labels); code left byte-identical.
 */
1867 * qla2x00_get_port_database
1868 * Issue normal/enhanced get port database mailbox command
1869 * and copy device name as necessary.
1872 * ha = adapter state pointer.
1873 * dev = structure pointer.
1874 * opt = enhanced cmd option byte.
1877 * qla2x00 local function return status code.
1883 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1887 mbx_cmd_t *mcp = &mc;
1888 port_database_t *pd;
1889 struct port_database_24xx *pd24;
1891 struct qla_hw_data *ha = vha->hw;
1893 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1894 "Entered %s.\n", __func__);
/* DMA buffer for the returned port database entry. */
1897 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1899 ql_log(ql_log_warn, vha, 0x1050,
1900 "Failed to allocate port database structure.\n");
1902 return QLA_MEMORY_ALLOC_FAILED;
/* Non-FWI2 parts with a nonzero option byte use the enhanced command. */
1905 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1906 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1907 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1908 mcp->mb[2] = MSW(pd_dma);
1909 mcp->mb[3] = LSW(pd_dma);
1910 mcp->mb[6] = MSW(MSD(pd_dma));
1911 mcp->mb[7] = LSW(MSD(pd_dma));
1912 mcp->mb[9] = vha->vp_idx;
1913 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
/* Loop-id placement in mb1 varies with chip generation. */
1915 if (IS_FWI2_CAPABLE(ha)) {
1916 mcp->mb[1] = fcport->loop_id;
1918 mcp->out_mb |= MBX_10|MBX_1;
1919 mcp->in_mb |= MBX_1;
1920 } else if (HAS_EXTENDED_IDS(ha)) {
1921 mcp->mb[1] = fcport->loop_id;
1923 mcp->out_mb |= MBX_10|MBX_1;
1925 mcp->mb[1] = fcport->loop_id << 8 | opt;
1926 mcp->out_mb |= MBX_1;
1928 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1929 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1930 mcp->flags = MBX_DMA_IN;
1931 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1932 rval = qla2x00_mailbox_command(vha, mcp);
1933 if (rval != QLA_SUCCESS)
/* FWI2 (24xx+) port database layout. */
1936 if (IS_FWI2_CAPABLE(ha)) {
1938 u8 current_login_state, last_login_state;
1940 pd24 = (struct port_database_24xx *) pd;
1942 /* Check for logged in state. */
/* NVMe ports report login state in the high nibble, FCP in the low. */
1943 if (fcport->fc4f_nvme) {
1944 current_login_state = pd24->current_login_state >> 4;
1945 last_login_state = pd24->last_login_state >> 4;
1947 current_login_state = pd24->current_login_state & 0xf;
1948 last_login_state = pd24->last_login_state & 0xf;
1950 fcport->current_login_state = pd24->current_login_state;
1951 fcport->last_login_state = pd24->last_login_state;
1953 /* Check for logged in state. */
1954 if (current_login_state != PDS_PRLI_COMPLETE &&
1955 last_login_state != PDS_PRLI_COMPLETE) {
1956 ql_dbg(ql_dbg_mbx, vha, 0x119a,
1957 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
1958 current_login_state, last_login_state,
1960 rval = QLA_FUNCTION_FAILED;
/* A WWPN change mid-flight means the device was lost and replaced. */
1966 if (fcport->loop_id == FC_NO_LOOP_ID ||
1967 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1968 memcmp(fcport->port_name, pd24->port_name, 8))) {
1969 /* We lost the device mid way. */
1970 rval = QLA_NOT_LOGGED_IN;
1974 /* Names are little-endian. */
1975 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1976 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1978 /* Get port_id of device. */
1979 fcport->d_id.b.domain = pd24->port_id[0];
1980 fcport->d_id.b.area = pd24->port_id[1];
1981 fcport->d_id.b.al_pa = pd24->port_id[2];
1982 fcport->d_id.b.rsvd_1 = 0;
1984 /* If not target must be initiator or unknown type. */
1985 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1986 fcport->port_type = FCT_INITIATOR;
1988 fcport->port_type = FCT_TARGET;
1990 /* Passback COS information. */
1991 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1992 FC_COS_CLASS2 : FC_COS_CLASS3;
1994 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1995 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
/* Legacy (pre-24xx) port database layout. */
1999 /* Check for logged in state. */
2000 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
2001 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
2002 ql_dbg(ql_dbg_mbx, vha, 0x100a,
2003 "Unable to verify login-state (%x/%x) - "
2004 "portid=%02x%02x%02x.\n", pd->master_state,
2005 pd->slave_state, fcport->d_id.b.domain,
2006 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2007 rval = QLA_FUNCTION_FAILED;
2011 if (fcport->loop_id == FC_NO_LOOP_ID ||
2012 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2013 memcmp(fcport->port_name, pd->port_name, 8))) {
2014 /* We lost the device mid way. */
2015 rval = QLA_NOT_LOGGED_IN;
2019 /* Names are little-endian. */
2020 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
2021 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
2023 /* Get port_id of device. */
/* Legacy pd->port_id has a reserved byte; area lives at index 3. */
2024 fcport->d_id.b.domain = pd->port_id[0];
2025 fcport->d_id.b.area = pd->port_id[3];
2026 fcport->d_id.b.al_pa = pd->port_id[2];
2027 fcport->d_id.b.rsvd_1 = 0;
2029 /* If not target must be initiator or unknown type. */
2030 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2031 fcport->port_type = FCT_INITIATOR;
2033 fcport->port_type = FCT_TARGET;
2035 /* Passback COS information. */
2036 fcport->supported_classes = (pd->options & BIT_4) ?
2037 FC_COS_CLASS2 : FC_COS_CLASS3;
2041 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2044 if (rval != QLA_SUCCESS) {
2045 ql_dbg(ql_dbg_mbx, vha, 0x1052,
2046 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
2047 mcp->mb[0], mcp->mb[1]);
2049 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2050 "Done %s.\n", __func__);
/*
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines throughout; code left byte-identical, comments only.
 */
2057 * qla2x00_get_firmware_state
2058 * Get adapter firmware state.
2061 * ha = adapter block pointer.
2062 * dptr = pointer for firmware state.
2063 * TARGET_QUEUE_LOCK must be released.
2064 * ADAPTER_STATE_LOCK must be released.
2067 * qla2x00 local function return status code.
2073 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2077 mbx_cmd_t *mcp = &mc;
2078 struct qla_hw_data *ha = vha->hw;
2080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2081 "Entered %s.\n", __func__);
2083 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2084 mcp->out_mb = MBX_0;
/* FWI2 firmware returns six state words; legacy returns one. */
2085 if (IS_FWI2_CAPABLE(vha->hw))
2086 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2088 mcp->in_mb = MBX_1|MBX_0;
2089 mcp->tov = MBX_TOV_SECONDS;
2091 rval = qla2x00_mailbox_command(vha, mcp);
2093 /* Return firmware states. */
2094 states[0] = mcp->mb[1];
2095 if (IS_FWI2_CAPABLE(vha->hw)) {
2096 states[1] = mcp->mb[2];
2097 states[2] = mcp->mb[3]; /* SFP info */
2098 states[3] = mcp->mb[4];
2099 states[4] = mcp->mb[5];
2100 states[5] = mcp->mb[6]; /* DPORT status */
2103 if (rval != QLA_SUCCESS) {
2105 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
/* 27xx/28xx report SFP validation problems via mb2/mb3. */
2107 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2108 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2109 ql_dbg(ql_dbg_mbx, vha, 0x119e,
2110 "Invalid SFP/Validation Failed\n");
2112 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2113 "Done %s.\n", __func__);
/*
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines throughout; code left byte-identical, comments only.
 */
2120 * qla2x00_get_port_name
2121 * Issue get port name mailbox command.
2122 * Returned name is in big endian format.
2125 * ha = adapter block pointer.
2126 * loop_id = loop ID of device.
2127 * name = pointer for name.
2128 * TARGET_QUEUE_LOCK must be released.
2129 * ADAPTER_STATE_LOCK must be released.
2132 * qla2x00 local function return status code.
2138 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2143 mbx_cmd_t *mcp = &mc;
2145 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2146 "Entered %s.\n", __func__);
2148 mcp->mb[0] = MBC_GET_PORT_NAME;
2149 mcp->mb[9] = vha->vp_idx;
2150 mcp->out_mb = MBX_9|MBX_1|MBX_0;
2151 if (HAS_EXTENDED_IDS(vha->hw)) {
2152 mcp->mb[1] = loop_id;
2154 mcp->out_mb |= MBX_10;
2156 mcp->mb[1] = loop_id << 8 | opt;
2159 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2160 mcp->tov = MBX_TOV_SECONDS;
2162 rval = qla2x00_mailbox_command(vha, mcp);
2164 if (rval != QLA_SUCCESS) {
2166 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2169 /* This function returns name in big endian. */
/* WWPN is unpacked MSB/LSB from mb2, mb3, mb6, mb7. */
2170 name[0] = MSB(mcp->mb[2]);
2171 name[1] = LSB(mcp->mb[2]);
2172 name[2] = MSB(mcp->mb[3]);
2173 name[3] = LSB(mcp->mb[3]);
2174 name[4] = MSB(mcp->mb[6]);
2175 name[5] = LSB(mcp->mb[6]);
2176 name[6] = MSB(mcp->mb[7]);
2177 name[7] = LSB(mcp->mb[7]);
2180 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2181 "Done %s.\n", __func__);
/*
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines throughout; code left byte-identical, comments only.
 */
2188 * qla24xx_link_initialization
2189 * Issue link initialization mailbox command.
2192 * ha = adapter block pointer.
2193 * TARGET_QUEUE_LOCK must be released.
2194 * ADAPTER_STATE_LOCK must be released.
2197 * qla2x00 local function return status code.
2203 qla24xx_link_initialize(scsi_qla_host_t *vha)
2207 mbx_cmd_t *mcp = &mc;
2209 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2210 "Entered %s.\n", __func__);
/* Only supported on non-CNA FWI2 parts. */
2212 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2213 return QLA_FUNCTION_FAILED;
2215 mcp->mb[0] = MBC_LINK_INITIALIZATION;
/* mb1 bit6 = loop mode, bit5 = the alternative mode (else-branch). */
2217 if (vha->hw->operating_mode == LOOP)
2218 mcp->mb[1] |= BIT_6;
2220 mcp->mb[1] |= BIT_5;
2223 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2225 mcp->tov = MBX_TOV_SECONDS;
2227 rval = qla2x00_mailbox_command(vha, mcp);
2229 if (rval != QLA_SUCCESS) {
2230 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2232 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2233 "Done %s.\n", __func__);
/*
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines throughout; code left byte-identical, comments only.
 */
2241 * Issue LIP reset mailbox command.
2244 * ha = adapter block pointer.
2245 * TARGET_QUEUE_LOCK must be released.
2246 * ADAPTER_STATE_LOCK must be released.
2249 * qla2x00 local function return status code.
2255 qla2x00_lip_reset(scsi_qla_host_t *vha)
2259 mbx_cmd_t *mcp = &mc;
2261 ql_dbg(ql_dbg_disc, vha, 0x105a,
2262 "Entered %s.\n", __func__);
/* Three chip variants: CNA full login, FWI2 full login, legacy LIP reset. */
2264 if (IS_CNA_CAPABLE(vha->hw)) {
2265 /* Logout across all FCFs. */
2266 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2269 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2270 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2271 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2274 mcp->mb[3] = vha->hw->loop_reset_delay;
2275 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2277 mcp->mb[0] = MBC_LIP_RESET;
2278 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
/* Extended-ID parts put the all-ports mask in mb1 low byte, else high byte. */
2279 if (HAS_EXTENDED_IDS(vha->hw)) {
2280 mcp->mb[1] = 0x00ff;
2282 mcp->out_mb |= MBX_10;
2284 mcp->mb[1] = 0xff00;
2286 mcp->mb[2] = vha->hw->loop_reset_delay;
2290 mcp->tov = MBX_TOV_SECONDS;
2292 rval = qla2x00_mailbox_command(vha, mcp);
2294 if (rval != QLA_SUCCESS) {
2296 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2299 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2300 "Done %s.\n", __func__);
/*
 * Issue a Simple Name Server command via MBC_SEND_SNS_COMMAND.
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines throughout; code left byte-identical, comments only.
 */
2311 * ha = adapter block pointer.
2312 * sns = pointer for command.
2313 * cmd_size = command size.
2314 * buf_size = response/command size.
2315 * TARGET_QUEUE_LOCK must be released.
2316 * ADAPTER_STATE_LOCK must be released.
2319 * qla2x00 local function return status code.
2325 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2326 uint16_t cmd_size, size_t buf_size)
2330 mbx_cmd_t *mcp = &mc;
2332 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2333 "Entered %s.\n", __func__);
2335 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2336 "Retry cnt=%d ratov=%d total tov=%d.\n",
2337 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
/* 64-bit DMA address of the SNS buffer split across mb2/mb3 + mb6/mb7. */
2339 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2340 mcp->mb[1] = cmd_size;
2341 mcp->mb[2] = MSW(sns_phys_address);
2342 mcp->mb[3] = LSW(sns_phys_address);
2343 mcp->mb[6] = MSW(MSD(sns_phys_address));
2344 mcp->mb[7] = LSW(MSD(sns_phys_address));
2345 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2346 mcp->in_mb = MBX_0|MBX_1;
2347 mcp->buf_size = buf_size;
2348 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
/* Generous timeout: 2.5x the login timeout. */
2349 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2350 rval = qla2x00_mailbox_command(vha, mcp);
2352 if (rval != QLA_SUCCESS) {
2354 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2355 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2356 rval, mcp->mb[0], mcp->mb[1]);
2359 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2360 "Done %s.\n", __func__);
/*
 * Fabric login on 24xx+ via a login IOCB rather than a mailbox command;
 * maps IOCB completion codes back onto legacy mailbox status values in mb[].
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines throughout (case breaks, returns); code left byte-identical.
 */
2367 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2368 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2372 struct logio_entry_24xx *lg;
2375 struct qla_hw_data *ha = vha->hw;
2376 struct req_que *req;
2378 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2379 "Entered %s.\n", __func__);
2381 if (vha->vp_idx && vha->qpair)
2382 req = vha->qpair->req;
2384 req = ha->req_q_map[0];
/* IOCB comes from the coherent DMA pool; freed at the end of the function. */
2386 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2388 ql_log(ql_log_warn, vha, 0x1062,
2389 "Failed to allocate login IOCB.\n");
2390 return QLA_MEMORY_ALLOC_FAILED;
2393 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2394 lg->entry_count = 1;
2395 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2396 lg->nport_handle = cpu_to_le16(loop_id);
2397 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
/* opt-dependent flags; the conditions guarding these lines are not visible. */
2399 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2401 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2402 lg->port_id[0] = al_pa;
2403 lg->port_id[1] = area;
2404 lg->port_id[2] = domain;
2405 lg->vp_index = vha->vp_idx;
2406 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2407 (ha->r_a_tov / 10 * 2) + 2);
2408 if (rval != QLA_SUCCESS) {
2409 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2410 "Failed to issue login IOCB (%x).\n", rval);
2411 } else if (lg->entry_status != 0) {
2412 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2413 "Failed to complete IOCB -- error status (%x).\n",
2415 rval = QLA_FUNCTION_FAILED;
2416 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2417 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2418 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2420 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2421 "Failed to complete IOCB -- completion status (%x) "
2422 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
/* Translate IOCB login status codes to mailbox-style mb[0] values. */
2426 case LSC_SCODE_PORTID_USED:
2427 mb[0] = MBS_PORT_ID_USED;
2428 mb[1] = LSW(iop[1]);
2430 case LSC_SCODE_NPORT_USED:
2431 mb[0] = MBS_LOOP_ID_USED;
2433 case LSC_SCODE_NOLINK:
2434 case LSC_SCODE_NOIOCB:
2435 case LSC_SCODE_NOXCB:
2436 case LSC_SCODE_CMD_FAILED:
2437 case LSC_SCODE_NOFABRIC:
2438 case LSC_SCODE_FW_NOT_READY:
2439 case LSC_SCODE_NOT_LOGGED_IN:
2440 case LSC_SCODE_NOPCB:
2441 case LSC_SCODE_ELS_REJECT:
2442 case LSC_SCODE_CMD_PARAM_ERR:
2443 case LSC_SCODE_NONPORT:
2444 case LSC_SCODE_LOGGED_IN:
2445 case LSC_SCODE_NOFLOGI_ACC:
2447 mb[0] = MBS_COMMAND_ERROR;
2451 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2452 "Done %s.\n", __func__);
2454 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2456 mb[0] = MBS_COMMAND_COMPLETE;
2458 if (iop[0] & BIT_4) {
2464 /* Passback COS information. */
2466 if (lg->io_parameter[7] || lg->io_parameter[8])
2467 mb[10] |= BIT_0; /* Class 2. */
2468 if (lg->io_parameter[9] || lg->io_parameter[10])
2469 mb[10] |= BIT_1; /* Class 3. */
2470 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2471 mb[10] |= BIT_7; /* Confirmed Completion
2476 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
/*
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines throughout; code left byte-identical, comments only.
 */
2482 * qla2x00_login_fabric
2483 * Issue login fabric port mailbox command.
2486 * ha = adapter block pointer.
2487 * loop_id = device loop ID.
2488 * domain = device domain.
2489 * area = device area.
2490 * al_pa = device AL_PA.
2491 * status = pointer for return status.
2492 * opt = command options.
2493 * TARGET_QUEUE_LOCK must be released.
2494 * ADAPTER_STATE_LOCK must be released.
2497 * qla2x00 local function return status code.
2503 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2504 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2508 mbx_cmd_t *mcp = &mc;
2509 struct qla_hw_data *ha = vha->hw;
2511 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2512 "Entered %s.\n", __func__);
2514 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2515 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2516 if (HAS_EXTENDED_IDS(ha)) {
2517 mcp->mb[1] = loop_id;
2519 mcp->out_mb |= MBX_10;
2521 mcp->mb[1] = (loop_id << 8) | opt;
/* 24-bit port id packed as mb2=domain, mb3=(area<<8)|al_pa. */
2523 mcp->mb[2] = domain;
2524 mcp->mb[3] = area << 8 | al_pa;
2526 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2527 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2529 rval = qla2x00_mailbox_command(vha, mcp);
2531 /* Return mailbox statuses. */
2538 /* COS retrieved from Get-Port-Database mailbox command. */
2542 if (rval != QLA_SUCCESS) {
2543 /* RLU tmp code: need to change main mailbox_command function to
2544 * return ok even when the mailbox completion value is not
2545 * SUCCESS. The caller needs to be responsible to interpret
2546 * the return values of this mailbox command if we're not
2547 * to change too much of the existing code.
/* Known recoverable mailbox completion codes (action line not visible). */
2549 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2550 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2551 mcp->mb[0] == 0x4006)
2555 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2556 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2557 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2560 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2561 "Done %s.\n", __func__);
/*
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines throughout; code left byte-identical, comments only.
 */
2568 * qla2x00_login_local_device
2569 * Issue login loop port mailbox command.
2572 * ha = adapter block pointer.
2573 * loop_id = device loop ID.
2574 * opt = command options.
2577 * Return status code.
2584 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2585 uint16_t *mb_ret, uint8_t opt)
2589 mbx_cmd_t *mcp = &mc;
2590 struct qla_hw_data *ha = vha->hw;
2592 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2593 "Entered %s.\n", __func__);
/* FWI2 parts delegate to the IOCB-based fabric login path. */
2595 if (IS_FWI2_CAPABLE(ha))
2596 return qla24xx_login_fabric(vha, fcport->loop_id,
2597 fcport->d_id.b.domain, fcport->d_id.b.area,
2598 fcport->d_id.b.al_pa, mb_ret, opt);
2600 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2601 if (HAS_EXTENDED_IDS(ha))
2602 mcp->mb[1] = fcport->loop_id;
2604 mcp->mb[1] = fcport->loop_id << 8;
2606 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2607 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2608 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2610 rval = qla2x00_mailbox_command(vha, mcp);
2612 /* Return mailbox statuses. */
2613 if (mb_ret != NULL) {
2614 mb_ret[0] = mcp->mb[0];
2615 mb_ret[1] = mcp->mb[1];
2616 mb_ret[6] = mcp->mb[6];
2617 mb_ret[7] = mcp->mb[7];
2620 if (rval != QLA_SUCCESS) {
2621 /* AV tmp code: need to change main mailbox_command function to
2622 * return ok even when the mailbox completion value is not
2623 * SUCCESS. The caller needs to be responsible to interpret
2624 * the return values of this mailbox command if we're not
2625 * to change too much of the existing code.
/* Known recoverable mailbox completion codes (action line not visible). */
2627 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2630 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2631 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2632 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2635 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2636 "Done %s.\n", __func__);
/*
 * Fabric logout on 24xx+ via a logout IOCB (implicit + explicit LOGO).
 * NOTE(review): extraction artifact -- stray line numbers and missing
 * lines throughout (req assignment, returns); code left byte-identical.
 */
2643 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2644 uint8_t area, uint8_t al_pa)
2647 struct logio_entry_24xx *lg;
2649 struct qla_hw_data *ha = vha->hw;
2650 struct req_que *req;
2652 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2653 "Entered %s.\n", __func__);
/* IOCB comes from the coherent DMA pool; freed at the end of the function. */
2655 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2657 ql_log(ql_log_warn, vha, 0x106e,
2658 "Failed to allocate logout IOCB.\n");
2659 return QLA_MEMORY_ALLOC_FAILED;
2663 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2664 lg->entry_count = 1;
2665 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2666 lg->nport_handle = cpu_to_le16(loop_id);
2668 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2670 lg->port_id[0] = al_pa;
2671 lg->port_id[1] = area;
2672 lg->port_id[2] = domain;
2673 lg->vp_index = vha->vp_idx;
2674 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2675 (ha->r_a_tov / 10 * 2) + 2);
2676 if (rval != QLA_SUCCESS) {
2677 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2678 "Failed to issue logout IOCB (%x).\n", rval);
2679 } else if (lg->entry_status != 0) {
2680 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2681 "Failed to complete IOCB -- error status (%x).\n",
2683 rval = QLA_FUNCTION_FAILED;
2684 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2685 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2686 "Failed to complete IOCB -- completion status (%x) "
2687 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2688 le32_to_cpu(lg->io_parameter[0]),
2689 le32_to_cpu(lg->io_parameter[1]));
2692 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2693 "Done %s.\n", __func__);
2696 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2702  * qla2x00_fabric_logout
2703  *	Issue logout fabric port mailbox command.
2706  *	ha = adapter block pointer.
2707  *	loop_id = device loop ID.
2708  *	TARGET_QUEUE_LOCK must be released.
2709  *	ADAPTER_STATE_LOCK must be released.
2712  *	qla2x00 local function return status code.
 *
 * Legacy (pre-24xx) mailbox-based fabric logout; loop ID placement in
 * mb[1] depends on whether the HBA supports extended IDs.
 *
 * NOTE(review): elided lines in this extract include the return type,
 * "mbx_cmd_t mc;" declaration, braces and the final "return rval;".
 */
2718 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2719     uint8_t area, uint8_t al_pa)
2723 	mbx_cmd_t *mcp = &mc;
2725 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2726 	    "Entered %s.\n", __func__);
2728 	mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2729 	mcp->out_mb = MBX_1|MBX_0;
2730 	if (HAS_EXTENDED_IDS(vha->hw)) {
	/* Extended IDs: full 16-bit loop ID in mb[1], plus mb[10]. */
2731 		mcp->mb[1] = loop_id;
2733 		mcp->out_mb |= MBX_10;
	/* Legacy: 8-bit loop ID in the high byte of mb[1]. */
2735 		mcp->mb[1] = loop_id << 8;
2738 	mcp->in_mb = MBX_1|MBX_0;
2739 	mcp->tov = MBX_TOV_SECONDS;
2741 	rval = qla2x00_mailbox_command(vha, mcp);
2743 	if (rval != QLA_SUCCESS) {
2745 		ql_dbg(ql_dbg_mbx, vha, 0x1074,
2746 		    "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2749 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2750 		    "Done %s.\n", __func__);
2757  * qla2x00_full_login_lip
2758  *	Issue full login LIP mailbox command.
2761  *	ha = adapter block pointer.
2762  *	TARGET_QUEUE_LOCK must be released.
2763  *	ADAPTER_STATE_LOCK must be released.
2766  *	qla2x00 local function return status code.
 *
 * NOTE(review): elided lines include the return type, "mbx_cmd_t mc;"
 * and the final "return rval;" -- verify against upstream.
 */
2772 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2776 	mbx_cmd_t *mcp = &mc;
2778 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2779 	    "Entered %s.\n", __func__);
2781 	mcp->mb[0] = MBC_LIP_FULL_LOGIN;
	/* FWI2 firmware takes BIT_4 in mb[1]; legacy firmware takes 0. */
2782 	mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
2785 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2787 	mcp->tov = MBX_TOV_SECONDS;
2789 	rval = qla2x00_mailbox_command(vha, mcp);
2791 	if (rval != QLA_SUCCESS) {
2793 		ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2796 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2797 		    "Done %s.\n", __func__);
2804  * qla2x00_get_id_list
 *	Fetch the firmware's list of logged-in port IDs into a
 *	caller-supplied DMA buffer; *entries receives the count (mb[1]).
2807  *	ha = adapter block pointer.
2810  *	qla2x00 local function return status code.
 *
 * NOTE(review): elided lines include the parameter list tail (opt,
 * *entries), declarations, braces and "return rval;".
 */
2816 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2821 	mbx_cmd_t *mcp = &mc;
2823 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2824 	    "Entered %s.\n", __func__);
2826 	if (id_list == NULL)
2827 		return QLA_FUNCTION_FAILED;
2829 	mcp->mb[0] = MBC_GET_ID_LIST;
2830 	mcp->out_mb = MBX_0;
2831 	if (IS_FWI2_CAPABLE(vha->hw)) {
	/* FWI2 layout: 64-bit DMA address split across mb[2,3,6,7], VP in mb[9]. */
2832 		mcp->mb[2] = MSW(id_list_dma);
2833 		mcp->mb[3] = LSW(id_list_dma);
2834 		mcp->mb[6] = MSW(MSD(id_list_dma));
2835 		mcp->mb[7] = LSW(MSD(id_list_dma));
2837 		mcp->mb[9] = vha->vp_idx;
2838 		mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
	/* Legacy layout: address words shifted down into mb[1,2,3,6]. */
2840 		mcp->mb[1] = MSW(id_list_dma);
2841 		mcp->mb[2] = LSW(id_list_dma);
2842 		mcp->mb[3] = MSW(MSD(id_list_dma));
2843 		mcp->mb[6] = LSW(MSD(id_list_dma));
2844 		mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2846 	mcp->in_mb = MBX_1|MBX_0;
2847 	mcp->tov = MBX_TOV_SECONDS;
2849 	rval = qla2x00_mailbox_command(vha, mcp);
2851 	if (rval != QLA_SUCCESS) {
2853 		ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
	/* mb[1] returns the number of entries written to id_list. */
2855 		*entries = mcp->mb[1];
2856 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2857 		    "Done %s.\n", __func__);
2864  * qla2x00_get_resource_cnts
2865  *	Get current firmware resource counts.
2868  *	ha = adapter block pointer.
2871  *	qla2x00 local function return status code.
 *
 * On success, caches exchange-control-block (XCB) and IOCB counts, the
 * max NPIV vport count (if supported) and, on 81xx/83xx/27xx/28xx, the
 * FCF count into struct qla_hw_data.
 *
 * NOTE(review): elided lines include the return type, rval/mc
 * declarations, braces and "return rval;".
 */
2877 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2879 	struct qla_hw_data *ha = vha->hw;
2882 	mbx_cmd_t *mcp = &mc;
2884 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2885 	    "Entered %s.\n", __func__);
2887 	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2888 	mcp->out_mb = MBX_0;
2889 	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	/* Newer ISPs also return an FCF count in mb[12]. */
2890 	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
2891 	    IS_QLA27XX(ha) || IS_QLA28XX(ha))
2892 		mcp->in_mb |= MBX_12;
2893 	mcp->tov = MBX_TOV_SECONDS;
2895 	rval = qla2x00_mailbox_command(vha, mcp);
2897 	if (rval != QLA_SUCCESS) {
2899 		ql_dbg(ql_dbg_mbx, vha, 0x107d,
2900 		    "Failed mb[0]=%x.\n", mcp->mb[0]);
2902 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2903 		    "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2904 		    "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2905 		    mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2906 		    mcp->mb[11], mcp->mb[12]);
	/* Cache the firmware's reported resource counts on the HBA. */
2908 		ha->orig_fw_tgt_xcb_count = mcp->mb[1];
2909 		ha->cur_fw_tgt_xcb_count = mcp->mb[2];
2910 		ha->cur_fw_xcb_count = mcp->mb[3];
2911 		ha->orig_fw_xcb_count = mcp->mb[6];
2912 		ha->cur_fw_iocb_count = mcp->mb[7];
2913 		ha->orig_fw_iocb_count = mcp->mb[10];
2914 		if (ha->flags.npiv_supported)
2915 			ha->max_npiv_vports = mcp->mb[11];
2916 		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2918 			ha->fw_max_fcf_count = mcp->mb[12];
2925  * qla2x00_get_fcal_position_map
2926  *	Get FCAL (LILP) position map using mailbox command
2929  *	ha = adapter state pointer.
2930  *	pos_map = buffer pointer (can be NULL).
2933  *	qla2x00 local function return status code.
 *
 * Allocates a bounce buffer from the small DMA pool, runs
 * MBC_GET_FC_AL_POSITION_MAP with MBX_DMA_IN, then copies the map to
 * pos_map when supplied.  pmap[0] holds the map entry count.
 *
 * NOTE(review): elided lines include declarations, the allocation
 * NULL-check brace, the pos_map NULL test before the memcpy, and the
 * final "return rval;".
 */
2939 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2943 	mbx_cmd_t *mcp = &mc;
2945 	dma_addr_t pmap_dma;
2946 	struct qla_hw_data *ha = vha->hw;
2948 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2949 	    "Entered %s.\n", __func__);
2951 	pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2953 		ql_log(ql_log_warn, vha, 0x1080,
2954 		    "Memory alloc failed.\n");
2955 		return QLA_MEMORY_ALLOC_FAILED;
2958 	mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2959 	mcp->mb[2] = MSW(pmap_dma);
2960 	mcp->mb[3] = LSW(pmap_dma);
2961 	mcp->mb[6] = MSW(MSD(pmap_dma));
2962 	mcp->mb[7] = LSW(MSD(pmap_dma));
2963 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2964 	mcp->in_mb = MBX_1|MBX_0;
2965 	mcp->buf_size = FCAL_MAP_SIZE;
	/* MBX_DMA_IN: the firmware DMAs data into pmap. */
2966 	mcp->flags = MBX_DMA_IN;
2967 	mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2968 	rval = qla2x00_mailbox_command(vha, mcp);
2970 	if (rval == QLA_SUCCESS) {
2971 		ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2972 		    "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2973 		    mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2974 		ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2978 			memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2980 	dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2982 	if (rval != QLA_SUCCESS) {
2983 		ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2985 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2986 		    "Done %s.\n", __func__);
2993  * qla2x00_get_link_status
2996  *	ha = adapter block pointer.
2997  *	loop_id = device loop ID.
2998  *	ret_buf = pointer to link status return buffer.
3002  *	BIT_0 = mem alloc error.
3003  *	BIT_1 = mailbox error.
 *
 * Runs MBC_GET_LINK_STATUS into a caller-mapped DMA buffer, then
 * byte-swaps the le32 counter words in place (up to link_up_cnt).
 *
 * NOTE(review): elided lines include declarations, the loop body
 * (le32_to_cpus swap) and "return rval;".
 */
3006 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
3007     struct link_statistics *stats, dma_addr_t stats_dma)
3011 	mbx_cmd_t *mcp = &mc;
3012 	uint32_t *iter = (void *)stats;
	/* Swap only the leading counters, up to (not including) link_up_cnt. */
3013 	ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
3014 	struct qla_hw_data *ha = vha->hw;
3016 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
3017 	    "Entered %s.\n", __func__);
3019 	mcp->mb[0] = MBC_GET_LINK_STATUS;
3020 	mcp->mb[2] = MSW(LSD(stats_dma));
3021 	mcp->mb[3] = LSW(LSD(stats_dma));
3022 	mcp->mb[6] = MSW(MSD(stats_dma));
3023 	mcp->mb[7] = LSW(MSD(stats_dma));
3024 	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	/* Loop-ID encoding varies by firmware generation (see branches). */
3026 	if (IS_FWI2_CAPABLE(ha)) {
3027 		mcp->mb[1] = loop_id;
3030 		mcp->out_mb |= MBX_10|MBX_4|MBX_1;
3031 		mcp->in_mb |= MBX_1;
3032 	} else if (HAS_EXTENDED_IDS(ha)) {
3033 		mcp->mb[1] = loop_id;
3035 		mcp->out_mb |= MBX_10|MBX_1;
3037 		mcp->mb[1] = loop_id << 8;
3038 		mcp->out_mb |= MBX_1;
3040 	mcp->tov = MBX_TOV_SECONDS;
3041 	mcp->flags = IOCTL_CMD;
3042 	rval = qla2x00_mailbox_command(vha, mcp);
3044 	if (rval == QLA_SUCCESS) {
3045 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3046 			ql_dbg(ql_dbg_mbx, vha, 0x1085,
3047 			    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3048 			rval = QLA_FUNCTION_FAILED;
3050 			/* Re-endianize - firmware data is le32. */
3051 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3052 			    "Done %s.\n", __func__);
3053 			for ( ; dwords--; iter++)
3058 		ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
/*
 * qla24xx_get_isp_stats
 *	Fetch private link statistics (MBC_GET_LINK_PRIV_STATS) via the
 *	asynchronous mailbox path (qla24xx_send_mb_cmd), then byte-swap
 *	the le32 counters in place.
 *
 * NOTE(review): mc.mb[] is a uint16_t array and the mailbox writer
 * performs its own CPU->LE conversion, so the cpu_to_le16() on mb[9]
 * and mb[10] below looks like a double swap on big-endian hosts
 * (upstream later dropped it) -- confirm against current mainline.
 *
 * NOTE(review): elided lines include the return type, rval
 * declaration, braces and "return rval;".
 */
3065 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3066     dma_addr_t stats_dma, uint16_t options)
3070 	mbx_cmd_t *mcp = &mc;
3071 	uint32_t *iter, dwords;
3073 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3074 	    "Entered %s.\n", __func__);
3076 	memset(&mc, 0, sizeof(mc));
3077 	mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3078 	mc.mb[2] = MSW(stats_dma);
3079 	mc.mb[3] = LSW(stats_dma);
3080 	mc.mb[6] = MSW(MSD(stats_dma));
3081 	mc.mb[7] = LSW(MSD(stats_dma));
	/* mb[8]: buffer size in dwords. */
3082 	mc.mb[8] = sizeof(struct link_statistics) / 4;
3083 	mc.mb[9] = cpu_to_le16(vha->vp_idx);
3084 	mc.mb[10] = cpu_to_le16(options);
3086 	rval = qla24xx_send_mb_cmd(vha, &mc);
3088 	if (rval == QLA_SUCCESS) {
3089 		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3090 			ql_dbg(ql_dbg_mbx, vha, 0x1089,
3091 			    "Failed mb[0]=%x.\n", mcp->mb[0]);
3092 			rval = QLA_FUNCTION_FAILED;
3094 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3095 			    "Done %s.\n", __func__);
3096 			/* Re-endianize - firmware data is le32. */
3097 			dwords = sizeof(struct link_statistics) / 4;
3098 			iter = &stats->link_fail_cnt;
3099 			for ( ; dwords--; iter++)
3104 		ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
/*
 * qla24xx_abort_command
 *	Abort an outstanding SRB on a 24xx-class HBA via an abort IOCB.
 *	Looks up the command's handle in the request queue under the
 *	qpair lock, then issues ABORT_IOCB_TYPE.  Delegates to the async
 *	path when ql2xasynctmfenable is set.
 *
 * On completion the firmware reuses abt->nport_handle as the
 * completion-status field; nonzero means the abort failed.
 *
 * NOTE(review): elided lines include the return type, rval/handle/
 * abt_dma declarations, several if/else braces and "return rval;".
 */
3111 qla24xx_abort_command(srb_t *sp)
3114 	unsigned long flags = 0;
3116 	struct abort_entry_24xx *abt;
3119 	fc_port_t *fcport = sp->fcport;
3120 	struct scsi_qla_host *vha = fcport->vha;
3121 	struct qla_hw_data *ha = vha->hw;
3122 	struct req_que *req = vha->req;
3123 	struct qla_qpair *qpair = sp->qpair;
3125 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3126 	    "Entered %s.\n", __func__);
	/* Prefer the command's own qpair request queue when qpairs exist. */
3128 	if (vha->flags.qpairs_available && sp->qpair)
3129 		req = sp->qpair->req;
3131 		return QLA_FUNCTION_FAILED;
3133 	if (ql2xasynctmfenable)
3134 		return qla24xx_async_abort_command(sp);
	/* Find the outstanding-command handle for this SRB. */
3136 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3137 	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3138 		if (req->outstanding_cmds[handle] == sp)
3141 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3142 	if (handle == req->num_outstanding_cmds) {
3143 		/* Command not found. */
3144 		return QLA_FUNCTION_FAILED;
3147 	abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3149 		ql_log(ql_log_warn, vha, 0x108d,
3150 		    "Failed to allocate abort IOCB.\n");
3151 		return QLA_MEMORY_ALLOC_FAILED;
3154 	abt->entry_type = ABORT_IOCB_TYPE;
3155 	abt->entry_count = 1;
3156 	abt->handle = MAKE_HANDLE(req->id, abt->handle);
3157 	abt->nport_handle = cpu_to_le16(fcport->loop_id);
3158 	abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
3159 	abt->port_id[0] = fcport->d_id.b.al_pa;
3160 	abt->port_id[1] = fcport->d_id.b.area;
3161 	abt->port_id[2] = fcport->d_id.b.domain;
3162 	abt->vp_index = fcport->vha->vp_idx;
3164 	abt->req_que_no = cpu_to_le16(req->id);
3166 	rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3167 	if (rval != QLA_SUCCESS) {
3168 		ql_dbg(ql_dbg_mbx, vha, 0x108e,
3169 		    "Failed to issue IOCB (%x).\n", rval);
3170 	} else if (abt->entry_status != 0) {
3171 		ql_dbg(ql_dbg_mbx, vha, 0x108f,
3172 		    "Failed to complete IOCB -- error status (%x).\n",
3174 		rval = QLA_FUNCTION_FAILED;
	/* nport_handle doubles as completion status on return. */
3175 	} else if (abt->nport_handle != cpu_to_le16(0)) {
3176 		ql_dbg(ql_dbg_mbx, vha, 0x1090,
3177 		    "Failed to complete IOCB -- completion status (%x).\n",
3178 		    le16_to_cpu(abt->nport_handle));
3179 		if (abt->nport_handle == CS_IOCB_ERROR)
3180 			rval = QLA_FUNCTION_PARAMETER_ERROR;
3182 			rval = QLA_FUNCTION_FAILED;
3184 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3185 		    "Done %s.\n", __func__);
3188 	dma_pool_free(ha->s_dma_pool, abt, abt_dma);
/*
 * Task-management IOCB overlay: the same DMA buffer is written as a
 * tsk_mgmt_entry on submit and read back as a sts_entry_24xx on
 * completion (union presumably elided from this extract -- verify).
 */
3193 struct tsk_mgmt_cmd {
3195 		struct tsk_mgmt_entry tsk;
3196 		struct sts_entry_24xx sts;
/*
 * __qla24xx_issue_tmf
 *	Common worker for 24xx task-management functions (target reset /
 *	LUN reset).  Builds a TSK_MGMT_IOCB in the small DMA pool, issues
 *	it, validates entry/completion/SCSI status, then issues a marker
 *	IOCB to resynchronize the firmware.
 *
 * @name: human-readable TMF name for log messages ("Target"/"Lun").
 * @type: TCF_* control flag (e.g. TCF_LUN_RESET, TCF_TARGET_RESET).
 * @l:    LUN (used only for TCF_LUN_RESET).
 * @tag:  request tag (usage not visible in this extract).
 *
 * NOTE(review): elided lines include declarations (rval, rval2, tsk_dma,
 * vha/ha/req/qpair assignments), the allocation NULL-check brace, the
 * sts aliasing assignment, and the final "return rval;".
 */
3201 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3202     uint64_t l, int tag)
3205 	struct tsk_mgmt_cmd *tsk;
3206 	struct sts_entry_24xx *sts;
3208 	scsi_qla_host_t *vha;
3209 	struct qla_hw_data *ha;
3210 	struct req_que *req;
3211 	struct qla_qpair *qpair;
3217 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3218 	    "Entered %s.\n", __func__);
	/* vports use their own qpair when available (body elided). */
3220 	if (vha->vp_idx && vha->qpair) {
3226 	tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3228 		ql_log(ql_log_warn, vha, 0x1093,
3229 		    "Failed to allocate task management IOCB.\n");
3230 		return QLA_MEMORY_ALLOC_FAILED;
3233 	tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3234 	tsk->p.tsk.entry_count = 1;
3235 	tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
3236 	tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
	/* Timeout = 2 * R_A_TOV (r_a_tov is in 100ms units). */
3237 	tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3238 	tsk->p.tsk.control_flags = cpu_to_le32(type);
3239 	tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3240 	tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3241 	tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3242 	tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3243 	if (type == TCF_LUN_RESET) {
	/* FCP LUN field is big-endian on the wire; swap after encoding. */
3244 		int_to_scsilun(l, &tsk->p.tsk.lun);
3245 		host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3246 		    sizeof(tsk->p.tsk.lun));
3250 	rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3251 	if (rval != QLA_SUCCESS) {
3252 		ql_dbg(ql_dbg_mbx, vha, 0x1094,
3253 		    "Failed to issue %s reset IOCB (%x).\n", name, rval);
3254 	} else if (sts->entry_status != 0) {
3255 		ql_dbg(ql_dbg_mbx, vha, 0x1095,
3256 		    "Failed to complete IOCB -- error status (%x).\n",
3258 		rval = QLA_FUNCTION_FAILED;
3259 	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3260 		ql_dbg(ql_dbg_mbx, vha, 0x1096,
3261 		    "Failed to complete IOCB -- completion status (%x).\n",
3262 		    le16_to_cpu(sts->comp_status));
3263 		rval = QLA_FUNCTION_FAILED;
3264 	} else if (le16_to_cpu(sts->scsi_status) &
3265 	    SS_RESPONSE_INFO_LEN_VALID) {
	/* FCP response info present: byte 3 is the TMF response code. */
3266 		if (le32_to_cpu(sts->rsp_data_len) < 4) {
3267 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3268 			    "Ignoring inconsistent data length -- not enough "
3269 			    "response info (%d).\n",
3270 			    le32_to_cpu(sts->rsp_data_len));
3271 		} else if (sts->data[3]) {
3272 			ql_dbg(ql_dbg_mbx, vha, 0x1098,
3273 			    "Failed to complete IOCB -- response (%x).\n",
3275 			rval = QLA_FUNCTION_FAILED;
3279 	/* Issue marker IOCB. */
3280 	rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
3281 	    type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
3282 	if (rval2 != QLA_SUCCESS) {
3283 		ql_dbg(ql_dbg_mbx, vha, 0x1099,
3284 		    "Failed to issue marker IOCB (%x).\n", rval2);
3286 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3287 		    "Done %s.\n", __func__);
3290 	dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
/*
 * qla24xx_abort_target
 *	Target reset TMF: async path when ql2xasynctmfenable is set and
 *	the HBA is FWI2-capable, otherwise the synchronous IOCB worker.
 */
3296 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3298 	struct qla_hw_data *ha = fcport->vha->hw;
3300 	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3301 		return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3303 	return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
/*
 * qla24xx_lun_reset
 *	LUN reset TMF: mirrors qla24xx_abort_target but with
 *	TCF_LUN_RESET.
 */
3307 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3309 	struct qla_hw_data *ha = fcport->vha->hw;
3311 	if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3312 		return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3314 	return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
/*
 * qla2x00_system_error
 *	Ask the firmware to generate a system error (MBC_GEN_SYSTEM_ERROR)
 *	-- a diagnostic aid; supported only on 23xx and FWI2-capable HBAs.
 *
 * NOTE(review): elided lines include declarations, in_mb setup and
 * "return rval;".
 */
3318 qla2x00_system_error(scsi_qla_host_t *vha)
3322 	mbx_cmd_t *mcp = &mc;
3323 	struct qla_hw_data *ha = vha->hw;
3325 	if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3326 		return QLA_FUNCTION_FAILED;
3328 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3329 	    "Entered %s.\n", __func__);
3331 	mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3332 	mcp->out_mb = MBX_0;
3336 	rval = qla2x00_mailbox_command(vha, mcp);
3338 	if (rval != QLA_SUCCESS) {
3339 		ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3341 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3342 		    "Done %s.\n", __func__);
/*
 * qla2x00_write_serdes_word
 *	Write one SerDes register word (MBC_WRITE_SERDES); supported on
 *	25xx/2031/27xx/28xx.  On 2031 only the low data byte is written.
 *
 * NOTE(review): elided lines include declarations, the mb[1]=addr
 * assignment, the non-2031 data assignment and "return rval;".
 */
3349 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3353 	mbx_cmd_t *mcp = &mc;
3355 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3356 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3357 		return QLA_FUNCTION_FAILED;
3359 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3360 	    "Entered %s.\n", __func__);
3362 	mcp->mb[0] = MBC_WRITE_SERDES;
3364 	if (IS_QLA2031(vha->hw))
3365 		mcp->mb[2] = data & 0xff;
3370 	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3372 	mcp->tov = MBX_TOV_SECONDS;
3374 	rval = qla2x00_mailbox_command(vha, mcp);
3376 	if (rval != QLA_SUCCESS) {
3377 		ql_dbg(ql_dbg_mbx, vha, 0x1183,
3378 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3380 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3381 		    "Done %s.\n", __func__);
/*
 * qla2x00_read_serdes_word
 *	Read one SerDes register word (MBC_READ_SERDES) into *data;
 *	supported on 25xx/2031/27xx/28xx.  On 2031 only the low byte of
 *	mb[1] is meaningful.
 *
 * NOTE(review): elided lines include declarations, the mb[1]=addr
 * assignment, the non-2031 *data assignment and "return rval;".
 */
3388 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3392 	mbx_cmd_t *mcp = &mc;
3394 	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3395 	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3396 		return QLA_FUNCTION_FAILED;
3398 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3399 	    "Entered %s.\n", __func__);
3401 	mcp->mb[0] = MBC_READ_SERDES;
3404 	mcp->out_mb = MBX_3|MBX_1|MBX_0;
3405 	mcp->in_mb = MBX_1|MBX_0;
3406 	mcp->tov = MBX_TOV_SECONDS;
3408 	rval = qla2x00_mailbox_command(vha, mcp);
3410 	if (IS_QLA2031(vha->hw))
3411 		*data = mcp->mb[1] & 0xff;
3415 	if (rval != QLA_SUCCESS) {
3416 		ql_dbg(ql_dbg_mbx, vha, 0x1186,
3417 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3419 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3420 		    "Done %s.\n", __func__);
/*
 * qla8044_write_serdes_word
 *	Write a 32-bit Ethernet SerDes register on ISP8044
 *	(MBC_SET_GET_ETH_SERDES_REG with HCS_WRITE_SERDES sub-op);
 *	32-bit addr/data are split into LSW/MSW mailbox words.
 *
 * NOTE(review): elided lines include declarations, in_mb setup and
 * "return rval;".
 */
3427 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3431 	mbx_cmd_t *mcp = &mc;
3433 	if (!IS_QLA8044(vha->hw))
3434 		return QLA_FUNCTION_FAILED;
3436 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3437 	    "Entered %s.\n", __func__);
3439 	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3440 	mcp->mb[1] = HCS_WRITE_SERDES;
3441 	mcp->mb[3] = LSW(addr);
3442 	mcp->mb[4] = MSW(addr);
3443 	mcp->mb[5] = LSW(data);
3444 	mcp->mb[6] = MSW(data);
3445 	mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3447 	mcp->tov = MBX_TOV_SECONDS;
3449 	rval = qla2x00_mailbox_command(vha, mcp);
3451 	if (rval != QLA_SUCCESS) {
3452 		ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3453 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3455 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3456 		    "Done %s.\n", __func__);
/*
 * qla8044_read_serdes_word
 *	Read a 32-bit Ethernet SerDes register on ISP8044; the result is
 *	reassembled from mb[2] (high word) and mb[1] (low word).
 *
 * NOTE(review): *data is written unconditionally, even when the mailbox
 * command failed; elided lines include declarations and "return rval;".
 */
3463 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3467 	mbx_cmd_t *mcp = &mc;
3469 	if (!IS_QLA8044(vha->hw))
3470 		return QLA_FUNCTION_FAILED;
3472 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3473 	    "Entered %s.\n", __func__);
3475 	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3476 	mcp->mb[1] = HCS_READ_SERDES;
3477 	mcp->mb[3] = LSW(addr);
3478 	mcp->mb[4] = MSW(addr);
3479 	mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3480 	mcp->in_mb = MBX_2|MBX_1|MBX_0;
3481 	mcp->tov = MBX_TOV_SECONDS;
3483 	rval = qla2x00_mailbox_command(vha, mcp);
3485 	*data = mcp->mb[2] << 16 | mcp->mb[1];
3487 	if (rval != QLA_SUCCESS) {
3488 		ql_dbg(ql_dbg_mbx, vha, 0x118a,
3489 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3491 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3492 		    "Done %s.\n", __func__);
3499  * qla2x00_set_serdes_params() -
3501  * @sw_em_1g: serial link options
3502  * @sw_em_2g: serial link options
3503  * @sw_em_4g: serial link options
 *
 * Program per-speed SerDes emphasis parameters (MBC_SERDES_PARAMS);
 * BIT_15 is OR'd into each value, presumably a "valid" flag -- verify.
 *
 * NOTE(review): elided lines include declarations, mb[1] setup, in_mb
 * and "return rval;".
 */
3508 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3509     uint16_t sw_em_2g, uint16_t sw_em_4g)
3513 	mbx_cmd_t *mcp = &mc;
3515 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3516 	    "Entered %s.\n", __func__);
3518 	mcp->mb[0] = MBC_SERDES_PARAMS;
3520 	mcp->mb[2] = sw_em_1g | BIT_15;
3521 	mcp->mb[3] = sw_em_2g | BIT_15;
3522 	mcp->mb[4] = sw_em_4g | BIT_15;
3523 	mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3525 	mcp->tov = MBX_TOV_SECONDS;
3527 	rval = qla2x00_mailbox_command(vha, mcp);
3529 	if (rval != QLA_SUCCESS) {
3531 		ql_dbg(ql_dbg_mbx, vha, 0x109f,
3532 		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3535 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3536 		    "Done %s.\n", __func__);
/*
 * qla2x00_stop_firmware
 *	Halt the RISC firmware (MBC_STOP_FIRMWARE); FWI2-capable HBAs
 *	only.  MBS_INVALID_COMMAND is mapped to QLA_INVALID_COMMAND so
 *	callers can distinguish "not supported" from real failure.
 *
 * NOTE(review): elided lines include declarations, mb[1]/in_mb/tov
 * setup and "return rval;".
 */
3543 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3547 	mbx_cmd_t *mcp = &mc;
3549 	if (!IS_FWI2_CAPABLE(vha->hw))
3550 		return QLA_FUNCTION_FAILED;
3552 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3553 	    "Entered %s.\n", __func__);
3555 	mcp->mb[0] = MBC_STOP_FIRMWARE;
3557 	mcp->out_mb = MBX_1|MBX_0;
3561 	rval = qla2x00_mailbox_command(vha, mcp);
3563 	if (rval != QLA_SUCCESS) {
3564 		ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3565 		if (mcp->mb[0] == MBS_INVALID_COMMAND)
3566 			rval = QLA_INVALID_COMMAND;
3568 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3569 		    "Done %s.\n", __func__);
/*
 * qla2x00_enable_eft_trace
 *	Enable Extended Firmware Trace (MBC_TRACE_CONTROL / TC_EFT_ENABLE)
 *	into a pre-mapped DMA buffer.  Bails out early when the device is
 *	not FWI2-capable or the PCI channel is offline.
 *
 * NOTE(review): elided lines include declarations, the buffers
 * parameter line and "return rval;".
 */
3576 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3581 	mbx_cmd_t *mcp = &mc;
3583 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3584 	    "Entered %s.\n", __func__);
3586 	if (!IS_FWI2_CAPABLE(vha->hw))
3587 		return QLA_FUNCTION_FAILED;
3589 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3590 		return QLA_FUNCTION_FAILED;
3592 	mcp->mb[0] = MBC_TRACE_CONTROL;
3593 	mcp->mb[1] = TC_EFT_ENABLE;
3594 	mcp->mb[2] = LSW(eft_dma);
3595 	mcp->mb[3] = MSW(eft_dma);
3596 	mcp->mb[4] = LSW(MSD(eft_dma));
3597 	mcp->mb[5] = MSW(MSD(eft_dma));
3598 	mcp->mb[6] = buffers;
3599 	mcp->mb[7] = TC_AEN_DISABLE;
3600 	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3601 	mcp->in_mb = MBX_1|MBX_0;
3602 	mcp->tov = MBX_TOV_SECONDS;
3604 	rval = qla2x00_mailbox_command(vha, mcp);
3605 	if (rval != QLA_SUCCESS) {
3606 		ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3607 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3608 		    rval, mcp->mb[0], mcp->mb[1]);
3610 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3611 		    "Done %s.\n", __func__);
/*
 * qla2x00_disable_eft_trace
 *	Disable Extended Firmware Trace (TC_EFT_DISABLE); same guards as
 *	the enable path.
 *
 * NOTE(review): elided lines include declarations and "return rval;".
 */
3618 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3622 	mbx_cmd_t *mcp = &mc;
3624 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3625 	    "Entered %s.\n", __func__);
3627 	if (!IS_FWI2_CAPABLE(vha->hw))
3628 		return QLA_FUNCTION_FAILED;
3630 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3631 		return QLA_FUNCTION_FAILED;
3633 	mcp->mb[0] = MBC_TRACE_CONTROL;
3634 	mcp->mb[1] = TC_EFT_DISABLE;
3635 	mcp->out_mb = MBX_1|MBX_0;
3636 	mcp->in_mb = MBX_1|MBX_0;
3637 	mcp->tov = MBX_TOV_SECONDS;
3639 	rval = qla2x00_mailbox_command(vha, mcp);
3640 	if (rval != QLA_SUCCESS) {
3641 		ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3642 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3643 		    rval, mcp->mb[0], mcp->mb[1]);
3645 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3646 		    "Done %s.\n", __func__);
/*
 * qla2x00_enable_fce_trace
 *	Enable Fibre Channel Event (FCE) tracing on 25xx/81xx/83xx/27xx/
 *	28xx.  On success, copies the first 8 returned mailbox words to
 *	*mb (the *dwords out-parameter handling is elided in this
 *	extract).
 *
 * NOTE(review): elided lines include declarations, mb[8] setup, the
 * out_mb continuation and "return rval;".
 */
3653 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3654     uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3658 	mbx_cmd_t *mcp = &mc;
3660 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3661 	    "Entered %s.\n", __func__);
3663 	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3664 	    !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
3665 	    !IS_QLA28XX(vha->hw))
3666 		return QLA_FUNCTION_FAILED;
3668 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3669 		return QLA_FUNCTION_FAILED;
3671 	mcp->mb[0] = MBC_TRACE_CONTROL;
3672 	mcp->mb[1] = TC_FCE_ENABLE;
3673 	mcp->mb[2] = LSW(fce_dma);
3674 	mcp->mb[3] = MSW(fce_dma);
3675 	mcp->mb[4] = LSW(MSD(fce_dma));
3676 	mcp->mb[5] = MSW(MSD(fce_dma));
3677 	mcp->mb[6] = buffers;
3678 	mcp->mb[7] = TC_AEN_DISABLE;
	/* Default RX/TX FCE ring sizes. */
3680 	mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3681 	mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3682 	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3684 	mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3685 	mcp->tov = MBX_TOV_SECONDS;
3687 	rval = qla2x00_mailbox_command(vha, mcp);
3688 	if (rval != QLA_SUCCESS) {
3689 		ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3690 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3691 		    rval, mcp->mb[0], mcp->mb[1]);
3693 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3694 		    "Done %s.\n", __func__);
3697 		memcpy(mb, mcp->mb, 8 * sizeof(*mb));
/*
 * qla2x00_disable_fce_trace
 *	Disable FCE tracing and return the final write (*wr) and read
 *	(*rd) pointers, each assembled from four 16-bit mailbox words.
 *
 * NOTE(review): elided lines include declarations, the in_mb
 * continuation, the NULL guards on wr/rd visible upstream, and
 * "return rval;".
 */
3706 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3710 	mbx_cmd_t *mcp = &mc;
3712 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3713 	    "Entered %s.\n", __func__);
3715 	if (!IS_FWI2_CAPABLE(vha->hw))
3716 		return QLA_FUNCTION_FAILED;
3718 	if (unlikely(pci_channel_offline(vha->hw->pdev)))
3719 		return QLA_FUNCTION_FAILED;
3721 	mcp->mb[0] = MBC_TRACE_CONTROL;
3722 	mcp->mb[1] = TC_FCE_DISABLE;
3723 	mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3724 	mcp->out_mb = MBX_2|MBX_1|MBX_0;
3725 	mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3727 	mcp->tov = MBX_TOV_SECONDS;
3729 	rval = qla2x00_mailbox_command(vha, mcp);
3730 	if (rval != QLA_SUCCESS) {
3731 		ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3732 		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
3733 		    rval, mcp->mb[0], mcp->mb[1]);
3735 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3736 		    "Done %s.\n", __func__);
	/* Write pointer: mb[5..2] high-to-low 16-bit words. */
3739 			*wr = (uint64_t) mcp->mb[5] << 48 |
3740 			    (uint64_t) mcp->mb[4] << 32 |
3741 			    (uint64_t) mcp->mb[3] << 16 |
3742 			    (uint64_t) mcp->mb[2];
	/* Read pointer: mb[9..6] high-to-low 16-bit words. */
3744 			*rd = (uint64_t) mcp->mb[9] << 48 |
3745 			    (uint64_t) mcp->mb[8] << 32 |
3746 			    (uint64_t) mcp->mb[7] << 16 |
3747 			    (uint64_t) mcp->mb[6];
/*
 * qla2x00_get_idma_speed
 *	Query the current iiDMA port speed (MBC_PORT_PARAMS with zeroed
 *	mb[2]/mb[3] = "get").  Speed returned in *port_speed (mb[3]);
 *	raw mailbox words are also copied back via *mb (copy lines
 *	elided in this extract).
 *
 * NOTE(review): elided lines include declarations, the mb[] copy-back
 * under "Return mailbox statuses", guards on out-pointers and
 * "return rval;".
 */
3754 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3755     uint16_t *port_speed, uint16_t *mb)
3759 	mbx_cmd_t *mcp = &mc;
3761 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3762 	    "Entered %s.\n", __func__);
3764 	if (!IS_IIDMA_CAPABLE(vha->hw))
3765 		return QLA_FUNCTION_FAILED;
3767 	mcp->mb[0] = MBC_PORT_PARAMS;
3768 	mcp->mb[1] = loop_id;
	/* mb[2]=mb[3]=0 selects the "query" form of PORT_PARAMS. */
3769 	mcp->mb[2] = mcp->mb[3] = 0;
3770 	mcp->mb[9] = vha->vp_idx;
3771 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3772 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
3773 	mcp->tov = MBX_TOV_SECONDS;
3775 	rval = qla2x00_mailbox_command(vha, mcp);
3777 	/* Return mailbox statuses. */
3784 	if (rval != QLA_SUCCESS) {
3785 		ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3787 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3788 		    "Done %s.\n", __func__);
3790 			*port_speed = mcp->mb[3];
/*
 * qla2x00_set_idma_speed
 *	Set the iiDMA port speed (MBC_PORT_PARAMS); only the low 6 bits
 *	of port_speed are programmed into mb[3].
 *
 * NOTE(review): elided lines include declarations, mb[2] setup, the
 * mb[] copy-back and "return rval;".
 */
3797 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3798     uint16_t port_speed, uint16_t *mb)
3802 	mbx_cmd_t *mcp = &mc;
3804 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3805 	    "Entered %s.\n", __func__);
3807 	if (!IS_IIDMA_CAPABLE(vha->hw))
3808 		return QLA_FUNCTION_FAILED;
3810 	mcp->mb[0] = MBC_PORT_PARAMS;
3811 	mcp->mb[1] = loop_id;
3813 	mcp->mb[3] = port_speed & 0x3F;
3814 	mcp->mb[9] = vha->vp_idx;
3815 	mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3816 	mcp->in_mb = MBX_3|MBX_1|MBX_0;
3817 	mcp->tov = MBX_TOV_SECONDS;
3819 	rval = qla2x00_mailbox_command(vha, mcp);
3821 	/* Return mailbox statuses. */
3828 	if (rval != QLA_SUCCESS) {
3829 		ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3830 		    "Failed=%x.\n", rval);
3832 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3833 		    "Done %s.\n", __func__);
/*
 * qla24xx_report_id_acquisition
 *	Handle the firmware's Report-ID-Acquisition IOCB: decode the
 *	acquired 24-bit port ID and dispatch by rptid_entry->format:
 *	  0 - loop (NL) topology: record port ID for the physical port;
 *	  1 - fabric / point-to-point: detect topology from the flags,
 *	      run the N2N greater-WWPN election and session bookkeeping,
 *	      update the host map and (for vports) find the matching vp;
 *	  2 - RIDA format 2 N2N: record local and remote nport IDs.
 *	Called from interrupt/response-queue context, so heavier work is
 *	deferred to the DPC thread via dpc_flags bits.
 *
 * NOTE(review): this extract elides many lines (switch case labels,
 * if/else braces, early returns, loop bodies) -- the control flow
 * annotations below are best-effort; verify against upstream before
 * relying on them.
 */
3840 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3841     struct vp_rpt_id_entry_24xx *rptid_entry)
3843 	struct qla_hw_data *ha = vha->hw;
3844 	scsi_qla_host_t *vp = NULL;
3845 	unsigned long flags;
3848 	struct fc_port *fcport;
3850 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3851 	    "Entered %s.\n", __func__);
	/* Ignore entries the firmware flagged as bad. */
3853 	if (rptid_entry->entry_status != 0)
	/* Assemble the 24-bit acquired port ID (bytes are LE order). */
3856 	id.b.domain = rptid_entry->port_id[2];
3857 	id.b.area = rptid_entry->port_id[1];
3858 	id.b.al_pa = rptid_entry->port_id[0];
3860 	ha->flags.n2n_ae = 0;
	/* Format 0: loop topology. */
3862 	if (rptid_entry->format == 0) {
3864 		ql_dbg(ql_dbg_async, vha, 0x10b7,
3865 		    "Format 0 : Number of VPs setup %d, number of "
3866 		    "VPs acquired %d.\n", rptid_entry->vp_setup,
3867 		    rptid_entry->vp_acquired);
3868 		ql_dbg(ql_dbg_async, vha, 0x10b8,
3869 		    "Primary port id %02x%02x%02x.\n",
3870 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
3871 		    rptid_entry->port_id[0]);
3872 		ha->current_topology = ISP_CFG_NL;
3873 		qlt_update_host_map(vha, id);
	/* Format 1: fabric or point-to-point. */
3875 	} else if (rptid_entry->format == 1) {
3877 		ql_dbg(ql_dbg_async, vha, 0x10b9,
3878 		    "Format 1: VP[%d] enabled - status %d - with "
3879 		    "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3880 		    rptid_entry->vp_status,
3881 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
3882 		    rptid_entry->port_id[0]);
3883 		ql_dbg(ql_dbg_async, vha, 0x5075,
3884 		    "Format 1: Remote WWPN %8phC.\n",
3885 		    rptid_entry->u.f1.port_name);
3887 		ql_dbg(ql_dbg_async, vha, 0x5075,
3888 		    "Format 1: WWPN %8phC.\n",
	/* Topology bits from the format-1 flags. */
3891 		switch (rptid_entry->u.f1.flags & TOPO_MASK) {
	/* N2N (direct attach): rescan sessions, find peer by WWPN. */
3893 			ha->current_topology = ISP_CFG_N;
3894 			spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3895 			list_for_each_entry(fcport, &vha->vp_fcports, list) {
3896 				fcport->scan_state = QLA_FCPORT_SCAN;
3897 				fcport->n2n_flag = 0;
3900 			fcport = qla2x00_find_fcport_by_wwpn(vha,
3901 			    rptid_entry->u.f1.port_name, 1);
3902 			spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
	/* Known peer: refresh its N2N session state. */
3905 				fcport->plogi_nack_done_deadline = jiffies + HZ;
3906 				fcport->dm_login_expire = jiffies + 2*HZ;
3907 				fcport->scan_state = QLA_FCPORT_FOUND;
3908 				fcport->n2n_flag = 1;
3909 				fcport->keep_nport_handle = 1;
3910 				if (vha->flags.nvme_enabled)
3911 					fcport->fc4f_nvme = 1;
3913 				switch (fcport->disc_state) {
3915 					set_bit(RELOGIN_NEEDED,
3918 				case DSC_DELETE_PEND:
3921 					qlt_schedule_sess_for_deletion(fcport);
	/* WWPN election: higher WWPN initiates the login. */
3926 				if (wwn_to_u64(vha->port_name) >
3927 				    wwn_to_u64(rptid_entry->u.f1.port_name)) {
3929 					vha->d_id.b.al_pa = 1;
3930 					ha->flags.n2n_bigger = 1;
3933 				ql_dbg(ql_dbg_async, vha, 0x5075,
3934 				    "Format 1: assign local id %x remote id %x\n",
3935 				    vha->d_id.b24, id.b24);
3937 				ql_dbg(ql_dbg_async, vha, 0x5075,
3938 				    "Format 1: Remote login - Waiting for WWPN %8phC.\n",
3939 				    rptid_entry->u.f1.port_name);
3940 				ha->flags.n2n_bigger = 0;
	/* Unknown peer: queue creation of a new session. */
3942 			qla24xx_post_newsess_work(vha, &id,
3943 			    rptid_entry->u.f1.port_name,
3944 			    rptid_entry->u.f1.node_name,
3949 			/* if our portname is higher then initiate N2N login */
3951 			set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
3952 			ha->flags.n2n_ae = 1;
	/* FL-port (public loop). */
3956 			ha->current_topology = ISP_CFG_FL;
	/* F-port (fabric). */
3959 			ha->current_topology = ISP_CFG_F;
3965 			ha->flags.gpsc_supported = 1;
3966 			ha->current_topology = ISP_CFG_F;
3967 		/* buffer to buffer credit flag */
3968 		vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
	/* vp_idx 0 == physical port. */
3970 		if (rptid_entry->vp_idx == 0) {
3971 			if (rptid_entry->vp_status == VP_STAT_COMPL) {
3972 				/* FA-WWN is only for physical port */
3973 				if (qla_ini_mode_enabled(vha) &&
3974 				    ha->flags.fawwpn_enabled &&
3975 				    (rptid_entry->u.f1.flags &
3977 					memcpy(vha->port_name,
3978 					    rptid_entry->u.f1.port_name,
3982 				qlt_update_host_map(vha, id);
	/* Defer FC4/FDMI registration to the DPC thread. */
3985 			set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3986 			set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3988 			if (rptid_entry->vp_status != VP_STAT_COMPL &&
3989 			    rptid_entry->vp_status != VP_STAT_ID_CHG) {
3990 				ql_dbg(ql_dbg_mbx, vha, 0x10ba,
3991 				    "Could not acquire ID for VP[%d].\n",
3992 				    rptid_entry->vp_idx);
	/* Locate the vport matching this entry's vp_idx. */
3997 			spin_lock_irqsave(&ha->vport_slock, flags);
3998 			list_for_each_entry(vp, &ha->vp_list, list) {
3999 				if (rptid_entry->vp_idx == vp->vp_idx) {
4004 			spin_unlock_irqrestore(&ha->vport_slock, flags);
4009 			qlt_update_host_map(vp, id);
4012 			 * Cannot configure here as we are still sitting on the
4013 			 * response queue. Handle it in dpc context.
4015 			set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
4016 			set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
4017 			set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
4019 		set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
4020 		qla2xxx_wake_dpc(vha);
	/* Format 2: RIDA N2N with explicit remote nport ID. */
4021 	} else if (rptid_entry->format == 2) {
4022 		ql_dbg(ql_dbg_async, vha, 0x505f,
4023 		    "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
4024 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
4025 		    rptid_entry->port_id[0]);
4027 		ql_dbg(ql_dbg_async, vha, 0x5075,
4028 		    "N2N: Remote WWPN %8phC.\n",
4029 		    rptid_entry->u.f2.port_name);
4031 		/* N2N.  direct connect */
4032 		ha->current_topology = ISP_CFG_N;
4033 		ha->flags.rida_fmt2 = 1;
4034 		vha->d_id.b.domain = rptid_entry->port_id[2];
4035 		vha->d_id.b.area = rptid_entry->port_id[1];
4036 		vha->d_id.b.al_pa = rptid_entry->port_id[0];
4038 		ha->flags.n2n_ae = 1;
4039 		spin_lock_irqsave(&ha->vport_slock, flags);
4040 		qlt_update_vp_map(vha, SET_AL_PA);
4041 		spin_unlock_irqrestore(&ha->vport_slock, flags);
4043 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
4044 			fcport->scan_state = QLA_FCPORT_SCAN;
4045 			fcport->n2n_flag = 0;
4048 		fcport = qla2x00_find_fcport_by_wwpn(vha,
4049 		    rptid_entry->u.f2.port_name, 1);
	/* Known peer: adopt the remote nport ID from the entry. */
4052 			fcport->login_retry = vha->hw->login_retry_count;
4053 			fcport->plogi_nack_done_deadline = jiffies + HZ;
4054 			fcport->scan_state = QLA_FCPORT_FOUND;
4055 			fcport->keep_nport_handle = 1;
4056 			fcport->n2n_flag = 1;
4057 			fcport->d_id.b.domain =
4058 			    rptid_entry->u.f2.remote_nport_id[2];
4059 			fcport->d_id.b.area =
4060 			    rptid_entry->u.f2.remote_nport_id[1];
4061 			fcport->d_id.b.al_pa =
4062 			    rptid_entry->u.f2.remote_nport_id[0];
/*
 * qla24xx_modify_vp_config
 *	Change VP configuration for vha.
 *
 * Input:
 *	vha = adapter block pointer.
 *
 * Returns:
 *	qla2xxx local function return status code.
 */
qla24xx_modify_vp_config(scsi_qla_host_t *vha)
	struct vp_config_entry_24xx *vpmod;
	dma_addr_t vpmod_dma;
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	/* This can be called by the parent */

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
	    "Entered %s.\n", __func__);

	/* IOCB comes from the DMA-coherent pool; zeroed by _zalloc. */
	vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
		ql_log(ql_log_warn, vha, 0x10bc,
		    "Failed to allocate modify VP IOCB.\n");
		return QLA_MEMORY_ALLOC_FAILED;

	/* Build the VP-config IOCB for this vport's index. */
	vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
	vpmod->entry_count = 1;
	vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
	vpmod->vp_count = 1;
	vpmod->vp_index1 = vha->vp_idx;
	vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;

	/* Let target-mode code adjust options before the names are copied. */
	qlt_modify_vp_config(vha, vpmod);

	memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
	memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
	vpmod->entry_count = 1;

	/* IOCB is issued through the physical (base) port. */
	rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10bd,
		    "Failed to issue VP config IOCB (%x).\n", rval);
	} else if (vpmod->comp_status != 0) {
		ql_dbg(ql_dbg_mbx, vha, 0x10be,
		    "Failed to complete IOCB -- error status (%x).\n",
		    vpmod->comp_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
		/*
		 * NOTE(review): this branch looks unreachable -- the
		 * preceding "!= 0" test already catches every non-zero
		 * comp_status, and CS_COMPLETE is presumably 0.  Confirm
		 * and consider collapsing the two checks.
		 */
		ql_dbg(ql_dbg_mbx, vha, 0x10bf,
		    "Failed to complete IOCB -- completion status (%x).\n",
		    le16_to_cpu(vpmod->comp_status));
		rval = QLA_FUNCTION_FAILED;
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
	    "Done %s.\n", __func__);
	/* On success the vport transitions to INITIALIZING. */
	fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);

	dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
/*
 * qla2x00_send_change_request
 *	Receive or disable RSCN request from fabric controller.
 *
 * Input:
 *	ha = adapter block pointer
 *	format = registration format:
 *		1 - Fabric detected registration
 *		2 - N_port detected registration
 *		3 - Full registration
 *		FF - clear registration
 *	vp_idx = Virtual port index
 *
 * Returns:
 *	qla2x00 local function return status code.
 */
qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
	    "Entered %s.\n", __func__);

	/* mb[1] carries the registration format, mb[9] the vport index. */
	mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
	mcp->mb[1] = format;
	mcp->mb[9] = vp_idx;
	mcp->out_mb = MBX_9|MBX_1|MBX_0;
	mcp->in_mb = MBX_0|MBX_1;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	/* Even when the mailbox transport succeeded, the firmware may
	 * reject the request -- check mb[0] for completion status. */
	if (rval == QLA_SUCCESS) {
		if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
/* Dump RISC RAM at 'addr' into the host buffer at 'req_dma'. */
qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
	    "Entered %s.\n", __func__);

	/* Use the extended form when the address needs more than 16 bits
	 * or the ISP is FWI2-capable; legacy command otherwise. */
	if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
		mcp->mb[8] = MSW(addr);
		mcp->out_mb = MBX_8|MBX_0;
		mcp->mb[0] = MBC_DUMP_RISC_RAM;
		mcp->out_mb = MBX_0;
	/* 64-bit DMA address of the destination buffer. */
	mcp->mb[1] = LSW(addr);
	mcp->mb[2] = MSW(req_dma);
	mcp->mb[3] = LSW(req_dma);
	mcp->mb[6] = MSW(MSD(req_dma));
	mcp->mb[7] = LSW(MSD(req_dma));
	mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
	/* FWI2 parts take a 32-bit size split over mb[4]/mb[5]. */
	if (IS_FWI2_CAPABLE(vha->hw)) {
		mcp->mb[4] = MSW(size);
		mcp->mb[5] = LSW(size);
		mcp->out_mb |= MBX_5|MBX_4;
		mcp->mb[4] = LSW(size);
		mcp->out_mb |= MBX_4;

	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1008,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
		    "Done %s.\n", __func__);
/* 84XX Support **************************************************************/

/*
 * Request/response buffer for CS84xx chip-verify management commands.
 * The entry and response presumably overlay each other (single DMA
 * allocation reused for both directions) -- confirm against the full
 * structure declaration.
 */
struct cs84xx_mgmt_cmd {
	struct verify_chip_entry_84xx req;
	struct verify_chip_rsp_84xx rsp;
/*
 * Verify (and optionally update) the CS84xx firmware via a Verify Chip
 * IOCB.  On return, status[0] holds the completion status and status[1]
 * the failure code (when status[0] == CS_VCS_CHIP_FAILURE).
 */
qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
	struct cs84xx_mgmt_cmd *mn;
	unsigned long flags;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
	    "Entered %s.\n", __func__);

	mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
		return QLA_MEMORY_ALLOC_FAILED;

	/* Force a firmware update only when one is pending. */
	options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
	/* Diagnostic firmware? */
	/* options |= MENLO_DIAG_FW; */
	/* We update the firmware with only one data sequence. */
	options |= VCO_END_OF_DATA;

	memset(mn, 0, sizeof(*mn));
	mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->p.req.entry_count = 1;
	mn->p.req.options = cpu_to_le16(options);

	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
	    "Dump of Verify Request.\n");
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,

	/* Firmware update can take a long time -- 120s IOCB timeout. */
	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10cb,
		    "Failed to issue verify IOCB (%x).\n", rval);

	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
	    "Dump of Verify Response.\n");
	ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,

	status[0] = le16_to_cpu(mn->p.rsp.comp_status);
	status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
	    le16_to_cpu(mn->p.rsp.failure_code) : 0;
	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
	    "cs=%x fc=%x.\n", status[0], status[1]);

	if (status[0] != CS_COMPLETE) {
		rval = QLA_FUNCTION_FAILED;
		/* On update failure, retry once verify-only (no update). */
		if (!(options & VCO_DONT_UPDATE_FW)) {
			ql_dbg(ql_dbg_mbx, vha, 0x10cf,
			    "Firmware update failed. Retrying "
			    "without update firmware.\n");
			options |= VCO_DONT_UPDATE_FW;
			options &= ~VCO_FORCE_UPDATE;
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
		    "Firmware updated to %x.\n",
		    le32_to_cpu(mn->p.rsp.fw_ver));

		/* NOTE: we only update OP firmware. */
		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		ha->cs84xx->op_fw_version =
		    le32_to_cpu(mn->p.rsp.fw_ver);
		spin_unlock_irqrestore(&ha->cs84xx->access_lock,

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10d1,
		    "Failed=%x.\n", rval);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
		    "Done %s.\n", __func__);
/* Initialize a multiqueue request queue in the firmware (MBC 0x1f). */
qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
	unsigned long flags;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	/* Nothing to program until the firmware is up. */
	if (!ha->flags.fw_started)

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
	    "Entered %s.\n", __func__);

	/* BIT_13 enables shadow registers where supported. */
	if (IS_SHADOW_REG_CAPABLE(ha))
		req->options |= BIT_13;

	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
	mcp->mb[1] = req->options;
	/* 64-bit DMA base of the queue ring. */
	mcp->mb[2] = MSW(LSD(req->dma));
	mcp->mb[3] = LSW(LSD(req->dma));
	mcp->mb[6] = MSW(MSD(req->dma));
	mcp->mb[7] = LSW(MSD(req->dma));
	mcp->mb[5] = req->length;
	mcp->mb[10] = req->rsp->id;
	mcp->mb[12] = req->qos;
	mcp->mb[11] = req->vp_idx;
	mcp->mb[13] = req->rid;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))

	mcp->mb[4] = req->id;
	/* que in ptr index */
	/* que out ptr index */
	mcp->mb[9] = *req->out_ptr = 0;
	mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
			MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->flags = MBX_DMA_OUT;
	/* Queue creation is given twice the normal mailbox timeout. */
	mcp->tov = MBX_TOV_SECONDS * 2;

	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
		mcp->in_mb |= MBX_1;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		mcp->out_mb |= MBX_15;
		/* debug q create issue in SR-IOV */
		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;

	/* Reset the hardware in/out indices unless BIT_0 (queue update)
	 * is set; done under hardware_lock to serialize register access. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (!(req->options & BIT_0)) {
		WRT_REG_DWORD(req->req_q_in, 0);
		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
			WRT_REG_DWORD(req->req_q_out, 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10d4,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
		    "Done %s.\n", __func__);
/* Initialize a multiqueue response queue in the firmware (MBC 0x1f). */
qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
	unsigned long flags;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	/* Nothing to program until the firmware is up. */
	if (!ha->flags.fw_started)

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
	    "Entered %s.\n", __func__);

	/* BIT_13 enables shadow registers where supported. */
	if (IS_SHADOW_REG_CAPABLE(ha))
		rsp->options |= BIT_13;

	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
	mcp->mb[1] = rsp->options;
	/* 64-bit DMA base of the queue ring. */
	mcp->mb[2] = MSW(LSD(rsp->dma));
	mcp->mb[3] = LSW(LSD(rsp->dma));
	mcp->mb[6] = MSW(MSD(rsp->dma));
	mcp->mb[7] = LSW(MSD(rsp->dma));
	mcp->mb[5] = rsp->length;
	/* MSI-X vector servicing this response queue. */
	mcp->mb[14] = rsp->msix->entry;
	mcp->mb[13] = rsp->rid;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))

	mcp->mb[4] = rsp->id;
	/* que in ptr index */
	mcp->mb[8] = *rsp->in_ptr = 0;
	/* que out ptr index */
	mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
			|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->flags = MBX_DMA_OUT;
	/* Queue creation is given twice the normal mailbox timeout. */
	mcp->tov = MBX_TOV_SECONDS * 2;

	if (IS_QLA81XX(ha)) {
		mcp->out_mb |= MBX_12|MBX_11|MBX_10;
		mcp->in_mb |= MBX_1;
	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
		mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
		mcp->in_mb |= MBX_1;
		/* debug q create issue in SR-IOV */
		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;

	/* Reset the hardware in/out indices unless BIT_0 (queue update)
	 * is set; done under hardware_lock to serialize register access. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (!(rsp->options & BIT_0)) {
		WRT_REG_DWORD(rsp->rsp_q_out, 0);
		if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
			WRT_REG_DWORD(rsp->rsp_q_in, 0);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10d7,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
		    "Done %s.\n", __func__);
/* Acknowledge an Inter-Driver Communication (IDC) message by echoing
 * the received registers back to the firmware. */
qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_IDC_ACK;
	/* mb[1..7] carry the QLA_IDC_ACK_REGS words received in the AEN. */
	memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
	mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10da,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
		    "Done %s.\n", __func__);
/* Query the flash sector size via Flash Access Control (FAC).
 * On success the size is returned through *sector_size (from mb[1]). */
qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
	    "Entered %s.\n", __func__);

	/* FAC is only implemented on 81xx/83xx/27xx/28xx parts. */
	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
	mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10dd,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
		    "Done %s.\n", __func__);
		*sector_size = mcp->mb[1];
/* Enable (enable != 0) or protect flash writes via FAC. */
qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
	mbx_cmd_t *mcp = &mc;

	/* FAC is only implemented on 81xx/83xx/27xx/28xx parts. */
	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
	mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
	    FAC_OPT_CMD_WRITE_PROTECT;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e0,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
		    "Done %s.\n", __func__);
/* Erase the flash sector range [start, finish] via FAC. */
qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
	mbx_cmd_t *mcp = &mc;

	/* FAC is only implemented on 81xx/83xx/27xx/28xx parts. */
	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
	    !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
	mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
	/* 32-bit start/finish addresses split LSW/MSW across registers. */
	mcp->mb[2] = LSW(start);
	mcp->mb[3] = MSW(start);
	mcp->mb[4] = LSW(finish);
	mcp->mb[5] = MSW(finish);
	mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e3,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
		    "Done %s.\n", __func__);
/* Take (lock != 0) or release the FAC flash semaphore. */
qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
	int rval = QLA_SUCCESS;
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	/* Silently succeed on parts without FAC (rval preset above). */
	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
	mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
	    FAC_OPT_CMD_UNLOCK_SEMAPHORE);
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e3,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
		    "Done %s.\n", __func__);
/* Ask the firmware to restart the MPI (management) processor. */
qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_RESTART_MPI_FW;
	mcp->out_mb = MBX_0;
	mcp->in_mb = MBX_0|MBX_1;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e6,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
		    "Done %s.\n", __func__);
/* Report the driver version string to P3P (82xx) firmware via the
 * RNID "set version" mailbox, packing the string two bytes per
 * mailbox register (mb[4]..mb[15]). */
qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	if (!IS_P3P_TYPE(ha))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
	    "Entered %s.\n", __func__);

	str = (void *)version;
	len = strlen(version);

	mcp->mb[0] = MBC_SET_RNID_PARAMS;
	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
	mcp->out_mb = MBX_1|MBX_0;
	/* Two version-string bytes per 16-bit mailbox register. */
	for (i = 4; i < 16 && len; i++, str++, len -= 2) {
		mcp->mb[i] = cpu_to_le16p(str);
		mcp->out_mb |= 1<<i;
	/* Remaining registers are still marked as outgoing (presumably
	 * zero-padded -- the assignment is elided here). */
	for (; i < 16; i++) {
		mcp->out_mb |= 1<<i;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x117c,
		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
		    "Done %s.\n", __func__);
/* Report the driver version string to 25xx-and-later firmware via a
 * DMA buffer (RNID "set version"), rather than inline mailbox words. */
qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	/* Not supported on pre-FWI2 parts or on 24xx/81xx-class chips. */
	if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
	    "Entered %s.\n", __func__);

	str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
		ql_log(ql_log_warn, vha, 0x117f,
		    "Failed to allocate driver version param.\n");
		return QLA_MEMORY_ALLOC_FAILED;

	/* 4-byte header preceding the version text; the magic bytes are
	 * a firmware-defined descriptor -- confirm against the spec. */
	memcpy(str, "\x7\x3\x11\x0", 4);
	len = dwlen * 4 - 4;
	memset(str + 4, 0, len);
	/* Truncate to the available buffer space. */
	if (len > strlen(version))
		len = strlen(version);
	memcpy(str + 4, version, len);

	mcp->mb[0] = MBC_SET_RNID_PARAMS;
	/* High byte = RNID subtype, low byte = buffer length in dwords. */
	mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
	mcp->mb[2] = MSW(LSD(str_dma));
	mcp->mb[3] = LSW(LSD(str_dma));
	mcp->mb[6] = MSW(MSD(str_dma));
	mcp->mb[7] = LSW(MSD(str_dma));
	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1180,
		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
		    "Done %s.\n", __func__);

	dma_pool_free(ha->s_dma_pool, str, str_dma);
/* Fetch the port-login (PLOGI) payload template from firmware into
 * 'buf' (bufsiz bytes, DMA-mapped at buf_dma) and byte-swap it to
 * host order in place. */
qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
    void *buf, uint16_t bufsiz)
	mbx_cmd_t *mcp = &mc;

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RNID_PARAMS;
	mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
	mcp->mb[2] = MSW(buf_dma);
	mcp->mb[3] = LSW(buf_dma);
	mcp->mb[6] = MSW(MSD(buf_dma));
	mcp->mb[7] = LSW(MSD(buf_dma));
	/* Buffer size is expressed in dwords. */
	mcp->mb[8] = bufsiz/4;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x115a,
		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
		    "Done %s.\n", __func__);
		/* Template arrives little-endian; convert dwords in place.
		 * NOTE(review): loop covers bufsiz-4 bytes -- presumably the
		 * trailing dword is excluded by design; confirm. */
		bp = (uint32_t *) buf;
		for (i = 0; i < (bufsiz-4)/4; i++, bp++)
			*bp = le32_to_cpu(*bp);
/* Read the ASIC temperature through the RNID "ASIC temp" mailbox.
 * The value is returned to the caller via *temp (assignment elided
 * in this view -- presumably from mcp->mb[1]). */
qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
	mbx_cmd_t *mcp = &mc;

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
	    "Entered %s.\n", __func__);

	mcp->mb[0] = MBC_GET_RNID_PARAMS;
	mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x115a,
		    "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
		    "Done %s.\n", __func__);
/* Read 'len' bytes from the SFP transceiver (I2C device 'dev', offset
 * 'off') into the host buffer at sfp_dma. */
qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
    uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_READ_SFP;
	/* 64-bit DMA address of the destination buffer. */
	mcp->mb[2] = MSW(sfp_dma);
	mcp->mb[3] = LSW(sfp_dma);
	mcp->mb[6] = MSW(MSD(sfp_dma));
	mcp->mb[7] = LSW(MSD(sfp_dma));
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10e9,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		/* Firmware reports 0x22 in mb[1] when no SFP is fitted --
		 * translate that to an interface error for the caller. */
		if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
			/* sfp is not there */
			rval = QLA_INTERFACE_ERROR;
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
		    "Done %s.\n", __func__);
/* Write 'len' bytes from the host buffer at sfp_dma to the SFP
 * transceiver (I2C device 'dev', offset 'off'). */
qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
    uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_WRITE_SFP;
	/* 64-bit DMA address of the source buffer. */
	mcp->mb[2] = MSW(sfp_dma);
	mcp->mb[3] = LSW(sfp_dma);
	mcp->mb[6] = MSW(MSD(sfp_dma));
	mcp->mb[7] = LSW(MSD(sfp_dma));
	mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10ec,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
		    "Done %s.\n", __func__);
/* Retrieve XGMAC (10GbE MAC) statistics into the buffer at stats_dma.
 * The firmware reports how much it wrote via mb[2] (in dwords); the
 * byte count is returned through *actual_size. */
qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
    uint16_t size_in_bytes, uint16_t *actual_size)
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
	    "Entered %s.\n", __func__);

	/* XGMAC stats only exist on converged (FCoE-capable) adapters. */
	if (!IS_CNA_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_GET_XGMAC_STATS;
	mcp->mb[2] = MSW(stats_dma);
	mcp->mb[3] = LSW(stats_dma);
	mcp->mb[6] = MSW(MSD(stats_dma));
	mcp->mb[7] = LSW(MSD(stats_dma));
	/* Buffer size is passed in dwords (bytes >> 2). */
	mcp->mb[8] = size_in_bytes >> 2;
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10ef,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
		    "Done %s.\n", __func__);

		/* Convert the dword count back to bytes for the caller. */
		*actual_size = mcp->mb[2] << 2;
/* Fetch the DCBX parameter TLV block into the buffer at tlv_dma. */
qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
	    "Entered %s.\n", __func__);

	/* DCBX only exists on converged (FCoE-capable) adapters. */
	if (!IS_CNA_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_GET_DCBX_PARAMS;
	mcp->mb[2] = MSW(tlv_dma);
	mcp->mb[3] = LSW(tlv_dma);
	mcp->mb[6] = MSW(MSD(tlv_dma));
	mcp->mb[7] = LSW(MSD(tlv_dma));
	mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10f2,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
		    "Done %s.\n", __func__);
/* Read one 32-bit word of RISC RAM at risc_addr into *data. */
qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	/* 32-bit address split across mb[1] (LSW) and mb[8] (MSW). */
	mcp->mb[0] = MBC_READ_RAM_EXTENDED;
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[8] = MSW(risc_addr);
	mcp->out_mb = MBX_8|MBX_1|MBX_0;
	mcp->in_mb = MBX_3|MBX_2|MBX_0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10f5,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
		    "Done %s.\n", __func__);
		/* Result arrives split: mb[3] = MSW, mb[2] = LSW. */
		*data = mcp->mb[3] << 16 | mcp->mb[2];
/* Run a diagnostic loopback test described by 'mreq'; the raw mailbox
 * results (64 bytes) are copied back into 'mresp' for the caller. */
qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
	mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing

	/* transfer count */
	mcp->mb[10] = LSW(mreq->transfer_size);
	mcp->mb[11] = MSW(mreq->transfer_size);

	/* send data address */
	mcp->mb[14] = LSW(mreq->send_dma);
	mcp->mb[15] = MSW(mreq->send_dma);
	mcp->mb[20] = LSW(MSD(mreq->send_dma));
	mcp->mb[21] = MSW(MSD(mreq->send_dma));

	/* receive data address */
	mcp->mb[16] = LSW(mreq->rcv_dma);
	mcp->mb[17] = MSW(mreq->rcv_dma);
	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));

	/* Iteration count */
	mcp->mb[18] = LSW(mreq->iteration_count);
	mcp->mb[19] = MSW(mreq->iteration_count);

	mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
	    MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
	/* CNA parts additionally take mb[2] (contents elided here). */
	if (IS_CNA_CAPABLE(vha->hw))
		mcp->out_mb |= MBX_2;
	mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;

	mcp->buf_size = mreq->transfer_size;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10f8,
		    "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
		    "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
		    mcp->mb[3], mcp->mb[18], mcp->mb[19]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
		    "Done %s.\n", __func__);

	/* Copy mailbox information */
	memcpy( mresp, mcp->mb, 64);
/* Run a diagnostic ECHO test described by 'mreq'; the raw mailbox
 * results (64 bytes) are copied back into 'mresp' for the caller. */
qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
	    "Entered %s.\n", __func__);

	memset(mcp->mb, 0 , sizeof(mcp->mb));
	mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
	/* BIT_6 specifies 64bit address */
	mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
	/* Converged adapters additionally select the FCF index. */
	if (IS_CNA_CAPABLE(ha)) {
		mcp->mb[2] = vha->fcoe_fcf_idx;

	/* Receive buffer 64-bit DMA address. */
	mcp->mb[16] = LSW(mreq->rcv_dma);
	mcp->mb[17] = MSW(mreq->rcv_dma);
	mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
	mcp->mb[7] = MSW(MSD(mreq->rcv_dma));

	mcp->mb[10] = LSW(mreq->transfer_size);

	/* Send buffer 64-bit DMA address. */
	mcp->mb[14] = LSW(mreq->send_dma);
	mcp->mb[15] = MSW(mreq->send_dma);
	mcp->mb[20] = LSW(MSD(mreq->send_dma));
	mcp->mb[21] = MSW(MSD(mreq->send_dma));

	mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
	    MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
	if (IS_CNA_CAPABLE(ha))
		mcp->out_mb |= MBX_2;

	/* Expected return registers vary by chip generation. */
	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
	    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
		mcp->in_mb |= MBX_1;
	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
		mcp->in_mb |= MBX_3;

	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	mcp->buf_size = mreq->transfer_size;

	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x10fb,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
		    "Done %s.\n", __func__);

	/* Copy mailbox information */
	memcpy(mresp, mcp->mb, 64);
/* Reset the CS84xx chip, optionally leaving it in diagnostic mode. */
qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
	    "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);

	mcp->mb[0] = MBC_ISP84XX_RESET;
	mcp->mb[1] = enable_diagnostic;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
	rval = qla2x00_mailbox_command(vha, mcp);

	if (rval != QLA_SUCCESS)
		ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
		    "Done %s.\n", __func__);
/* Write one 32-bit word 'data' to RISC RAM at risc_addr. */
qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(vha->hw))
		return QLA_FUNCTION_FAILED;

	/* Address split across mb[1]/mb[8]; data across mb[2]/mb[3]. */
	mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
	mcp->mb[1] = LSW(risc_addr);
	mcp->mb[2] = LSW(data);
	mcp->mb[3] = MSW(data);
	mcp->mb[8] = MSW(risc_addr);
	mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_1|MBX_0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1101,
		    "Failed=%x mb[0]=%x mb[1]=%x.\n",
		    rval, mcp->mb[0], mcp->mb[1]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
		    "Done %s.\n", __func__);
/*
 * Write MPI registers by driving the mailbox hardware directly
 * (register writes + host-interrupt poll) instead of going through
 * qla2x00_mailbox_command() -- presumably because this is used when
 * the normal mailbox path is unavailable; confirm against callers.
 */
qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
	uint32_t stat, timer;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
	    "Entered %s.\n", __func__);

	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	/* Write the MBC data to the registers */
	WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
	WRT_REG_WORD(&reg->mailbox1, mb[0]);
	WRT_REG_WORD(&reg->mailbox2, mb[1]);
	WRT_REG_WORD(&reg->mailbox3, mb[2]);
	WRT_REG_WORD(&reg->mailbox4, mb[3]);

	/* Kick the firmware by setting the host interrupt. */
	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);

	/* Poll for MBC interrupt */
	for (timer = 6000000; timer; timer--) {
		/* Check for pending interrupts. */
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_INT) {
			/* Only mailbox-completion status codes are ours;
			 * capture mb0 and clear the RISC interrupt. */
			if (stat == 0x1 || stat == 0x2 ||
			    stat == 0x10 || stat == 0x11) {
				set_bit(MBX_INTERRUPT,
				    &ha->mbx_cmd_flags);
				mb0 = RD_REG_WORD(&reg->mailbox0);
				WRT_REG_DWORD(&reg->hccr,
				    HCCRX_CLR_RISC_INT);
				RD_REG_DWORD(&reg->hccr);

	/* Completion seen => use firmware status; otherwise timeout. */
	if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
		rval = mb0 & MBS_MASK;
		rval = QLA_FUNCTION_FAILED;

	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1104,
		    "Failed=%x mb[0]=%x.\n", rval, mb[0]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
		    "Done %s.\n", __func__);
/* Set the specified data rate */
/* Program the link data rate from ha->set_data_rate (MBC_DATA_RATE);
 * unrecognized speed settings fall back to autonegotiation. */
qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
	    "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	memset(mcp, 0, sizeof(*mcp));
	/* Validate the requested speed; anything else means autoneg. */
	switch (ha->set_data_rate) {
	case PORT_SPEED_AUTO:
	case PORT_SPEED_4GB:
	case PORT_SPEED_8GB:
	case PORT_SPEED_16GB:
	case PORT_SPEED_32GB:
		val = ha->set_data_rate;
		ql_log(ql_log_warn, vha, 0x1199,
		    "Unrecognized speed setting:%d. Setting Autoneg\n",
		val = ha->set_data_rate = PORT_SPEED_AUTO;

	mcp->mb[0] = MBC_DATA_RATE;
	mcp->out_mb = MBX_2|MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mcp->in_mb |= MBX_4|MBX_3;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1107,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		/* mb[1] == 0x7 presumably means "unchanged"; only log
		 * the resulting speed otherwise -- confirm. */
		if (mcp->mb[1] != 0x7)
			ql_dbg(ql_dbg_mbx, vha, 0x1179,
			    "Speed set:0x%x\n", mcp->mb[1]);

		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
		    "Done %s.\n", __func__);
/* Query the current link data rate and cache it in ha->link_data_rate. */
qla2x00_get_data_rate(scsi_qla_host_t *vha)
	mbx_cmd_t *mcp = &mc;
	struct qla_hw_data *ha = vha->hw;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
	    "Entered %s.\n", __func__);

	if (!IS_FWI2_CAPABLE(ha))
		return QLA_FUNCTION_FAILED;

	mcp->mb[0] = MBC_DATA_RATE;
	mcp->mb[1] = QLA_GET_DATA_RATE;
	mcp->out_mb = MBX_1|MBX_0;
	mcp->in_mb = MBX_2|MBX_1|MBX_0;
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
		mcp->in_mb |= MBX_3;
	mcp->tov = MBX_TOV_SECONDS;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x1107,
		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
		    "Done %s.\n", __func__);
		/* mb[1] == 0x7 presumably means "rate unknown" -- only
		 * cache the value otherwise; confirm against firmware spec. */
		if (mcp->mb[1] != 0x7)
			ha->link_data_rate = mcp->mb[1];
/*
 * qla81xx_get_port_config() - read the port configuration words via
 * MBC_GET_PORT_CONFIG.
 *
 * @mb: caller buffer receiving four 16-bit words copied verbatim from
 *      mailbox registers 1-4 on success.
 *
 * Supported on 81xx/83xx/8044/27xx/28xx only.
 */
5426 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5430 mbx_cmd_t *mcp = &mc;
5431 struct qla_hw_data *ha = vha->hw;
5433 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5434 "Entered %s.\n", __func__);
5436 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5437 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5438 return QLA_FUNCTION_FAILED;
5439 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5440 mcp->out_mb = MBX_0;
5441 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5442 mcp->tov = MBX_TOV_SECONDS;
5445 rval = qla2x00_mailbox_command(vha, mcp);
5447 if (rval != QLA_SUCCESS) {
5448 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5449 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
/* Raw copy of mb[1..4] so no bit of the config words is lost. */
5451 /* Copy all bits to preserve original value */
5452 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5454 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5455 "Done %s.\n", __func__);
/*
 * qla81xx_set_port_config() - write four port-configuration words via
 * MBC_SET_PORT_CONFIG.
 *
 * @mb: caller-supplied array of four 16-bit words, copied verbatim into
 *      mailbox registers 1-4 (inverse of qla81xx_get_port_config()).
 */
5461 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5465 mbx_cmd_t *mcp = &mc;
5467 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5468 "Entered %s.\n", __func__);
5470 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5471 /* Copy all bits to preserve original setting */
5472 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5473 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5475 mcp->tov = MBX_TOV_SECONDS;
5477 rval = qla2x00_mailbox_command(vha, mcp);
5479 if (rval != QLA_SUCCESS) {
5480 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5481 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5483 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5484 "Done %s.\n", __func__);
/*
 * qla24xx_set_fcp_prio() - set FCP priority for a logged-in port via
 * MBC_PORT_PARAMS (ISP24xx-type and 25xx only).
 *
 * @loop_id:  target port's loop ID (mb[1]).
 * @priority: priority value; only the low nibble is sent (mb[4] & 0xf).
 *
 * mb[9] carries the issuing vport index.
 */
5491 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5496 mbx_cmd_t *mcp = &mc;
5497 struct qla_hw_data *ha = vha->hw;
5499 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5500 "Entered %s.\n", __func__);
5502 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5503 return QLA_FUNCTION_FAILED;
5505 mcp->mb[0] = MBC_PORT_PARAMS;
5506 mcp->mb[1] = loop_id;
/* Sub-operation depends on whether FCP priority is currently enabled. */
5507 if (ha->flags.fcp_prio_enabled)
/* Firmware accepts only a 4-bit priority value. */
5511 mcp->mb[4] = priority & 0xf;
5512 mcp->mb[9] = vha->vp_idx;
5513 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5514 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5517 rval = qla2x00_mailbox_command(vha, mcp);
5525 if (rval != QLA_SUCCESS) {
5526 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5528 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5529 "Done %s.\n", __func__);
/*
 * qla2x00_get_thermal_temp() - read the adapter temperature.
 *
 * @temp: out parameter receiving the temperature reading.
 *
 * Path depends on the chip: 25xx boards with specific QLogic/HP
 * subsystem IDs read it from the SFP transceiver (qla2x00_read_sfp()),
 * 82xx/8044 use their register-based readers, everything else that
 * supports it falls through to qla2x00_read_asic_temperature().
 * Pre-FWI2, 24xx-type and 81xx parts are rejected as unsupported.
 */
5536 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5538 int rval = QLA_FUNCTION_FAILED;
5539 struct qla_hw_data *ha = vha->hw;
5542 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5543 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5544 "Thermal not supported by this card.\n");
/* 25xx: thermal data lives in the SFP; exact offsets vary by board. */
5548 if (IS_QLA25XX(ha)) {
5549 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5550 ha->pdev->subsystem_device == 0x0175) {
5551 rval = qla2x00_read_sfp(vha, 0, &byte,
5552 0x98, 0x1, 1, BIT_13|BIT_0);
5556 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5557 ha->pdev->subsystem_device == 0x338e) {
5558 rval = qla2x00_read_sfp(vha, 0, &byte,
5559 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5563 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5564 "Thermal not supported by this card.\n");
/* P3P parts have dedicated temperature readers. */
5568 if (IS_QLA82XX(ha)) {
5569 *temp = qla82xx_read_temperature(vha);
5572 } else if (IS_QLA8044(ha)) {
5573 *temp = qla8044_read_temperature(vha);
5578 rval = qla2x00_read_asic_temperature(vha, temp);
/*
 * qla82xx_mbx_intr_enable() - ask firmware to enable interrupt delivery
 * via MBC_TOGGLE_INTERRUPT (FWI2-capable adapters).
 */
5583 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5586 struct qla_hw_data *ha = vha->hw;
5588 mbx_cmd_t *mcp = &mc;
5590 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5591 "Entered %s.\n", __func__);
5593 if (!IS_FWI2_CAPABLE(ha))
5594 return QLA_FUNCTION_FAILED;
5596 memset(mcp, 0, sizeof(mbx_cmd_t));
5597 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5600 mcp->out_mb = MBX_1|MBX_0;
5605 rval = qla2x00_mailbox_command(vha, mcp);
5606 if (rval != QLA_SUCCESS) {
5607 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5608 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5610 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5611 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_intr_disable() - ask firmware to disable interrupt
 * delivery via MBC_TOGGLE_INTERRUPT.
 *
 * Note: unlike the enable counterpart this is gated on IS_P3P_TYPE()
 * (82xx/8044 family) rather than IS_FWI2_CAPABLE().
 */
5618 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5621 struct qla_hw_data *ha = vha->hw;
5623 mbx_cmd_t *mcp = &mc;
5625 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5626 "Entered %s.\n", __func__);
5628 if (!IS_P3P_TYPE(ha))
5629 return QLA_FUNCTION_FAILED;
5631 memset(mcp, 0, sizeof(mbx_cmd_t));
5632 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5635 mcp->out_mb = MBX_1|MBX_0;
5640 rval = qla2x00_mailbox_command(vha, mcp);
5641 if (rval != QLA_SUCCESS) {
5642 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5643 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5645 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5646 "Done %s.\n", __func__);
/*
 * qla82xx_md_get_template_size() - query the minidump template size via
 * MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE / RQST_TMPLT_SIZE.
 *
 * The 32-bit command and sub-code are split LSW/MSW across mb[0..3].
 * On completion the size is assembled from mb[3]:mb[2] into
 * ha->md_template_size; a zero size is treated as failure.
 */
5653 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5655 struct qla_hw_data *ha = vha->hw;
5657 mbx_cmd_t *mcp = &mc;
5658 int rval = QLA_FUNCTION_FAILED;
5660 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5661 "Entered %s.\n", __func__);
5663 memset(mcp->mb, 0 , sizeof(mcp->mb));
5664 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5665 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5666 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5667 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5669 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5670 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5671 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5673 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5674 mcp->tov = MBX_TOV_SECONDS;
5675 rval = qla2x00_mailbox_command(vha, mcp);
5677 /* Always copy back return mailbox values. */
5678 if (rval != QLA_SUCCESS) {
5679 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5680 "mailbox command FAILED=0x%x, subcode=%x.\n",
5681 (mcp->mb[1] << 16) | mcp->mb[0],
5682 (mcp->mb[3] << 16) | mcp->mb[2]);
5684 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5685 "Done %s.\n", __func__);
/* Reassemble the 32-bit template size from the two mailbox words. */
5686 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5687 if (!ha->md_template_size) {
5688 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5689 "Null template size obtained.\n");
5690 rval = QLA_FUNCTION_FAILED;
/*
 * qla82xx_md_get_template() - fetch the full minidump template into a
 * freshly DMA-allocated buffer (ha->md_tmplt_hdr).
 *
 * Requires ha->md_template_size to have been set beforehand (see
 * qla82xx_md_get_template_size()).  The 64-bit DMA address and the
 * 32-bit length are split across mb[4..9] LSW/MSW style.
 */
5697 qla82xx_md_get_template(scsi_qla_host_t *vha)
5699 struct qla_hw_data *ha = vha->hw;
5701 mbx_cmd_t *mcp = &mc;
5702 int rval = QLA_FUNCTION_FAILED;
5704 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5705 "Entered %s.\n", __func__);
5707 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5708 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5709 if (!ha->md_tmplt_hdr) {
5710 ql_log(ql_log_warn, vha, 0x1124,
5711 "Unable to allocate memory for Minidump template.\n");
5715 memset(mcp->mb, 0 , sizeof(mcp->mb));
5716 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5717 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5718 mcp->mb[2] = LSW(RQST_TMPLT);
5719 mcp->mb[3] = MSW(RQST_TMPLT);
/* 64-bit DMA address, 16 bits per mailbox register. */
5720 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5721 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5722 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5723 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5724 mcp->mb[8] = LSW(ha->md_template_size);
5725 mcp->mb[9] = MSW(ha->md_template_size);
5727 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5728 mcp->tov = MBX_TOV_SECONDS;
5729 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5730 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5731 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5732 rval = qla2x00_mailbox_command(vha, mcp);
5734 if (rval != QLA_SUCCESS) {
5735 ql_dbg(ql_dbg_mbx, vha, 0x1125,
5736 "mailbox command FAILED=0x%x, subcode=%x.\n",
5737 ((mcp->mb[1] << 16) | mcp->mb[0]),
5738 ((mcp->mb[3] << 16) | mcp->mb[2]));
5740 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5741 "Done %s.\n", __func__);
/*
 * qla8044_md_get_template() - fetch the minidump template for ISP8044 in
 * MINIDUMP_SIZE_36K chunks, looping until ha->md_template_size bytes
 * have been transferred into the DMA buffer allocated here.
 */
5746 qla8044_md_get_template(scsi_qla_host_t *vha)
5748 struct qla_hw_data *ha = vha->hw;
5750 mbx_cmd_t *mcp = &mc;
5751 int rval = QLA_FUNCTION_FAILED;
5752 int offset = 0, size = MINIDUMP_SIZE_36K;
5754 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5755 "Entered %s.\n", __func__);
5757 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5758 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5759 if (!ha->md_tmplt_hdr) {
5760 ql_log(ql_log_warn, vha, 0xb11b,
5761 "Unable to allocate memory for Minidump template.\n");
5765 memset(mcp->mb, 0 , sizeof(mcp->mb));
/* Pull the template one chunk at a time, advancing the DMA address. */
5766 while (offset < ha->md_template_size) {
5767 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5768 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5769 mcp->mb[2] = LSW(RQST_TMPLT);
5770 mcp->mb[3] = MSW(RQST_TMPLT);
5771 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5772 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5773 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5774 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5775 mcp->mb[8] = LSW(size);
5776 mcp->mb[9] = MSW(size);
5777 mcp->mb[10] = offset & 0x0000FFFF;
/*
 * NOTE(review): mb[] registers are 16-bit, so "offset & 0xFFFF0000"
 * always truncates to 0 here; the upper half of the offset was most
 * likely meant to be MSW(offset).  Harmless while the template stays
 * under 64K, but worth confirming against the firmware spec.
 */
5778 mcp->mb[11] = offset & 0xFFFF0000;
5779 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5780 mcp->tov = MBX_TOV_SECONDS;
5781 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5782 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5783 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5784 rval = qla2x00_mailbox_command(vha, mcp);
5786 if (rval != QLA_SUCCESS) {
5787 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5788 "mailbox command FAILED=0x%x, subcode=%x.\n",
5789 ((mcp->mb[1] << 16) | mcp->mb[0]),
5790 ((mcp->mb[3] << 16) | mcp->mb[2]));
5793 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5794 "Done %s.\n", __func__);
5795 offset = offset + size;
/*
 * qla81xx_set_led_config() - program LED behavior via MBC_SET_LED_CONFIG
 * (81xx and 8031 only).
 *
 * @led_cfg: two config words for 81xx; six words on 8031, which gets the
 *           extra mb[3..6] registers as well.
 */
5801 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5804 struct qla_hw_data *ha = vha->hw;
5806 mbx_cmd_t *mcp = &mc;
5808 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5809 return QLA_FUNCTION_FAILED;
5811 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
5812 "Entered %s.\n", __func__);
5814 memset(mcp, 0, sizeof(mbx_cmd_t));
5815 mcp->mb[0] = MBC_SET_LED_CONFIG;
5816 mcp->mb[1] = led_cfg[0];
5817 mcp->mb[2] = led_cfg[1];
/* 8031 takes four additional config words. */
5818 if (IS_QLA8031(ha)) {
5819 mcp->mb[3] = led_cfg[2];
5820 mcp->mb[4] = led_cfg[3];
5821 mcp->mb[5] = led_cfg[4];
5822 mcp->mb[6] = led_cfg[5];
5825 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5827 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5832 rval = qla2x00_mailbox_command(vha, mcp);
5833 if (rval != QLA_SUCCESS) {
5834 ql_dbg(ql_dbg_mbx, vha, 0x1134,
5835 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5837 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
5838 "Done %s.\n", __func__);
/*
 * qla81xx_get_led_config() - read the current LED configuration via
 * MBC_GET_LED_CONFIG (81xx and 8031 only).
 *
 * @led_cfg: receives two words (81xx) or six words (8031) from
 *           mb[1..2] / mb[1..6] on success.  Inverse of
 *           qla81xx_set_led_config().
 */
5845 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5848 struct qla_hw_data *ha = vha->hw;
5850 mbx_cmd_t *mcp = &mc;
5852 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5853 return QLA_FUNCTION_FAILED;
5855 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
5856 "Entered %s.\n", __func__);
5858 memset(mcp, 0, sizeof(mbx_cmd_t))
5859 mcp->mb[0] = MBC_GET_LED_CONFIG;
5861 mcp->out_mb = MBX_0;
5862 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5864 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5868 rval = qla2x00_mailbox_command(vha, mcp);
5869 if (rval != QLA_SUCCESS) {
5870 ql_dbg(ql_dbg_mbx, vha, 0x1137,
5871 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5873 led_cfg[0] = mcp->mb[1];
5874 led_cfg[1] = mcp->mb[2];
/* 8031 returns four extra config words. */
5875 if (IS_QLA8031(ha)) {
5876 led_cfg[2] = mcp->mb[3];
5877 led_cfg[3] = mcp->mb[4];
5878 led_cfg[4] = mcp->mb[5];
5879 led_cfg[5] = mcp->mb[6];
5881 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
5882 "Done %s.\n", __func__);
/*
 * qla82xx_mbx_beacon_ctl() - turn the beacon LED on or off on P3P-type
 * (82xx/8044) adapters via MBC_SET_LED_CONFIG.
 *
 * @enable: non-zero to enable beaconing, zero to disable.
 */
5889 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
5892 struct qla_hw_data *ha = vha->hw;
5894 mbx_cmd_t *mcp = &mc;
5896 if (!IS_P3P_TYPE(ha))
5897 return QLA_FUNCTION_FAILED;
5899 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
5900 "Entered %s.\n", __func__);
5902 memset(mcp, 0, sizeof(mbx_cmd_t));
5903 mcp->mb[0] = MBC_SET_LED_CONFIG;
5909 mcp->out_mb = MBX_7|MBX_0;
5911 mcp->tov = MBX_TOV_SECONDS;
5914 rval = qla2x00_mailbox_command(vha, mcp);
5915 if (rval != QLA_SUCCESS) {
5916 ql_dbg(ql_dbg_mbx, vha, 0x1128,
5917 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5919 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
5920 "Done %s.\n", __func__);
/*
 * qla83xx_wr_reg() - write a 32-bit value to a remote register via
 * MBC_WRITE_REMOTE_REG (83xx/27xx/28xx only).
 *
 * @reg:  register address, split LSW/MSW into mb[1..2].
 * @data: value to write, split LSW/MSW into mb[3..4].
 */
5927 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
5930 struct qla_hw_data *ha = vha->hw;
5932 mbx_cmd_t *mcp = &mc;
5934 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5935 return QLA_FUNCTION_FAILED;
5937 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
5938 "Entered %s.\n", __func__);
5940 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
5941 mcp->mb[1] = LSW(reg);
5942 mcp->mb[2] = MSW(reg);
5943 mcp->mb[3] = LSW(data);
5944 mcp->mb[4] = MSW(data);
5945 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5947 mcp->in_mb = MBX_1|MBX_0;
5948 mcp->tov = MBX_TOV_SECONDS;
5950 rval = qla2x00_mailbox_command(vha, mcp);
5952 if (rval != QLA_SUCCESS) {
5953 ql_dbg(ql_dbg_mbx, vha, 0x1131,
5954 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5956 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
5957 "Done %s.\n", __func__);
/*
 * qla2x00_port_logout() - perform an implicit LOGO of @fcport via
 * MBC_PORT_LOGOUT.
 *
 * mb[10] = BIT_15 selects the implicit-logout option.  Not supported on
 * ISP2100/2200.
 */
5964 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
5967 struct qla_hw_data *ha = vha->hw;
5969 mbx_cmd_t *mcp = &mc;
5971 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5972 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
5973 "Implicit LOGO Unsupported.\n");
5974 return QLA_FUNCTION_FAILED;
5978 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
5979 "Entering %s.\n", __func__);
5981 /* Perform Implicit LOGO. */
5982 mcp->mb[0] = MBC_PORT_LOGOUT;
5983 mcp->mb[1] = fcport->loop_id;
5984 mcp->mb[10] = BIT_15;
5985 mcp->out_mb = MBX_10|MBX_1|MBX_0;
5987 mcp->tov = MBX_TOV_SECONDS;
5989 rval = qla2x00_mailbox_command(vha, mcp);
5990 if (rval != QLA_SUCCESS)
5991 ql_dbg(ql_dbg_mbx, vha, 0x113d,
5992 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5994 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
5995 "Done %s.\n", __func__);
/*
 * qla83xx_rd_reg() - read a 32-bit remote register via
 * MBC_READ_REMOTE_REG, retrying on the CAMRAM soft-reset sentinel.
 *
 * @data: out parameter assembled from mb[3] (low) and mb[4] (high).
 *
 * During soft reset CAMRAM reads can return QLA8XXX_BAD_VALUE
 * (0xbad0bad0); the read is retried until it succeeds or the 2-second
 * deadline (retry_max_time) expires.
 */
6001 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
6005 mbx_cmd_t *mcp = &mc;
6006 struct qla_hw_data *ha = vha->hw;
6007 unsigned long retry_max_time = jiffies + (2 * HZ);
6009 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6010 return QLA_FUNCTION_FAILED;
6012 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
6015 mcp->mb[0] = MBC_READ_REMOTE_REG;
6016 mcp->mb[1] = LSW(reg);
6017 mcp->mb[2] = MSW(reg);
6018 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6019 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
6020 mcp->tov = MBX_TOV_SECONDS;
6022 rval = qla2x00_mailbox_command(vha, mcp);
6024 if (rval != QLA_SUCCESS) {
6025 ql_dbg(ql_dbg_mbx, vha, 0x114c,
6026 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6027 rval, mcp->mb[0], mcp->mb[1]);
6029 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
6030 if (*data == QLA8XXX_BAD_VALUE) {
6032 * During soft-reset CAMRAM register reads might
6033 * return 0xbad0bad0. So retry for MAX of 2 sec
6034 * while reading camram registers.
6036 if (time_after(jiffies, retry_max_time)) {
6037 ql_dbg(ql_dbg_mbx, vha, 0x1141,
6038 "Failure to read CAMRAM register. "
6039 "data=0x%x.\n", *data);
6040 return QLA_FUNCTION_FAILED;
6045 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
/*
 * qla83xx_restart_nic_firmware() - restart the NIC-side firmware via
 * MBC_RESTART_NIC_FIRMWARE (ISP83xx only).
 *
 * On failure a firmware dump is captured to aid post-mortem analysis.
 */
6052 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
6056 mbx_cmd_t *mcp = &mc;
6057 struct qla_hw_data *ha = vha->hw;
6059 if (!IS_QLA83XX(ha))
6060 return QLA_FUNCTION_FAILED;
6062 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
6064 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
6065 mcp->out_mb = MBX_0;
6066 mcp->in_mb = MBX_1|MBX_0;
6067 mcp->tov = MBX_TOV_SECONDS;
6069 rval = qla2x00_mailbox_command(vha, mcp);
6071 if (rval != QLA_SUCCESS) {
6072 ql_dbg(ql_dbg_mbx, vha, 0x1144,
6073 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6074 rval, mcp->mb[0], mcp->mb[1]);
/* Capture a firmware dump on failure for diagnostics. */
6075 ha->isp_ops->fw_dump(vha, 0);
6077 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
/*
 * qla83xx_access_control() - issue MBC_SET_ACCESS_CONTROL sub-operations
 * on ISP8031.
 *
 * @options:     sub-code; its low bits (see BIT_2..BIT_7 tests below)
 *               select the operation variant.
 * @start_addr:  region start, sent in mb[2..3] when BIT_2 is set.
 * @end_addr:    region end, sent in mb[4..5] when BIT_2 is set.
 * @sector_size: out parameter filled from mb[1] when BIT_5 is set.
 *
 * BIT_6/BIT_7 and BIT_3/BIT_4 variants log the returned driver-lock /
 * flash-lock id from mb[4]:mb[3].  Failures trigger a firmware dump.
 */
6084 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
6085 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
6089 mbx_cmd_t *mcp = &mc;
6090 uint8_t subcode = (uint8_t)options;
6091 struct qla_hw_data *ha = vha->hw;
6093 if (!IS_QLA8031(ha))
6094 return QLA_FUNCTION_FAILED;
6096 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
6098 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
6099 mcp->mb[1] = options;
6100 mcp->out_mb = MBX_1|MBX_0;
/* BIT_2 variants carry an address range in mb[2..5]. */
6101 if (subcode & BIT_2) {
6102 mcp->mb[2] = LSW(start_addr);
6103 mcp->mb[3] = MSW(start_addr);
6104 mcp->mb[4] = LSW(end_addr);
6105 mcp->mb[5] = MSW(end_addr);
6106 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
6108 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6109 if (!(subcode & (BIT_2 | BIT_5)))
6110 mcp->in_mb |= MBX_4|MBX_3;
6111 mcp->tov = MBX_TOV_SECONDS;
6113 rval = qla2x00_mailbox_command(vha, mcp);
6115 if (rval != QLA_SUCCESS) {
6116 ql_dbg(ql_dbg_mbx, vha, 0x1147,
6117 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
6118 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
6120 ha->isp_ops->fw_dump(vha, 0);
6122 if (subcode & BIT_5)
6123 *sector_size = mcp->mb[1];
6124 else if (subcode & (BIT_6 | BIT_7)) {
6125 ql_dbg(ql_dbg_mbx, vha, 0x1148,
6126 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6127 } else if (subcode & (BIT_3 | BIT_4)) {
6128 ql_dbg(ql_dbg_mbx, vha, 0x1149,
6129 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6131 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
/*
 * qla2x00_dump_mctp_data() - dump MCTP RAM into a host buffer via
 * MBC_DUMP_RISC_RAM_EXTENDED.
 *
 * @req_dma: DMA address of the destination buffer.
 * @addr:    source RAM address (LSW in mb[1], MSW in mb[8]).
 *
 * mb[10] carries the RAM ID: BIT_7 marks the ID valid and 0x40 selects
 * the MCTP RAM.
 */
6138 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6143 mbx_cmd_t *mcp = &mc;
6145 if (!IS_MCTP_CAPABLE(vha->hw))
6146 return QLA_FUNCTION_FAILED;
6148 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6149 "Entered %s.\n", __func__);
6151 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6152 mcp->mb[1] = LSW(addr);
6153 mcp->mb[2] = MSW(req_dma);
6154 mcp->mb[3] = LSW(req_dma);
6155 mcp->mb[4] = MSW(size);
6156 mcp->mb[5] = LSW(size);
6157 mcp->mb[6] = MSW(MSD(req_dma));
6158 mcp->mb[7] = LSW(MSD(req_dma));
6159 mcp->mb[8] = MSW(addr);
/*
 * NOTE(review): mb[10] is built with |= — if the local mbx_cmd_t is not
 * zeroed before this point (its initialization is not visible here),
 * this ORs into an indeterminate stack value.  Confirm mc is memset or
 * zero-initialized earlier in this function.
 */
6160 /* Setting RAM ID to valid */
6161 mcp->mb[10] |= BIT_7;
6162 /* For MCTP RAM ID is 0x40 */
6163 mcp->mb[10] |= 0x40;
6165 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6169 mcp->tov = MBX_TOV_SECONDS;
6171 rval = qla2x00_mailbox_command(vha, mcp);
6173 if (rval != QLA_SUCCESS) {
6174 ql_dbg(ql_dbg_mbx, vha, 0x114e,
6175 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6177 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6178 "Done %s.\n", __func__);
/*
 * qla26xx_dport_diagnostics() - run D-port diagnostics via
 * MBC_DPORT_DIAGNOSTICS, streaming results into @dd_buf.
 *
 * @dd_buf:  caller buffer; DMA-mapped here (DMA_FROM_DEVICE) and
 *           unmapped before return.
 * @size:    buffer size in bytes.
 * @options: diagnostic options passed in mb[1].
 *
 * 83xx/27xx/28xx only.  Uses an extended timeout (4x MBX_TOV_SECONDS)
 * since the diagnostic runs longer than a normal mailbox command.
 */
6185 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6186 void *dd_buf, uint size, uint options)
6190 mbx_cmd_t *mcp = &mc;
6193 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
6194 !IS_QLA28XX(vha->hw))
6195 return QLA_FUNCTION_FAILED;
6197 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6198 "Entered %s.\n", __func__);
6200 dd_dma = dma_map_single(&vha->hw->pdev->dev,
6201 dd_buf, size, DMA_FROM_DEVICE);
6202 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6203 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6204 return QLA_MEMORY_ALLOC_FAILED;
6207 memset(dd_buf, 0, size);
6209 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6210 mcp->mb[1] = options;
/* 64-bit DMA address split across mb[2..3] (low) and mb[6..7] (high). */
6211 mcp->mb[2] = MSW(LSD(dd_dma));
6212 mcp->mb[3] = LSW(LSD(dd_dma));
6213 mcp->mb[6] = MSW(MSD(dd_dma));
6214 mcp->mb[7] = LSW(MSD(dd_dma));
6216 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6217 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6218 mcp->buf_size = size;
6219 mcp->flags = MBX_DMA_IN;
6220 mcp->tov = MBX_TOV_SECONDS * 4;
6221 rval = qla2x00_mailbox_command(vha, mcp);
6223 if (rval != QLA_SUCCESS) {
6224 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6226 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6227 "Done %s.\n", __func__);
/* Always unmap, on success and failure alike. */
6230 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6231 size, DMA_FROM_DEVICE);
/*
 * qla2x00_async_mb_sp_done() - SRB completion callback for mailbox-over-
 * IOCB commands: records the result and wakes the waiter in
 * qla24xx_send_mb_cmd().  The sp is intentionally not freed here.
 */
6236 static void qla2x00_async_mb_sp_done(void *s, int res)
6240 sp->u.iocb_cmd.u.mbx.rc = res;
6242 complete(&sp->u.iocb_cmd.u.mbx.comp);
6243 /* don't free sp here. Let the caller do the free */
/*
 * qla24xx_send_mb_cmd() - send a mailbox command through the IOCB (SRB)
 * interface instead of the direct mailbox registers, so non-critical
 * commands do not contend with chip-setup traffic.
 *
 * Copies mcp->mb out through the SRB, blocks on a completion until the
 * firmware responds, then copies the returned registers back into
 * mcp->mb.  Requires firmware to be started.  Must not be called from
 * a context that cannot sleep (uses wait_for_completion()).
 */
6247 * This mailbox uses the iocb interface to send MB command.
6248 * This allows non-critial (non chip setup) command to go
6251 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6253 int rval = QLA_FUNCTION_FAILED;
6257 if (!vha->hw->flags.fw_started)
6260 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6264 sp->type = SRB_MB_IOCB;
6265 sp->name = mb_to_str(mcp->mb[0]);
6267 c = &sp->u.iocb_cmd;
6268 c->timeout = qla2x00_async_iocb_timeout;
6269 init_completion(&c->u.mbx.comp);
/* Allow the async timeout plus 2s of slack before giving up. */
6271 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
6273 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6275 sp->done = qla2x00_async_mb_sp_done;
6277 rval = qla2x00_start_sp(sp);
6278 if (rval != QLA_SUCCESS) {
6279 ql_dbg(ql_dbg_mbx, vha, 0x1018,
6280 "%s: %s Failed submission. %x.\n",
6281 __func__, sp->name, rval);
6285 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6286 sp->name, sp->handle);
/* Block until the done callback fires, then read back the registers. */
6288 wait_for_completion(&c->u.mbx.comp);
6289 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6293 case QLA_FUNCTION_TIMEOUT:
6294 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6295 __func__, sp->name, rval);
6298 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6299 __func__, sp->name);
6303 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6304 __func__, sp->name, rval);
/*
 * qla24xx_gpdb_wait() - synchronously fetch and parse a port database
 * entry for @fcport via MBC_GET_PORT_DATABASE over the IOCB mailbox
 * path (qla24xx_send_mb_cmd()), then feed it to __qla24xx_parse_gpdb().
 *
 * Blocks on completion, hence the warning below about the DPC thread.
 * The port_database_24xx buffer comes from ha->s_dma_pool and is freed
 * on every exit path.
 */
6319 * NOTE: Do not call this routine from DPC thread
6321 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6323 int rval = QLA_FUNCTION_FAILED;
6325 struct port_database_24xx *pd;
6326 struct qla_hw_data *ha = vha->hw;
6329 if (!vha->hw->flags.fw_started)
6332 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6334 ql_log(ql_log_warn, vha, 0xd047,
6335 "Failed to allocate port database structure.\n");
6339 memset(&mc, 0, sizeof(mc));
6340 mc.mb[0] = MBC_GET_PORT_DATABASE;
6341 mc.mb[1] = cpu_to_le16(fcport->loop_id);
6342 mc.mb[2] = MSW(pd_dma);
6343 mc.mb[3] = LSW(pd_dma);
6344 mc.mb[6] = MSW(MSD(pd_dma));
6345 mc.mb[7] = LSW(MSD(pd_dma));
6346 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6347 mc.mb[10] = cpu_to_le16((uint16_t)opt);
6349 rval = qla24xx_send_mb_cmd(vha, &mc);
6350 if (rval != QLA_SUCCESS) {
6351 ql_dbg(ql_dbg_mbx, vha, 0x1193,
6352 "%s: %8phC fail\n", __func__, fcport->port_name);
6356 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6358 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6359 __func__, fcport->port_name);
6363 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
/*
 * __qla24xx_parse_gpdb() - interpret a fetched port_database_24xx entry
 * and update @fcport accordingly.
 *
 * For NVMe ports the login-state nibbles live in the high half of the
 * state bytes; for FCP in the low half.  Requires PDS_PRLI_COMPLETE,
 * otherwise fails.  Returns QLA_NOT_LOGGED_IN if the WWPN no longer
 * matches (device lost mid-fetch).  On success copies names/port_id,
 * derives port_type from the PRLI service-parameter bits, and sets the
 * supported class-of-service and confirmed-completion flags.
 */
6368 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6369 struct port_database_24xx *pd)
6371 int rval = QLA_SUCCESS;
6373 u8 current_login_state, last_login_state;
/* NVMe uses the upper nibble of each state byte, FCP the lower. */
6375 if (fcport->fc4f_nvme) {
6376 current_login_state = pd->current_login_state >> 4;
6377 last_login_state = pd->last_login_state >> 4;
6379 current_login_state = pd->current_login_state & 0xf;
6380 last_login_state = pd->last_login_state & 0xf;
6383 /* Check for logged in state. */
6384 if (current_login_state != PDS_PRLI_COMPLETE) {
6385 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6386 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6387 current_login_state, last_login_state, fcport->loop_id);
6388 rval = QLA_FUNCTION_FAILED;
6392 if (fcport->loop_id == FC_NO_LOOP_ID ||
6393 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6394 memcmp(fcport->port_name, pd->port_name, 8))) {
6395 /* We lost the device mid way. */
6396 rval = QLA_NOT_LOGGED_IN;
6400 /* Names are little-endian. */
6401 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6402 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6404 /* Get port_id of device. */
6405 fcport->d_id.b.domain = pd->port_id[0];
6406 fcport->d_id.b.area = pd->port_id[1];
6407 fcport->d_id.b.al_pa = pd->port_id[2];
6408 fcport->d_id.b.rsvd_1 = 0;
/* PRLI word 3 bits are active-low: a clear bit means the role is set. */
6410 if (fcport->fc4f_nvme) {
6411 fcport->port_type = 0;
6412 if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
6413 fcport->port_type |= FCT_NVME_INITIATOR;
6414 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6415 fcport->port_type |= FCT_NVME_TARGET;
6416 if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
6417 fcport->port_type |= FCT_NVME_DISCOVERY;
6419 /* If not target must be initiator or unknown type. */
6420 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6421 fcport->port_type = FCT_INITIATOR;
6423 fcport->port_type = FCT_TARGET;
6425 /* Passback COS information. */
6426 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6427 FC_COS_CLASS2 : FC_COS_CLASS3;
6429 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6430 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6431 fcport->conf_compl_supported = 1;
/*
 * qla24xx_gidlist_wait() - synchronously fetch the firmware's ID list
 * via MBC_GET_ID_LIST over the IOCB mailbox path.
 *
 * @id_list/@id_list_dma: caller-allocated DMA buffer for the list.
 * @entries:              out parameter set to the entry count (mb[1]).
 *
 * Blocks on completion — must not be called from the DPC thread.
 */
6439 * qla24xx_gidlist__wait
6440 * NOTE: don't call this routine from DPC thread.
6442 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6443 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6445 int rval = QLA_FUNCTION_FAILED;
6448 if (!vha->hw->flags.fw_started)
6451 memset(&mc, 0, sizeof(mc));
6452 mc.mb[0] = MBC_GET_ID_LIST;
6453 mc.mb[2] = MSW(id_list_dma);
6454 mc.mb[3] = LSW(id_list_dma);
6455 mc.mb[6] = MSW(MSD(id_list_dma));
6456 mc.mb[7] = LSW(MSD(id_list_dma));
6458 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6460 rval = qla24xx_send_mb_cmd(vha, &mc);
6461 if (rval != QLA_SUCCESS) {
6462 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6463 "%s: fail\n", __func__);
6465 *entries = mc.mb[1];
6466 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6467 "%s: done\n", __func__);
/*
 * qla27xx_set_zio_threshold() - set the ZIO threshold via
 * MBC_GET_SET_ZIO_THRESHOLD (mb[1] = 1 selects the "set" operation,
 * mb[2] carries the new threshold value).
 */
6473 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6477 mbx_cmd_t *mcp = &mc;
6479 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6480 "Entered %s\n", __func__);
6482 memset(mcp->mb, 0 , sizeof(mcp->mb));
6483 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6484 mcp->mb[1] = cpu_to_le16(1);
6485 mcp->mb[2] = cpu_to_le16(value);
6486 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6487 mcp->in_mb = MBX_2 | MBX_0;
6488 mcp->tov = MBX_TOV_SECONDS;
6491 rval = qla2x00_mailbox_command(vha, mcp);
6493 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6494 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
/*
 * qla27xx_get_zio_threshold() - read the current ZIO threshold via
 * MBC_GET_SET_ZIO_THRESHOLD (mb[1] = 0 selects the "get" operation).
 *
 * @value: out parameter updated on success (assignment line not visible
 *         in this view; presumably *value = mcp->mb[2] — confirm).
 */
6499 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6503 mbx_cmd_t *mcp = &mc;
6505 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6506 "Entered %s\n", __func__);
6508 memset(mcp->mb, 0, sizeof(mcp->mb));
6509 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6510 mcp->mb[1] = cpu_to_le16(0);
6511 mcp->out_mb = MBX_1 | MBX_0;
6512 mcp->in_mb = MBX_2 | MBX_0;
6513 mcp->tov = MBX_TOV_SECONDS;
6516 rval = qla2x00_mailbox_command(vha, mcp);
6517 if (rval == QLA_SUCCESS)
6520 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6521 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
/*
 * qla2x00_read_sfp_dev() - read the whole SFP device area
 * (SFP_DEV_SIZE bytes) in SFP_BLOCK_SIZE chunks via qla2x00_read_sfp(),
 * optionally copying the data into @buf.
 *
 * @buf:   optional destination; when non-NULL, up to @count bytes are
 *         copied out (last chunk clamped to what remains).
 * @count: capacity of @buf in bytes.
 *
 * Reads go through the pre-allocated ha->sfp_data bounce buffer and its
 * DMA address, advancing all cursors one block per iteration.
 */
6527 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6529 struct qla_hw_data *ha = vha->hw;
6530 uint16_t iter, addr, offset;
6531 dma_addr_t phys_addr;
6535 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6537 phys_addr = ha->sfp_data_dma;
6538 sfp_data = ha->sfp_data;
6541 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6543 /* Skip to next device address. */
6548 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6549 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6550 if (rval != QLA_SUCCESS) {
6551 ql_log(ql_log_warn, vha, 0x706d,
6552 "Unable to read SFP data (%x/%x/%x).\n", rval,
/* Copy out only while the caller's buffer has room left. */
6558 if (buf && (c < count)) {
6561 if ((count - c) >= SFP_BLOCK_SIZE)
6562 sz = SFP_BLOCK_SIZE;
6566 memcpy(buf, sfp_data, sz);
6567 buf += SFP_BLOCK_SIZE;
6570 phys_addr += SFP_BLOCK_SIZE;
6571 sfp_data += SFP_BLOCK_SIZE;
6572 offset += SFP_BLOCK_SIZE;
/*
 * qla24xx_res_count_wait() - synchronously fetch firmware resource
 * counts via MBC_GET_RESOURCE_COUNTS over the IOCB mailbox path.
 *
 * @out_mb:    caller buffer receiving the raw mailbox registers.
 * @out_mb_sz: its size in bytes; the copy is clamped to
 *             SIZEOF_IOCB_MB_REG when larger.
 */
6578 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6579 uint16_t *out_mb, int out_mb_sz)
6581 int rval = QLA_FUNCTION_FAILED;
6584 if (!vha->hw->flags.fw_started)
6587 memset(&mc, 0, sizeof(mc));
6588 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6590 rval = qla24xx_send_mb_cmd(vha, &mc);
6591 if (rval != QLA_SUCCESS) {
6592 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6593 "%s: fail\n", __func__);
/* Never copy more than the mailbox register block holds. */
6595 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6596 memcpy(out_mb, mc.mb, out_mb_sz);
6598 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6600 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6601 "%s: done\n", __func__);
/*
 * qla28xx_secure_flash_update() - issue MBC_SECURE_FLASH_UPDATE to
 * update a secure flash region on ISP28xx.
 *
 * @region:        flash region identifier (mb[2]).
 * @len:           update length, split MSW/LSW into mb[3..4].
 * @sfub_dma_addr: DMA address of the secure-flash-update block,
 *                 spread across mb[5..8].
 * @sfub_len:      length of that block (mb[9]).
 */
6607 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
6608 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
6613 mbx_cmd_t *mcp = &mc;
6615 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
6617 mcp->mb[2] = region;
6618 mcp->mb[3] = MSW(len);
6619 mcp->mb[4] = LSW(len);
6620 mcp->mb[5] = MSW(sfub_dma_addr);
6621 mcp->mb[6] = LSW(sfub_dma_addr);
6622 mcp->mb[7] = MSW(MSD(sfub_dma_addr));
6623 mcp->mb[8] = LSW(MSD(sfub_dma_addr));
6624 mcp->mb[9] = sfub_len;
6626 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6627 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6628 mcp->tov = MBX_TOV_SECONDS;
6630 rval = qla2x00_mailbox_command(vha, mcp);
6632 if (rval != QLA_SUCCESS) {
6633 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
6634 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
/*
 * qla2xxx_write_remote_register() - write a 32-bit value to a remote
 * register via MBC_WRITE_REMOTE_REG.
 *
 * Same mailbox layout as qla83xx_wr_reg() (address in mb[1..2], data in
 * mb[3..4]) but without the chip-family gate.
 */
6641 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6646 mbx_cmd_t *mcp = &mc;
6648 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6649 "Entered %s.\n", __func__);
6651 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6652 mcp->mb[1] = LSW(addr);
6653 mcp->mb[2] = MSW(addr);
6654 mcp->mb[3] = LSW(data);
6655 mcp->mb[4] = MSW(data);
6656 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6657 mcp->in_mb = MBX_1|MBX_0;
6658 mcp->tov = MBX_TOV_SECONDS;
6660 rval = qla2x00_mailbox_command(vha, mcp);
6662 if (rval != QLA_SUCCESS) {
6663 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6664 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6666 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6667 "Done %s.\n", __func__);
6673 int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6678 mbx_cmd_t *mcp = &mc;
6680 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6681 "Entered %s.\n", __func__);
6683 mcp->mb[0] = MBC_READ_REMOTE_REG;
6684 mcp->mb[1] = LSW(addr);
6685 mcp->mb[2] = MSW(addr);
6686 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6687 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6688 mcp->tov = MBX_TOV_SECONDS;
6690 rval = qla2x00_mailbox_command(vha, mcp);
6692 *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
6694 if (rval != QLA_SUCCESS) {
6695 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6696 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6698 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6699 "Done %s.\n", __func__);