/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);

const char *const port_state_str[] = {
	"Unknown",
	"UNCONFIGURED",
	"DEAD",
	"LOST",
	"ONLINE"
};
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[8];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
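			/*
			 * Mailbox 0 in the 0x4000-0x7fff range signals a
			 * mailbox command completion; 0x8000-0xbfff signals
			 * an asynchronous event.
			 */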
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
	/* Check for PCI disconnection */
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
			/*
			 * Schedule this (only once) on the default system
			 * workqueue so that all the adapter workqueues and the
			 * DPC thread can be shut down cleanly.
			 */
			schedule_work(&vha->hw->board_disable);
		}
		return true;
	} else
		return false;
}

bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[8];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;
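	/*
	 * mboxes is a bitmask: bit N set means mailbox register N is
	 * expected and should be captured into ha->mailbox_out[] below.
	 */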
	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = (uint16_t __iomem *)&reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout]. */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "32", "10"
	};
#define	QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)
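	/*
	 * The table index is the firmware link-speed code; code 0x13
	 * (10 Gbps, as reported by CNA/FCoE parts) maps to the last entry.
	 */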
	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}
static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
	    IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
			 * - PEG-Halt Status-1 Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = protocol-engine ID
			 *	Bits 8-28  = f/w error code
			 *	Bits 29-31 = Error-level
			 *		Error-level 0x1 = Non-Fatal error
			 *		Error-level 0x2 = Recoverable Fatal error
			 *		Error-level 0x4 = UnRecoverable Fatal error
			 * - PEG-Halt Status-2 Register:
			 *	(LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additional_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

			/*
			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
			 * - PEG-to-FC Status Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = Peg-Firmware state
			 *	Bit  8     = N/W Interface Link-up
			 *	Bit  9     = N/W Interface signal detected
			 *	Bits 10-11 = SFP Status
			 *		SFP Status 0x0 = SFP+ transceiver not expected
			 *		SFP Status 0x1 = SFP+ transceiver not present
			 *		SFP Status 0x2 = SFP+ transceiver invalid
			 *		SFP Status 0x3 = SFP+ transceiver present and
			 *				 valid
			 *	Bits 12-14 = Heartbeat Counter
			 *	Bit  15    = Heartbeat Monitor Enable
			 *	Bits 16-17 = SFP Additional Info
			 *		SFP info 0x0 = Unrecognized transceiver for
			 *			       Ethernet
			 *		SFP info 0x1 = SFP+ brand validation failed
			 *		SFP info 0x2 = SFP+ speed validation failed
			 *		SFP info 0x3 = SFP+ access error
			 *	Bit  18    = SFP Multirate
			 *	Bit  19    = SFP Tx Fault
			 *	Bits 20-22 = Link Speed
			 *	Bits 23-27 = Reserved
			 *	Bits 28-30 = DCBX Status
			 *		DCBX Status 0x0 = DCBX Disabled
			 *		DCBX Status 0x1 = DCBX Enabled
			 *		DCBX Status 0x2 = DCBX Exchange error
			 *	Bit  31    = Reserved
			 */
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additional_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x"
			    "\nsfp_status=0x%x.\n ", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additional_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}
static int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	int ret = 0;

	if (!ha->num_vhosts)
		return ret;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}
fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
		if (f->loop_id == loop_id)
			return f;

	return NULL;
}
fc_port_t *
qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}
fc_port_t *
qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
	u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->d_id.b24 == id->b24) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t	handle_cnt;
	uint16_t	cnt, mbx;
	uint32_t	handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t	rscn_entry, host_pid;
	unsigned long	flags;
	fc_port_t	*fcport = NULL;

	if (!vha->hw->flags.fw_started)
		return;

	/* Setup to process RIO completion. */
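	/*
	 * RIO (Reduced Interrupt Operation) packs up to five 16-bit
	 * completion handles into the mailboxes of a single interrupt.
	 * Gather them into handles[] and normalize mb[0] to
	 * MBA_SCSI_COMPLETION so one fast-post path handles every variant.
	 */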
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
				handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;
	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
		    IS_QLA28XX(ha)) ?
			RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
		ha->fw_dump_mpi =
		    (IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
		    RD_REG_WORD(&reg24->mailbox7) & BIT_8;
		ha->isp_ops->fw_dump(vha, 1);
		ha->flags.fw_init_done = 0;
		QLA_FW_STOPPED(ha);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->port_no == 0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;
	case MBA_LOOP_INIT_ERR:
		ql_log(ql_log_warn, vha, 0x5090,
		    "LOOP INIT ERROR (%x).\n", mb[1]);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ha->flags.lip_ae = 1;

		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;
	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_log(ql_log_info, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);

		if (AUTO_DETECT_SFP_SUPPORT(vha)) {
			set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		break;
	case MBA_LOOP_DOWN:		/* Loop Down Event */
		SAVE_TOPO(ha);
		ha->flags.lip_ae = 0;
		ha->current_topology = 0;

		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
			: mbx;
		ql_log(ql_log_info, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM in case of FA-WWPN capable ISP
			 * Restore for Physical Port only
			 */
			if (!vha->vp_idx) {
				if (ha->flags.fawwpn_enabled &&
				    (ha->current_topology == ISP_CFG_F)) {
					void *wwpn = ha->init_cb->port_name;

					memcpy(vha->port_name, wwpn, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
					    vha, 0x00d8, "LOOP DOWN detected, "
					    "restore WWPN %016llx\n",
					    wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
			}

			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;
	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;
	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		ha->flags.lip_ae = 0;

		if (IS_QLA2100(ha))
			break;

		if (IS_CNA_CAPABLE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp && !vha->vp_idx)
				complete(&ha->dcbx_comp);
		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		break;
	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;
	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 * OR 0xffff for global event
		 * mb[2] = New login state
		 * 7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 * Event is global, vp_idx is NOT all vps,
		 * vp_idx does not match
		 * Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		if (mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port %s %04x %04x %04x.\n",
			    mb[1] == 0xffff ? "unavailable" : "logout",
			    mb[1], mb[2], mb[3]);

			if (mb[1] == 0xffff)
				goto global_port_update;

			if (mb[1] == NPH_SNS_LID(ha)) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* use handle_cnt for loop id/nport handle */
			if (IS_FWI2_CAPABLE(ha))
				handle_cnt = NPH_SNS;
			else
				handle_cnt = SIMPLE_NAME_SERVER;
			if (mb[1] == handle_cnt) {
				set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
				set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
				break;
			}

			/* Port logout */
			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
			if (!fcport)
				break;
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				break;
			ql_dbg(ql_dbg_async, vha, 0x508a,
			    "Marking port lost loopid=%04x portid=%06x.\n",
			    fcport->loop_id, fcport->d_id.b24);
			if (qla_ini_mode_enabled(vha)) {
				qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
				fcport->logout_on_delete = 0;
				qlt_schedule_sess_for_deletion(fcport);
			}
			break;

global_port_update:
			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    !ha->flags.n2n_ae &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);
		vha->scan.scan_retry = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		set_bit(VP_CONFIG_OK, &vha->vp_flags);
		break;
	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
				| vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
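		/*
		 * The host-PID comparison above used only the low byte of
		 * mb[1]; this recompute keeps bits 8-9 (the RSCN
		 * address-format qualifier) while dropping the reserved
		 * high bits.
		 */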
		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;
		{
			struct event_arg ea;

			memset(&ea, 0, sizeof(ea));
			ea.id.b24 = rscn_entry;
			ea.id.b.rsvd_1 = rscn_entry >> 24;
			qla2x00_handle_rscn(vha, &ea);
			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		}
		break;
	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;
	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;
	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
			mb[4] = RD_REG_WORD(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
				qla2xxx_wake_dpc(vha);
			}
		}
		/* fall through */
	case MBA_IDC_COMPLETE:
		if (ha->notify_lb_portup_comp && !vha->vp_idx)
			complete(&ha->lb_portup_comp);
		/* Fallthru */
	case MBA_IDC_TIME_EXT:
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
		    IS_QLA8044(ha))
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;

	case MBA_IDC_AEN:
		mb[4] = RD_REG_WORD(&reg24->mailbox4);
		mb[5] = RD_REG_WORD(&reg24->mailbox5);
		mb[6] = RD_REG_WORD(&reg24->mailbox6);
		mb[7] = RD_REG_WORD(&reg24->mailbox7);
		qla83xx_handle_8200_aen(vha, mb);
		break;
	case MBA_DPORT_DIAGNOSTICS:
		ql_dbg(ql_dbg_async, vha, 0x5052,
		    "D-Port Diagnostics: %04x result=%s\n",
		    mb[0],
		    mb[1] == 0 ? "start" :
		    mb[1] == 1 ? "done (pass)" :
		    mb[1] == 2 ? "done (error)" : "other");
		break;

	case MBA_TEMPERATURE_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x505e,
		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
		if (mb[1] == 0x12)
			schedule_work(&ha->board_disable);
		break;

	case MBA_TRANS_INSERT:
		ql_dbg(ql_dbg_async, vha, 0x5091,
		    "Transceiver Insertion: %04x\n", mb[1]);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}
	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				  struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
	struct req_que *req, void *iocb)
{
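	/*
	 * Map a firmware completion handle back to its srb_t: validate
	 * the index, make sure the slot is still occupied (not already
	 * timed out) and that the stored handle matches, then release
	 * the slot.
	 */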
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x) type %8ph.\n",
		    index, iocb);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(sp, 0);
}
static void
qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_24xx_entry *pkt)
{
	const char func[] = "MBX-IOCB2";
	srb_t *sp;
	struct srb_iocb *si;
	u16 sz, i;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	si = &sp->u.iocb_cmd;
	sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));

	for (i = 0; i < sz; i++)
		si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);

	res = (si->u.mbx.in_mb[0] & MBS_MASK);
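	/* The low bits of mailbox 0 carry the MBS_* completion status. */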
	sp->done(sp, res);
}

static void
qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct nack_to_isp *pkt)
{
	const char func[] = "nack";
	srb_t *sp;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
		res = QLA_FUNCTION_FAILED;

	sp->done(sp, res);
}
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	switch (sp->type) {
	case SRB_CT_CMD:
		bsg_job = sp->u.bsg_job;
		bsg_reply = bsg_job->reply;

		type = "ct pass-through";

		comp_status = le16_to_cpu(pkt->comp_status);

		/*
		 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
		 * fc payload to the caller
		 */
		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
		bsg_job->reply_len = sizeof(struct fc_bsg_reply);

		if (comp_status != CS_COMPLETE) {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				bsg_reply->reply_payload_rcv_len =
				    le16_to_cpu(pkt->rsp_info_len);

				ql_log(ql_log_warn, vha, 0x5048,
				    "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
				    type, comp_status,
				    bsg_reply->reply_payload_rcv_len);
			} else {
				ql_log(ql_log_warn, vha, 0x5049,
				    "CT pass-through-%s error comp_status=0x%x.\n",
				    type, comp_status);
				res = DID_ERROR << 16;
				bsg_reply->reply_payload_rcv_len = 0;
			}
			ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
			    pkt, sizeof(*pkt));
		} else {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			bsg_job->reply_len = 0;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		break;
	}

	sp->done(sp, res);
}
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	uint32_t fw_status[3];
	int res;
	struct srb_iocb *els;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		if (iocb_type != ELS_IOCB_TYPE) {
			ql_dbg(ql_dbg_user, vha, 0x5047,
			    "Completing %s: (%p) type=%d.\n",
			    type, sp, sp->type);
			sp->done(sp, 0);
			return;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/* borrowing sts_entry_24xx.comp_status.
		   same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		sp->done(sp, res);
		return;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	if (iocb_type == ELS_IOCB_TYPE) {
		els = &sp->u.iocb_cmd;
		els->u.els_plogi.fw_status[0] = fw_status[0];
		els->u.els_plogi.fw_status[1] = fw_status[1];
		els->u.els_plogi.fw_status[2] = fw_status[2];
		els->u.els_plogi.comp_status = fw_status[0];
		if (comp_status == CS_COMPLETE) {
			res = DID_OK << 16;
		} else {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				els->u.els_plogi.len =
				    le16_to_cpu(((struct els_sts_entry_24xx *)
					pkt)->total_byte_count);
			} else {
				els->u.els_plogi.len = 0;
				res = DID_ERROR << 16;
			}
		}
		ql_dbg(ql_dbg_user, vha, 0x503f,
		    "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
		    type, sp->handle, comp_status, fw_status[1], fw_status[2],
		    le16_to_cpu(((struct els_sts_entry_24xx *)
			pkt)->total_byte_count));
		goto els_ct_done;
	}

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job = sp->u.bsg_job;
	bsg_reply = bsg_job->reply;
	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_reply->reply_payload_rcv_len = 0;
		}
		memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
		    fw_status, sizeof(fw_status));
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}
els_ct_done:

	sp->done(sp, res);
}
static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - %8phC hdl=%x"
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, fcport->port_name, sp->handle,
		    fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		vha->hw->exch_starvation = 0;
		data[0] = MBS_COMMAND_COMPLETE;

		if (sp->type == SRB_PRLI_CMD) {
			lio->u.logio.iop[0] =
			    le32_to_cpu(logio->io_parameter[0]);
			lio->u.logio.iop[1] =
			    le32_to_cpu(logio->io_parameter[1]);
			goto logio_done;
		}

		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	lio->u.logio.iop[0] = iop[0];
	lio->u.logio.iop[1] = iop[1];
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	case LSC_SCODE_CMD_FAILED:
		if (iop[1] == 0x0606) {
			/*
			 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
			 * Target side acked.
			 */
			data[0] = MBS_COMMAND_COMPLETE;
			goto logio_done;
		}
		data[0] = MBS_COMMAND_ERROR;
		break;
	case LSC_SCODE_NOXCB:
		vha->hw->exch_starvation++;
		if (vha->hw->exch_starvation > 5) {
			ql_log(ql_log_warn, vha, 0xd046,
			    "Exchange starvation. Resetting RISC\n");

			vha->hw->exch_starvation = 0;

			if (IS_P3P_TYPE(vha->hw))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		/* fall through */
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, fcport->port_name,
	    sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(sp, 0);
}
static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	iocb->u.tmf.data = QLA_SUCCESS;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if ((le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
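		/*
		 * FCP response info is valid: byte 3 of the response data
		 * carries the task-management response code; non-zero means
		 * the target failed or rejected the TMF.
		 */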
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_log(ql_log_warn, fcport->vha, 0x503b,
			    "Async-%s error - hdl=%x not enough response(%d).\n",
			    type, sp->handle, sts->rsp_data_len);
		} else if (sts->data[3]) {
			ql_log(ql_log_warn, fcport->vha, 0x503c,
			    "Async-%s error - hdl=%x response(%x).\n",
			    type, sp->handle, sts->data[3]);
			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
		}
	}

	if (iocb->u.tmf.data != QLA_SUCCESS)
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055,
		    sts, sizeof(*sts));

	sp->done(sp, 0);
}
static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    void *tsk, srb_t *sp)
{
	fc_port_t *fcport;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	uint16_t	state_flags;
	struct nvmefc_fcp_req *fd;
	uint16_t	ret = QLA_SUCCESS;
	uint16_t	comp_status = le16_to_cpu(sts->comp_status);

	iocb = &sp->u.iocb_cmd;
	fcport = sp->fcport;
	iocb->u.nvme.comp_status = comp_status;
	state_flags = le16_to_cpu(sts->state_flags);
	fd = iocb->u.nvme.desc;

	if (unlikely(iocb->u.nvme.aen_op))
		atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);

	/*
	 * State flags: Bit 6 and 0.
	 * If 0 is set, we don't care about 6.
	 * both cases resp was dma'd to host buffer
	 * if both are 0, that is good path case.
	 * if six is set and 0 is clear, we need to
	 * copy resp data from status iocb to resp buffer.
	 */
	if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
		iocb->u.nvme.rsp_pyld_len = 0;
	} else if ((state_flags & SF_FCP_RSP_DMA)) {
		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
	} else if (state_flags & SF_NVME_ERSP) {
		uint32_t *inbuf, *outbuf;
		uint16_t iter;

		inbuf = (uint32_t *)&sts->nvme_ersp_data;
		outbuf = (uint32_t *)fd->rspaddr;
		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
		if (unlikely(iocb->u.nvme.rsp_pyld_len >
		    sizeof(struct nvme_fc_ersp_iu))) {
			if (ql_mask_match(ql_dbg_io)) {
				WARN_ONCE(1, "Unexpected response payload length %u.\n",
				    iocb->u.nvme.rsp_pyld_len);
				ql_log(ql_log_warn, fcport->vha, 0x5100,
				    "Unexpected response payload length %u.\n",
				    iocb->u.nvme.rsp_pyld_len);
			}
			iocb->u.nvme.rsp_pyld_len =
			    sizeof(struct nvme_fc_ersp_iu);
		}
		iter = iocb->u.nvme.rsp_pyld_len >> 2;
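		/*
		 * The ERSP data is delivered as big-endian 32-bit words;
		 * swab32() converts each word for the little-endian host.
		 */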
		for (; iter; iter--)
			*outbuf++ = swab32(*inbuf++);
	} else { /* unhandled case */
		ql_log(ql_log_warn, fcport->vha, 0x503a,
		    "NVME-%s error. Unhandled state_flags of %x\n",
		    sp->name, state_flags);
	}

	fd->transferred_length = fd->payload_length -
	    le32_to_cpu(sts->residual_len);

	if (unlikely(comp_status != CS_COMPLETE))
		ql_log(ql_log_warn, fcport->vha, 0x5060,
		    "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
		    sp->name, sp->handle, comp_status,
		    fd->transferred_length, le32_to_cpu(sts->residual_len),
		    sts->ox_id);

	/*
	 * If transport error then Failure (HBA rejects request)
	 * otherwise transport will handle.
	 */
	switch (comp_status) {
	case CS_COMPLETE:
		break;

	case CS_RESET:
	case CS_PORT_UNAVAILABLE:
	case CS_PORT_LOGGED_OUT:
		fcport->nvme_flag |= NVME_FLAG_RESETTING;
		/* fall through */
	case CS_ABORTED:
	case CS_PORT_BUSY:
		fd->transferred_length = 0;
		iocb->u.nvme.rsp_pyld_len = 0;
		ret = QLA_ABORTED;
		break;
	case CS_DATA_UNDERRUN:
		break;
	default:
		ret = QLA_FUNCTION_FAILED;
		break;
	}
	sp->done(sp, ret);
}
static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req,
    struct vp_ctrl_entry_24xx *vce)
{
	const char func[] = "CTRLVP-IOCB";
	srb_t *sp;
	int rval = QLA_SUCCESS;

	sp = qla2x00_get_sp_from_handle(vha, func, req, vce);
	if (!sp)
		return;

	if (vce->entry_status != 0) {
		ql_dbg(ql_dbg_vport, vha, 0x10c4,
		    "%s: Failed to complete IOCB -- error status (%x)\n",
		    sp->name, vce->entry_status);
		rval = QLA_FUNCTION_FAILED;
	} else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_dbg(ql_dbg_vport, vha, 0x10c5,
		    "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n",
		    sp->name, le16_to_cpu(vce->comp_status),
		    le16_to_cpu(vce->vp_idx_failed));
		rval = QLA_FUNCTION_FAILED;
	} else {
		ql_dbg(ql_dbg_vport, vha, 0x10c6,
		    "Done %s.\n", __func__);
	}

	sp->rc = rval;
	sp->done(sp, rval);
}
/* Process a single response queue entry. */
static void qla2x00_process_response_entry(struct scsi_qla_host *vha,
					   struct rsp_que *rsp,
					   sts_entry_t *pkt)
{
	sts21_entry_t *sts21_entry;
	sts22_entry_t *sts22_entry;
	uint16_t handle_cnt;
	uint16_t cnt;

	switch (pkt->entry_type) {
	case STATUS_TYPE:
		qla2x00_status_entry(vha, rsp, pkt);
		break;
	case STATUS_TYPE_21:
		sts21_entry = (sts21_entry_t *)pkt;
		handle_cnt = sts21_entry->handle_count;
		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
						sts21_entry->handle[cnt]);
		break;
	case STATUS_TYPE_22:
		sts22_entry = (sts22_entry_t *)pkt;
		handle_cnt = sts22_entry->handle_count;
		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
						sts22_entry->handle[cnt]);
		break;
	case STATUS_CONT_TYPE:
		qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
		break;
	case MBX_IOCB_TYPE:
		qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt);
		break;
	case CT_IOCB_TYPE:
		qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
		break;
	default:
		/* Type Not Supported. */
		ql_log(ql_log_warn, vha, 0x504a,
		    "Received unknown response pkt type %x entry status=%x.\n",
		    pkt->entry_type, pkt->entry_status);
		break;
	}
}
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
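		/*
		 * Entries are stamped RESPONSE_PROCESSED once consumed, so
		 * the loop stops at the first entry the firmware has not
		 * rewritten since the last pass.
		 */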
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		qla2x00_process_response_entry(vha, rsp, pkt);
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
		     uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
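		/*
		 * More sense data remains than this status IOCB carried;
		 * remember the srb so the rest can be appended from the
		 * STATUS CONTINUATION entries that follow.
		 */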
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
		    sp->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}
struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, set the ASC/ASCQ
 * fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected an error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == T10_PI_APP_ESCAPE) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == T10_PI_REF_ESCAPE))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct t10_pi_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
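				/*
				 * Each T10 PI tuple is 8 bytes (2-byte
				 * guard, 2-byte app tag, 4-byte ref tag),
				 * so this is the tuple count of this
				 * scatterlist element.
				 */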
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = T10_PI_APP_ESCAPE;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = T10_PI_REF_ESCAPE;
		}

		return 0;
	}
	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	return 1;
}
2254 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
2255 struct req_que *req, uint32_t index)
2257 struct qla_hw_data *ha = vha->hw;
2259 uint16_t comp_status;
2260 uint16_t scsi_status;
2262 uint32_t rval = EXT_STATUS_OK;
2263 struct bsg_job *bsg_job = NULL;
2264 struct fc_bsg_request *bsg_request;
2265 struct fc_bsg_reply *bsg_reply;
2266 sts_entry_t *sts = pkt;
2267 struct sts_entry_24xx *sts24 = pkt;
2269 /* Validate handle. */
2270 if (index >= req->num_outstanding_cmds) {
2271 ql_log(ql_log_warn, vha, 0x70af,
2272 "Invalid SCSI completion handle 0x%x.\n", index);
2273 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2277 sp = req->outstanding_cmds[index];
2279 ql_log(ql_log_warn, vha, 0x70b0,
2280 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
2283 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2287 /* Free outstanding command slot. */
2288 req->outstanding_cmds[index] = NULL;
2289 bsg_job = sp->u.bsg_job;
2290 bsg_request = bsg_job->request;
2291 bsg_reply = bsg_job->reply;
2293 if (IS_FWI2_CAPABLE(ha)) {
2294 comp_status = le16_to_cpu(sts24->comp_status);
2295 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2296 } else {
2297 comp_status = le16_to_cpu(sts->comp_status);
2298 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2301 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
2302 switch (comp_status) {
2303 case CS_COMPLETE:
2304 if (scsi_status == 0) {
2305 bsg_reply->reply_payload_rcv_len =
2306 bsg_job->reply_payload.payload_len;
2307 vha->qla_stats.input_bytes +=
2308 bsg_reply->reply_payload_rcv_len;
2309 vha->qla_stats.input_requests++;
2310 rval = EXT_STATUS_OK;
2314 case CS_DATA_OVERRUN:
2315 ql_dbg(ql_dbg_user, vha, 0x70b1,
2316 "Command completed with data overrun thread_id=%d\n",
2318 rval = EXT_STATUS_DATA_OVERRUN;
2321 case CS_DATA_UNDERRUN:
2322 ql_dbg(ql_dbg_user, vha, 0x70b2,
2323 "Command completed with data underrun thread_id=%d\n",
2325 rval = EXT_STATUS_DATA_UNDERRUN;
2327 case CS_BIDIR_RD_OVERRUN:
2328 ql_dbg(ql_dbg_user, vha, 0x70b3,
2329 "Command completed with read data overrun thread_id=%d\n",
2331 rval = EXT_STATUS_DATA_OVERRUN;
2334 case CS_BIDIR_RD_WR_OVERRUN:
2335 ql_dbg(ql_dbg_user, vha, 0x70b4,
2336 "Command completed with read and write data overrun "
2337 "thread_id=%d\n", thread_id);
2338 rval = EXT_STATUS_DATA_OVERRUN;
2341 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
2342 ql_dbg(ql_dbg_user, vha, 0x70b5,
2343 "Command completed with read data over and write data "
2344 "underrun thread_id=%d\n", thread_id);
2345 rval = EXT_STATUS_DATA_OVERRUN;
2348 case CS_BIDIR_RD_UNDERRUN:
2349 ql_dbg(ql_dbg_user, vha, 0x70b6,
2350 "Command completed with read data underrun "
2351 "thread_id=%d\n", thread_id);
2352 rval = EXT_STATUS_DATA_UNDERRUN;
2355 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
2356 ql_dbg(ql_dbg_user, vha, 0x70b7,
2357 "Command completed with read data under and write data "
2358 "overrun thread_id=%d\n", thread_id);
2359 rval = EXT_STATUS_DATA_UNDERRUN;
2362 case CS_BIDIR_RD_WR_UNDERRUN:
2363 ql_dbg(ql_dbg_user, vha, 0x70b8,
2364 "Command completed with read and write data underrun "
2365 "thread_id=%d\n", thread_id);
2366 rval = EXT_STATUS_DATA_UNDERRUN;
2370 ql_dbg(ql_dbg_user, vha, 0x70b9,
2371 "Command completed with data DMA error thread_id=%d\n",
2373 rval = EXT_STATUS_DMA_ERR;
2377 ql_dbg(ql_dbg_user, vha, 0x70ba,
2378 "Command completed with timeout thread_id=%d\n",
2380 rval = EXT_STATUS_TIMEOUT;
2383 ql_dbg(ql_dbg_user, vha, 0x70bb,
2384 "Command completed with completion status=0x%x "
2385 "thread_id=%d\n", comp_status, thread_id);
2386 rval = EXT_STATUS_ERR;
2389 bsg_reply->reply_payload_rcv_len = 0;
2392 /* Return the vendor-specific reply to the API. */
2393 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
2394 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2395 /* Always return DID_OK; bsg will send the vendor-specific response
2396 * in this case only. */
2397 sp->done(sp, DID_OK << 16);
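/*
 * Sketch of the completion-handle layout assumed by the status and
 * bidirectional handlers (qla_handle_decode_sketch is illustrative
 * only): the firmware echoes back a 32-bit handle whose upper 16 bits
 * select the request queue and whose lower 16 bits index that queue's
 * outstanding_cmds[] array.
 */
static inline void qla_handle_decode_sketch(uint32_t handle,
	uint16_t *que, uint32_t *index)
{
	*que = handle >> 16;		/* MSW(handle) */
	*index = handle & 0xffff;	/* LSW(handle) */
}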
2402 * qla2x00_status_entry() - Process a Status IOCB entry.
2403 * @vha: SCSI driver HA context
2404 * @rsp: response queue
2405 * @pkt: Entry pointer
2408 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2412 struct scsi_cmnd *cp;
2413 sts_entry_t *sts = pkt;
2414 struct sts_entry_24xx *sts24 = pkt;
2415 uint16_t comp_status;
2416 uint16_t scsi_status;
2418 uint8_t lscsi_status;
2420 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
2421 fw_resid_len;
2422 uint8_t *rsp_info, *sense_data;
2423 struct qla_hw_data *ha = vha->hw;
2426 struct req_que *req;
2429 uint16_t state_flags = 0;
2430 uint16_t retry_delay = 0;
2432 if (IS_FWI2_CAPABLE(ha)) {
2433 comp_status = le16_to_cpu(sts24->comp_status);
2434 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
2435 state_flags = le16_to_cpu(sts24->state_flags);
2436 } else {
2437 comp_status = le16_to_cpu(sts->comp_status);
2438 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
2440 handle = (uint32_t) LSW(sts->handle);
2441 que = MSW(sts->handle);
2442 req = ha->req_q_map[que];
2444 /* Check for invalid queue pointer */
2445 if (req == NULL ||
2446 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
2447 ql_dbg(ql_dbg_io, vha, 0x3059,
2448 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
2449 "que=%u.\n", sts->handle, req, que);
2453 /* Validate handle. */
2454 if (handle < req->num_outstanding_cmds) {
2455 sp = req->outstanding_cmds[handle];
2457 ql_dbg(ql_dbg_io, vha, 0x3075,
2458 "%s(%ld): Already returned command for status handle (0x%x).\n",
2459 __func__, vha->host_no, sts->handle);
2463 ql_dbg(ql_dbg_io, vha, 0x3017,
2464 "Invalid status handle, out of range (0x%x).\n",
2467 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
2468 if (IS_P3P_TYPE(ha))
2469 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2471 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2472 qla2xxx_wake_dpc(vha);
2482 if (sp->cmd_type != TYPE_SRB) {
2483 req->outstanding_cmds[handle] = NULL;
2484 ql_dbg(ql_dbg_io, vha, 0x3015,
2485 "Unknown sp->cmd_type %x %p).\n",
2490 /* NVME completion. */
2491 if (sp->type == SRB_NVME_CMD) {
2492 req->outstanding_cmds[handle] = NULL;
2493 qla24xx_nvme_iocb_entry(vha, req, pkt, sp);
2497 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
2498 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
2502 /* Task Management completion. */
2503 if (sp->type == SRB_TM_CMD) {
2504 qla24xx_tm_iocb_entry(vha, req, pkt);
2508 /* Fast path completion. */
2509 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2510 qla2x00_process_completed_request(vha, req, handle);
2515 req->outstanding_cmds[handle] = NULL;
2516 cp = GET_CMD_SP(sp);
2518 ql_dbg(ql_dbg_io, vha, 0x3018,
2519 "Command already returned (0x%x/%p).\n",
2525 lscsi_status = scsi_status & STATUS_MASK;
2527 fcport = sp->fcport;
2530 sense_len = par_sense_len = rsp_info_len = resid_len =
2531 fw_resid_len = 0;
2532 if (IS_FWI2_CAPABLE(ha)) {
2533 if (scsi_status & SS_SENSE_LEN_VALID)
2534 sense_len = le32_to_cpu(sts24->sense_len);
2535 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2536 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
2537 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
2538 resid_len = le32_to_cpu(sts24->rsp_residual_count);
2539 if (comp_status == CS_DATA_UNDERRUN)
2540 fw_resid_len = le32_to_cpu(sts24->residual_len);
2541 rsp_info = sts24->data;
2542 sense_data = sts24->data;
2543 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
2544 ox_id = le16_to_cpu(sts24->ox_id);
2545 par_sense_len = sizeof(sts24->data);
2546 /* Valid values of the retry delay timer are 0x1-0xffef */
2547 if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) {
2548 retry_delay = sts24->retry_delay & 0x3fff;
2549 ql_dbg(ql_dbg_io, sp->vha, 0x3033,
2550 "%s: scope=%#x retry_delay=%#x\n", __func__,
2551 sts24->retry_delay >> 14, retry_delay);
2552 }
2553 } else {
2554 if (scsi_status & SS_SENSE_LEN_VALID)
2555 sense_len = le16_to_cpu(sts->req_sense_length);
2556 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2557 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
2558 resid_len = le32_to_cpu(sts->residual_length);
2559 rsp_info = sts->rsp_info;
2560 sense_data = sts->req_sense_data;
2561 par_sense_len = sizeof(sts->req_sense_data);
2564 /* Check for any FCP transport errors. */
2565 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
2566 /* Sense data lies beyond any FCP RESPONSE data. */
2567 if (IS_FWI2_CAPABLE(ha)) {
2568 sense_data += rsp_info_len;
2569 par_sense_len -= rsp_info_len;
2571 if (rsp_info_len > 3 && rsp_info[3]) {
2572 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
2573 "FCP I/O protocol failure (0x%x/0x%x).\n",
2574 rsp_info_len, rsp_info[3]);
2576 res = DID_BUS_BUSY << 16;
2581 /* Check for overrun. */
2582 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2583 scsi_status & SS_RESIDUAL_OVER)
2584 comp_status = CS_DATA_OVERRUN;
2587 * Check retry_delay_timer value if we receive a busy or
2588 * queue full status.
2589 */
2590 if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
2591 lscsi_status == SAM_STAT_BUSY)
2592 qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
2595 * Based on host and SCSI status, generate a status code for the Linux SCSI mid-layer.
2596 */
2597 switch (comp_status) {
2598 case CS_COMPLETE:
2599 case CS_QUEUE_FULL:
2600 if (scsi_status == 0) {
2604 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
2605 resid = resid_len;
2606 scsi_set_resid(cp, resid);
2608 if (!lscsi_status &&
2609 ((unsigned)(scsi_bufflen(cp) - resid) <
2611 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
2612 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
2613 resid, scsi_bufflen(cp));
2615 res = DID_ERROR << 16;
2619 res = DID_OK << 16 | lscsi_status;
2621 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2622 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
2623 "QUEUE FULL detected.\n");
2627 if (lscsi_status != SS_CHECK_CONDITION)
2630 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2631 if (!(scsi_status & SS_SENSE_LEN_VALID))
2634 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2638 case CS_DATA_UNDERRUN:
2639 /* Use F/W calculated residual length. */
2640 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2641 scsi_set_resid(cp, resid);
2642 if (scsi_status & SS_RESIDUAL_UNDER) {
2643 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
2644 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
2645 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
2646 resid, scsi_bufflen(cp));
2648 res = DID_ERROR << 16 | lscsi_status;
2649 goto check_scsi_status;
2652 if (!lscsi_status &&
2653 ((unsigned)(scsi_bufflen(cp) - resid) <
2655 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
2656 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
2657 resid, scsi_bufflen(cp));
2659 res = DID_ERROR << 16;
2662 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2663 lscsi_status != SAM_STAT_BUSY) {
2665 * A SCSI status of task set full or busy is considered to mean the
2666 * task was not completed.
2667 */
2669 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
2670 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
2671 resid, scsi_bufflen(cp));
2673 res = DID_ERROR << 16 | lscsi_status;
2674 goto check_scsi_status;
2676 ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2677 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2678 scsi_status, lscsi_status);
2681 res = DID_OK << 16 | lscsi_status;
2686 * Check to see if SCSI status is non-zero; if so, report the SCSI
2687 * status.
2688 */
2689 if (lscsi_status != 0) {
2690 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2691 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
2692 "QUEUE FULL detected.\n");
2696 if (lscsi_status != SS_CHECK_CONDITION)
2699 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2700 if (!(scsi_status & SS_SENSE_LEN_VALID))
2703 qla2x00_handle_sense(sp, sense_data, par_sense_len,
2704 sense_len, rsp, res);
2708 case CS_PORT_LOGGED_OUT:
2709 case CS_PORT_CONFIG_CHG:
2712 case CS_PORT_UNAVAILABLE:
2717 * We are going to have the fc class block the rport
2718 * while we try to recover so instruct the mid layer
2719 * to requeue until the class decides how to handle this.
2721 res = DID_TRANSPORT_DISRUPTED << 16;
2723 if (comp_status == CS_TIMEOUT) {
2724 if (IS_FWI2_CAPABLE(ha))
2726 else if ((le16_to_cpu(sts->status_flags) &
2727 SF_LOGOUT_SENT) == 0)
2731 if (atomic_read(&fcport->state) == FCS_ONLINE) {
2732 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
2733 "Port to be marked lost on fcport=%02x%02x%02x, current "
2734 "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
2735 fcport->d_id.b.area, fcport->d_id.b.al_pa,
2736 port_state_str[FCS_ONLINE],
2739 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2740 qlt_schedule_sess_for_deletion(fcport);
2746 res = DID_RESET << 16;
2750 logit = qla2x00_handle_dif_error(sp, sts24);
2755 res = DID_ERROR << 16;
2757 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2760 if (state_flags & BIT_4)
2761 scmd_printk(KERN_WARNING, cp,
2762 "Unsupported device '%s' found.\n",
2763 cp->device->vendor);
2767 ql_log(ql_log_info, fcport->vha, 0x3022,
2768 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
2769 comp_status, scsi_status, res, vha->host_no,
2770 cp->device->id, cp->device->lun, fcport->d_id.b24,
2771 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len,
2772 resid_len, fw_resid_len, sp, cp);
2773 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
2774 pkt, sizeof(*sts24));
2775 res = DID_ERROR << 16;
2778 res = DID_ERROR << 16;
2784 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
2785 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
2786 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
2787 "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
2788 comp_status, scsi_status, res, vha->host_no,
2789 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2790 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2791 cp->cmnd, scsi_bufflen(cp), rsp_info_len,
2792 resid_len, fw_resid_len, sp, cp);
2794 if (rsp->status_srb == NULL)
2795 sp->done(sp, res);
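/*
 * Sketch of the underrun cross-check performed in the CS_DATA_UNDERRUN
 * arm above (illustrative helper, assumed names): on FWI2-capable ISPs
 * the firmware reports its own residual; if it disagrees with the
 * residual carried in the FCP response, frames were dropped and the
 * command must fail rather than complete short.  A short transfer is
 * acceptable only when it still meets the mid-layer's underflow minimum.
 */
static inline bool qla_underrun_ok_sketch(u32 fw_resid, u32 fcp_resid,
	u32 bufflen, u32 underflow)
{
	if (fw_resid != fcp_resid)
		return false;			/* dropped frame(s) */
	return bufflen - fw_resid >= underflow;	/* enough data moved */
}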
2799 * qla2x00_status_cont_entry() - Process a Status Continuation entry.
2800 * @rsp: response queue
2801 * @pkt: Entry pointer
2803 * Extended sense data.
2806 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2808 uint8_t sense_sz = 0;
2809 struct qla_hw_data *ha = rsp->hw;
2810 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2811 srb_t *sp = rsp->status_srb;
2812 struct scsi_cmnd *cp;
2816 if (!sp || !GET_CMD_SENSE_LEN(sp))
2819 sense_len = GET_CMD_SENSE_LEN(sp);
2820 sense_ptr = GET_CMD_SENSE_PTR(sp);
2822 cp = GET_CMD_SP(sp);
2824 ql_log(ql_log_warn, vha, 0x3025,
2825 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2827 rsp->status_srb = NULL;
2831 if (sense_len > sizeof(pkt->data))
2832 sense_sz = sizeof(pkt->data);
2833 else
2834 sense_sz = sense_len;
2836 /* Move sense data. */
2837 if (IS_FWI2_CAPABLE(ha))
2838 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2839 memcpy(sense_ptr, pkt->data, sense_sz);
2840 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2841 sense_ptr, sense_sz);
2843 sense_len -= sense_sz;
2844 sense_ptr += sense_sz;
2846 SET_CMD_SENSE_PTR(sp, sense_ptr);
2847 SET_CMD_SENSE_LEN(sp, sense_len);
2849 /* Place command on done queue. */
2850 if (sense_len == 0) {
2851 rsp->status_srb = NULL;
2852 sp->done(sp, cp->result);
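/*
 * Sketch of the chunked sense copy above (illustrative): each Status
 * Continuation entry carries at most sizeof(pkt->data) sense bytes,
 * and the running pointer/length pair is stored back on the srb via
 * SET_CMD_SENSE_PTR()/SET_CMD_SENSE_LEN() until nothing remains.
 */
static inline uint32_t qla_sense_chunk_sketch(uint8_t *dst,
	const uint8_t *src, uint32_t remaining, uint32_t chunk)
{
	uint32_t n = remaining < chunk ? remaining : chunk;

	memcpy(dst, src, n);
	return remaining - n;	/* caller advances dst/src by n */
}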
2857 * qla2x00_error_entry() - Process an error entry.
2858 * @vha: SCSI driver HA context
2859 * @rsp: response queue
2860 * @pkt: Entry pointer
2861 * Return: 1=allow further error analysis, 0=no additional error analysis.
2864 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2867 struct qla_hw_data *ha = vha->hw;
2868 const char func[] = "ERROR-IOCB";
2869 uint16_t que = MSW(pkt->handle);
2870 struct req_que *req = NULL;
2871 int res = DID_ERROR << 16;
2873 ql_dbg(ql_dbg_async, vha, 0x502a,
2874 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
2875 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);
2877 if (que >= ha->max_req_queues || !ha->req_q_map[que])
2880 req = ha->req_q_map[que];
2882 if (pkt->entry_status & RF_BUSY)
2883 res = DID_BUS_BUSY << 16;
2885 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
2888 switch (pkt->entry_type) {
2889 case NOTIFY_ACK_TYPE:
2891 case STATUS_CONT_TYPE:
2892 case LOGINOUT_PORT_IOCB_TYPE:
2895 case ABORT_IOCB_TYPE:
2898 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2905 case ABTS_RESP_24XX:
2911 ql_log(ql_log_warn, vha, 0x5030,
2912 "Error entry - invalid handle/queue (%04x).\n", que);
2917 * qla24xx_mbx_completion() - Process mailbox command completions.
2918 * @vha: SCSI driver HA context
2919 * @mb0: Mailbox0 register
2922 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2926 uint16_t __iomem *wptr;
2927 struct qla_hw_data *ha = vha->hw;
2928 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2930 /* Read all mbox registers? */
2931 WARN_ON_ONCE(ha->mbx_count > 32);
2932 mboxes = (1ULL << ha->mbx_count) - 1;
2934 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2936 mboxes = ha->mcp->in_mb;
2938 /* Load return mailbox registers. */
2939 ha->flags.mbox_int = 1;
2940 ha->mailbox_out[0] = mb0;
2942 wptr = (uint16_t __iomem *)&reg->mailbox1;
2944 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2946 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
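/*
 * Sketch of the mailbox-capture gating above (illustrative): the
 * in_mb bitmask of the active mailbox command names which registers
 * the caller expects, so only those are latched; with no command
 * pending, every implemented register is captured.
 */
static inline void qla_capture_mbx_sketch(uint16_t *out,
	const uint16_t __iomem *wptr, uint64_t mboxes, int count)
{
	int cnt;

	for (cnt = 1; cnt < count; cnt++, wptr++) {
		if ((mboxes >> cnt) & 1)	/* register wanted? */
			out[cnt] = readw(wptr);
	}
}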
2954 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2955 struct abort_entry_24xx *pkt)
2957 const char func[] = "ABT_IOCB";
2959 struct srb_iocb *abt;
2961 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2965 abt = &sp->u.iocb_cmd;
2966 abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
2970 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
2971 struct pt_ls4_request *pkt, struct req_que *req)
2974 const char func[] = "LS4_IOCB";
2975 uint16_t comp_status;
2977 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2981 comp_status = le16_to_cpu(pkt->status);
2982 sp->done(sp, comp_status);
2986 * qla24xx_process_response_queue() - Process response queue entries.
2987 * @vha: SCSI driver HA context
2988 * @rsp: response queue
2990 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2991 struct rsp_que *rsp)
2993 struct sts_entry_24xx *pkt;
2994 struct qla_hw_data *ha = vha->hw;
2996 if (!ha->flags.fw_started)
2999 if (rsp->qpair->cpuid != smp_processor_id())
3000 qla_cpu_update(rsp->qpair, smp_processor_id());
3002 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
3003 pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
3006 if (rsp->ring_index == rsp->length) {
3007 rsp->ring_index = 0;
3008 rsp->ring_ptr = rsp->ring;
3013 if (pkt->entry_status != 0) {
3014 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
3017 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3023 switch (pkt->entry_type) {
3025 qla2x00_status_entry(vha, rsp, pkt);
3027 case STATUS_CONT_TYPE:
3028 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
3030 case VP_RPT_ID_IOCB_TYPE:
3031 qla24xx_report_id_acquisition(vha,
3032 (struct vp_rpt_id_entry_24xx *)pkt);
3034 case LOGINOUT_PORT_IOCB_TYPE:
3035 qla24xx_logio_entry(vha, rsp->req,
3036 (struct logio_entry_24xx *)pkt);
3039 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
3042 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
3044 case ABTS_RECV_24XX:
3045 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
3047 /* ensure that the ATIO queue is empty */
3048 qlt_handle_abts_recv(vha, rsp,
3052 qlt_24xx_process_atio_queue(vha, 1);
3055 case ABTS_RESP_24XX:
3058 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
3060 case PT_LS4_REQUEST:
3061 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
3064 case NOTIFY_ACK_TYPE:
3065 if (pkt->handle == QLA_TGT_SKIP_HANDLE)
3066 qlt_response_pkt_all_vps(vha, rsp,
3069 qla24xxx_nack_iocb_entry(vha, rsp->req,
3070 (struct nack_to_isp *)pkt);
3073 /* Do nothing in this case; this check is here to prevent it
3074 * from falling into the default case.
3075 */
3076 break;
3077 case ABORT_IOCB_TYPE:
3078 qla24xx_abort_iocb_entry(vha, rsp->req,
3079 (struct abort_entry_24xx *)pkt);
3082 qla24xx_mbx_iocb_entry(vha, rsp->req,
3083 (struct mbx_24xx_entry *)pkt);
3085 case VP_CTRL_IOCB_TYPE:
3086 qla_ctrlvp_completed(vha, rsp->req,
3087 (struct vp_ctrl_entry_24xx *)pkt);
3090 /* Type Not Supported. */
3091 ql_dbg(ql_dbg_async, vha, 0x5042,
3092 "Received unknown response pkt type %x "
3093 "entry status=%x.\n",
3094 pkt->entry_type, pkt->entry_status);
3097 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
3101 /* Adjust ring index */
3102 if (IS_P3P_TYPE(ha)) {
3103 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
3105 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
3107 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
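/*
 * Sketch of the ring-consumer pattern above (illustrative, assumed
 * field names): entries are consumed until one still carries the
 * RESPONSE_PROCESSED stamp, the index wraps at the ring length, and
 * each handled entry is re-stamped so a re-entered handler will not
 * process it twice.  The out-pointer register is written once, after
 * the loop, batching the doorbell update.
 */
static inline uint32_t qla_ring_advance_sketch(uint32_t index,
	uint32_t length)
{
	return ++index == length ? 0 : index;	/* wrap at ring end */
}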
3112 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
3116 struct qla_hw_data *ha = vha->hw;
3117 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3119 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3120 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3124 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
3125 RD_REG_DWORD(&reg->iobase_addr);
3126 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
3127 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
3128 rval == QLA_SUCCESS; cnt--) {
3130 WRT_REG_DWORD(&reg->iobase_window, 0x0001);
3133 rval = QLA_FUNCTION_TIMEOUT;
3135 if (rval == QLA_SUCCESS)
3139 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
3140 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
3141 rval == QLA_SUCCESS; cnt--) {
3143 WRT_REG_DWORD(&reg->iobase_window, 0x0003);
3146 rval = QLA_FUNCTION_TIMEOUT;
3148 if (rval != QLA_SUCCESS)
3152 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
3153 ql_log(ql_log_info, vha, 0x504c,
3154 "Additional code -- 0x55AA.\n");
3157 WRT_REG_DWORD(&reg->iobase_window, 0x0000);
3158 RD_REG_DWORD(&reg->iobase_window);
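/*
 * Sketch of the bounded register poll used above (hypothetical helper):
 * re-arm the window select on every iteration, back off briefly, and
 * convert loop exhaustion into QLA_FUNCTION_TIMEOUT instead of
 * spinning forever with the hardware lock held.
 */
static inline int qla_poll_bit0_sketch(uint32_t __iomem *reg,
	uint32_t arm, int tries)
{
	while (tries--) {
		if (readl(reg) & BIT_0)
			return QLA_SUCCESS;
		writel(arm, reg);	/* re-arm and retry */
		udelay(10);		/* brief back-off */
	}
	return QLA_FUNCTION_TIMEOUT;
}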
3162 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
3163 * @irq: interrupt number
3164 * @dev_id: SCSI driver HA context
3166 * Called by system whenever the host adapter generates an interrupt.
3168 * Returns handled flag.
3171 qla24xx_intr_handler(int irq, void *dev_id)
3173 scsi_qla_host_t *vha;
3174 struct qla_hw_data *ha;
3175 struct device_reg_24xx __iomem *reg;
3181 struct rsp_que *rsp;
3182 unsigned long flags;
3183 bool process_atio = false;
3185 rsp = (struct rsp_que *) dev_id;
3187 ql_log(ql_log_info, NULL, 0x5059,
3188 "%s: NULL response queue pointer.\n", __func__);
3193 reg = &ha->iobase->isp24;
3196 if (unlikely(pci_channel_offline(ha->pdev)))
3199 spin_lock_irqsave(&ha->hardware_lock, flags);
3200 vha = pci_get_drvdata(ha->pdev);
3201 for (iter = 50; iter--; ) {
3202 stat = RD_REG_DWORD(&reg->host_status);
3203 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3205 if (stat & HSRX_RISC_PAUSED) {
3206 if (unlikely(pci_channel_offline(ha->pdev)))
3209 hccr = RD_REG_DWORD(&reg->hccr);
3211 ql_log(ql_log_warn, vha, 0x504b,
3212 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3215 qla2xxx_check_risc_status(vha);
3217 ha->isp_ops->fw_dump(vha, 1);
3218 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3220 } else if ((stat & HSRX_RISC_INT) == 0)
3223 switch (stat & 0xff) {
3224 case INTR_ROM_MB_SUCCESS:
3225 case INTR_ROM_MB_FAILED:
3226 case INTR_MB_SUCCESS:
3227 case INTR_MB_FAILED:
3228 qla24xx_mbx_completion(vha, MSW(stat));
3229 status |= MBX_INTERRUPT;
3232 case INTR_ASYNC_EVENT:
3234 mb[1] = RD_REG_WORD(&reg->mailbox1);
3235 mb[2] = RD_REG_WORD(&reg->mailbox2);
3236 mb[3] = RD_REG_WORD(&reg->mailbox3);
3237 qla2x00_async_event(vha, rsp, mb);
3239 case INTR_RSP_QUE_UPDATE:
3240 case INTR_RSP_QUE_UPDATE_83XX:
3241 qla24xx_process_response_queue(vha, rsp);
3243 case INTR_ATIO_QUE_UPDATE_27XX:
3244 case INTR_ATIO_QUE_UPDATE:
3245 process_atio = true;
3247 case INTR_ATIO_RSP_QUE_UPDATE:
3248 process_atio = true;
3249 qla24xx_process_response_queue(vha, rsp);
3252 ql_dbg(ql_dbg_async, vha, 0x504f,
3253 "Unrecognized interrupt type (%d).\n", stat * 0xff);
3256 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT);
3257 RD_REG_DWORD_RELAXED(®->hccr);
3258 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
3261 qla2x00_handle_mbx_completion(ha, status);
3262 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3265 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3266 qlt_24xx_process_atio_queue(vha, 0);
3267 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
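/*
 * Sketch of the lock split used above (illustrative): ATIO work
 * noticed while hardware_lock is held is only flagged there, then
 * drained afterwards under the dedicated tgt.atio_lock, so the two
 * spinlocks are never nested inside one another.
 */
static void qla_deferred_atio_drain_sketch(struct scsi_qla_host *vha,
	bool pending)
{
	unsigned long flags;

	if (!pending)
		return;
	spin_lock_irqsave(&vha->hw->tgt.atio_lock, flags);
	qlt_24xx_process_atio_queue(vha, 0);
	spin_unlock_irqrestore(&vha->hw->tgt.atio_lock, flags);
}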
3274 qla24xx_msix_rsp_q(int irq, void *dev_id)
3276 struct qla_hw_data *ha;
3277 struct rsp_que *rsp;
3278 struct device_reg_24xx __iomem *reg;
3279 struct scsi_qla_host *vha;
3280 unsigned long flags;
3282 rsp = (struct rsp_que *) dev_id;
3284 ql_log(ql_log_info, NULL, 0x505a,
3285 "%s: NULL response queue pointer.\n", __func__);
3289 reg = &ha->iobase->isp24;
3291 spin_lock_irqsave(&ha->hardware_lock, flags);
3293 vha = pci_get_drvdata(ha->pdev);
3294 qla24xx_process_response_queue(vha, rsp);
3295 if (!ha->flags.disable_msix_handshake) {
3296 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3297 RD_REG_DWORD_RELAXED(&reg->hccr);
3299 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3305 qla24xx_msix_default(int irq, void *dev_id)
3307 scsi_qla_host_t *vha;
3308 struct qla_hw_data *ha;
3309 struct rsp_que *rsp;
3310 struct device_reg_24xx __iomem *reg;
3315 unsigned long flags;
3316 bool process_atio = false;
3318 rsp = (struct rsp_que *) dev_id;
3320 ql_log(ql_log_info, NULL, 0x505c,
3321 "%s: NULL response queue pointer.\n", __func__);
3325 reg = &ha->iobase->isp24;
3328 spin_lock_irqsave(&ha->hardware_lock, flags);
3329 vha = pci_get_drvdata(ha->pdev);
3331 stat = RD_REG_DWORD(&reg->host_status);
3332 if (qla2x00_check_reg32_for_disconnect(vha, stat))
3334 if (stat & HSRX_RISC_PAUSED) {
3335 if (unlikely(pci_channel_offline(ha->pdev)))
3338 hccr = RD_REG_DWORD(&reg->hccr);
3340 ql_log(ql_log_info, vha, 0x5050,
3341 "RISC paused -- HCCR=%x, Dumping firmware.\n",
3344 qla2xxx_check_risc_status(vha);
3346 ha->isp_ops->fw_dump(vha, 1);
3347 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3349 } else if ((stat & HSRX_RISC_INT) == 0)
3352 switch (stat & 0xff) {
3353 case INTR_ROM_MB_SUCCESS:
3354 case INTR_ROM_MB_FAILED:
3355 case INTR_MB_SUCCESS:
3356 case INTR_MB_FAILED:
3357 qla24xx_mbx_completion(vha, MSW(stat));
3358 status |= MBX_INTERRUPT;
3361 case INTR_ASYNC_EVENT:
3363 mb[1] = RD_REG_WORD(&reg->mailbox1);
3364 mb[2] = RD_REG_WORD(&reg->mailbox2);
3365 mb[3] = RD_REG_WORD(&reg->mailbox3);
3366 qla2x00_async_event(vha, rsp, mb);
3368 case INTR_RSP_QUE_UPDATE:
3369 case INTR_RSP_QUE_UPDATE_83XX:
3370 qla24xx_process_response_queue(vha, rsp);
3372 case INTR_ATIO_QUE_UPDATE_27XX:
3373 case INTR_ATIO_QUE_UPDATE:
3374 process_atio = true;
3376 case INTR_ATIO_RSP_QUE_UPDATE:
3377 process_atio = true;
3378 qla24xx_process_response_queue(vha, rsp);
3381 ql_dbg(ql_dbg_async, vha, 0x5051,
3382 "Unrecognized interrupt type (%d).\n", stat & 0xff);
3385 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3387 qla2x00_handle_mbx_completion(ha, status);
3388 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3391 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
3392 qlt_24xx_process_atio_queue(vha, 0);
3393 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
3400 qla2xxx_msix_rsp_q(int irq, void *dev_id)
3402 struct qla_hw_data *ha;
3403 struct qla_qpair *qpair;
3404 struct device_reg_24xx __iomem *reg;
3405 unsigned long flags;
3409 ql_log(ql_log_info, NULL, 0x505b,
3410 "%s: NULL response queue pointer.\n", __func__);
3415 /* Clear the interrupt, if enabled, for this response queue */
3416 if (unlikely(!ha->flags.disable_msix_handshake)) {
3417 reg = &ha->iobase->isp24;
3418 spin_lock_irqsave(&ha->hardware_lock, flags);
3419 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
3420 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3423 queue_work(ha->wq, &qpair->q_work);
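/*
 * Sketch of the per-qpair handoff above (illustrative; the work
 * handler body is assumed): the MSI-X vector does the minimum in
 * hard-irq context -- at most an interrupt acknowledge -- and defers
 * ring processing to a per-queue work item so completions run on the
 * CPU the vector is mapped to.
 */
static void qla_qpair_work_sketch(struct work_struct *work)
{
	struct qla_qpair *qpair = container_of(work, struct qla_qpair,
	    q_work);

	/* drain this qpair's response ring outside hard-irq context */
	(void)qpair;
}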
3428 /* Interrupt handling helpers. */
3430 struct qla_init_msix_entry {
3432 irq_handler_t handler;
3435 static const struct qla_init_msix_entry msix_entries[] = {
3436 { "default", qla24xx_msix_default },
3437 { "rsp_q", qla24xx_msix_rsp_q },
3438 { "atio_q", qla83xx_msix_atio_q },
3439 { "qpair_multiq", qla2xxx_msix_rsp_q },
3442 static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
3443 { "qla2xxx (default)", qla82xx_msix_default },
3444 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
3448 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3451 struct qla_msix_entry *qentry;
3452 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3453 int min_vecs = QLA_BASE_VECTORS;
3454 struct irq_affinity desc = {
3455 .pre_vectors = QLA_BASE_VECTORS,
3458 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
3459 IS_ATIO_MSIX_CAPABLE(ha)) {
3464 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
3465 /* user wants to control IRQ setting for target mode */
3466 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
3467 ha->msix_count, PCI_IRQ_MSIX);
3469 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
3470 ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
3474 ql_log(ql_log_fatal, vha, 0x00c7,
3475 "MSI-X: Failed to enable support, "
3476 "giving up -- %d/%d.\n",
3477 ha->msix_count, ret);
3479 } else if (ret < ha->msix_count) {
3480 ql_log(ql_log_info, vha, 0x00c6,
3481 "MSI-X: Using %d vectors\n", ret);
3482 ha->msix_count = ret;
3483 /* Recalculate queue values */
3484 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
3485 ha->max_req_queues = ha->msix_count - 1;
3487 /* ATIOQ needs 1 vector. That's 1 less QPair */
3488 if (QLA_TGT_MODE_ENABLED())
3489 ha->max_req_queues--;
3491 ha->max_rsp_queues = ha->max_req_queues;
3493 ha->max_qpairs = ha->max_req_queues - 1;
3494 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
3495 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
3498 vha->irq_offset = desc.pre_vectors;
3499 ha->msix_entries = kcalloc(ha->msix_count,
3500 sizeof(struct qla_msix_entry),
3502 if (!ha->msix_entries) {
3503 ql_log(ql_log_fatal, vha, 0x00c8,
3504 "Failed to allocate memory for ha->msix_entries.\n");
3508 ha->flags.msix_enabled = 1;
3510 for (i = 0; i < ha->msix_count; i++) {
3511 qentry = &ha->msix_entries[i];
3512 qentry->vector = pci_irq_vector(ha->pdev, i);
3514 qentry->have_irq = 0;
3516 qentry->handle = NULL;
3519 /* Enable MSI-X vectors for the base queue */
3520 for (i = 0; i < QLA_BASE_VECTORS; i++) {
3521 qentry = &ha->msix_entries[i];
3522 qentry->handle = rsp;
3524 scnprintf(qentry->name, sizeof(qentry->name),
3525 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
3526 if (IS_P3P_TYPE(ha))
3527 ret = request_irq(qentry->vector,
3528 qla82xx_msix_entries[i].handler,
3529 0, qla82xx_msix_entries[i].name, rsp);
3531 ret = request_irq(qentry->vector,
3532 msix_entries[i].handler,
3533 0, qentry->name, rsp);
3535 goto msix_register_fail;
3536 qentry->have_irq = 1;
3541 * If target mode is enabled, also request the vector for the ATIO
3542 * queue.
3543 */
3544 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
3545 IS_ATIO_MSIX_CAPABLE(ha)) {
3546 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
3548 qentry->handle = rsp;
3549 scnprintf(qentry->name, sizeof(qentry->name),
3550 "qla2xxx%lu_%s", vha->host_no,
3551 msix_entries[QLA_ATIO_VECTOR].name);
3553 ret = request_irq(qentry->vector,
3554 msix_entries[QLA_ATIO_VECTOR].handler,
3555 0, qentry->name, rsp);
3556 qentry->have_irq = 1;
3561 ql_log(ql_log_fatal, vha, 0x00cb,
3562 "MSI-X: unable to register handler -- %x/%d.\n",
3563 qentry->vector, ret);
3564 qla2x00_free_irqs(vha);
3569 /* Enable MSI-X vector for response queue update for queue 0 */
3570 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3571 if (ha->msixbase && ha->mqiobase &&
3572 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3577 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
3580 ql_dbg(ql_dbg_multiq, vha, 0xc005,
3581 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3582 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3583 ql_dbg(ql_dbg_init, vha, 0x0055,
3584 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
3585 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
3591 pci_free_irq_vectors(ha->pdev);
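/*
 * Sketch of the vector-allocation pattern above (standalone,
 * illustrative): .pre_vectors shields the base vectors (default,
 * rsp_q and, in target mode, atio_q) from affinity spreading so only
 * the qpair vectors are distributed across CPUs.
 */
static int qla_alloc_vectors_sketch(struct pci_dev *pdev, int base,
	int want)
{
	struct irq_affinity desc = { .pre_vectors = base };

	return pci_alloc_irq_vectors_affinity(pdev, base, want,
	    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
}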
3596 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
3598 int ret = QLA_FUNCTION_FAILED;
3599 device_reg_t *reg = ha->iobase;
3600 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3602 /* If possible, enable MSI-X. */
3603 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
3604 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
3605 !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
3608 if (ql2xenablemsix == 2)
3611 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
3612 (ha->pdev->subsystem_device == 0x7040 ||
3613 ha->pdev->subsystem_device == 0x7041 ||
3614 ha->pdev->subsystem_device == 0x1705)) {
3615 ql_log(ql_log_warn, vha, 0x0034,
3616 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
3617 ha->pdev->subsystem_vendor,
3618 ha->pdev->subsystem_device);
3622 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
3623 ql_log(ql_log_warn, vha, 0x0035,
3624 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n",
3625 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
3629 ret = qla24xx_enable_msix(ha, rsp);
3631 ql_dbg(ql_dbg_init, vha, 0x0036,
3632 "MSI-X: Enabled (0x%X, 0x%X).\n",
3633 ha->chip_revision, ha->fw_attributes);
3634 goto clear_risc_ints;
3639 ql_log(ql_log_info, vha, 0x0037,
3640 "Falling back-to MSI mode -- ret=%d.\n", ret);
3642 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
3643 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
3644 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3647 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
3649 ql_dbg(ql_dbg_init, vha, 0x0038,
3651 ha->flags.msi_enabled = 1;
3653 ql_log(ql_log_warn, vha, 0x0039,
3654 "Falling back-to INTa mode -- ret=%d.\n", ret);
3657 /* Skip INTx on ISP82xx. */
3658 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
3659 return QLA_FUNCTION_FAILED;
3661 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
3662 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
3663 QLA2XXX_DRIVER_NAME, rsp);
3665 ql_log(ql_log_warn, vha, 0x003a,
3666 "Failed to reserve interrupt %d already in use.\n",
3669 } else if (!ha->flags.msi_enabled) {
3670 ql_dbg(ql_dbg_init, vha, 0x0125,
3671 "INTa mode: Enabled.\n");
3672 ha->flags.mr_intr_valid = 1;
3676 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
3679 spin_lock_irq(&ha->hardware_lock);
3680 WRT_REG_WORD(&reg->isp.semaphore, 0);
3681 spin_unlock_irq(&ha->hardware_lock);
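/*
 * Sketch of the fallback ladder implemented above (illustrative): the
 * generic PCI helper can express the same MSI-X -> MSI -> INTx
 * preference in one call, though the driver steps down explicitly
 * because INTx additionally needs a shared handler and ISP82xx must
 * never fall back past MSI.
 */
static int qla_irq_fallback_sketch(struct pci_dev *pdev)
{
	return pci_alloc_irq_vectors(pdev, 1, 1,
	    PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
}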
3688 qla2x00_free_irqs(scsi_qla_host_t *vha)
3690 struct qla_hw_data *ha = vha->hw;
3691 struct rsp_que *rsp;
3692 struct qla_msix_entry *qentry;
3696 * We need to check that ha->rsp_q_map is valid in case we are called
3697 * from a probe failure context.
3699 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3701 rsp = ha->rsp_q_map[0];
3703 if (ha->flags.msix_enabled) {
3704 for (i = 0; i < ha->msix_count; i++) {
3705 qentry = &ha->msix_entries[i];
3706 if (qentry->have_irq) {
3707 irq_set_affinity_notifier(qentry->vector, NULL);
3708 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
3711 kfree(ha->msix_entries);
3712 ha->msix_entries = NULL;
3713 ha->flags.msix_enabled = 0;
3714 ql_dbg(ql_dbg_init, vha, 0x0042,
3715 "Disabled MSI-X.\n");
3717 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
3721 pci_free_irq_vectors(ha->pdev);
3724 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
3725 struct qla_msix_entry *msix, int vector_type)
3727 const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
3728 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3731 scnprintf(msix->name, sizeof(msix->name),
3732 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
3733 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
3735 ql_log(ql_log_fatal, vha, 0x00e6,
3736 "MSI-X: Unable to register handler -- %x/%d.\n",
3741 msix->handle = qpair;