/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
        uint16_t cflags;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        struct scsi_qla_host *vha = sp->fcport->vha;

        cflags = 0;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cflags = CF_WRITE;
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cflags = CF_READ;
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }
        return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
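 *
 * A worked example (derived from the arithmetic below): the command IOCB
 * holds 3 DSDs and each Continuation Type 0 IOCB holds 7 more, so 10 DSDs
 * need 1 + (10 - 3) / 7 = 2 IOCB entries.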
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 3) {
                iocbs += (dsds - 3) / 7;
                if ((dsds - 3) % 7)
                        iocbs++;
        }
        return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
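 *
 * A worked example (derived from the arithmetic below): the command IOCB
 * holds 2 DSDs and each Continuation Type 1 IOCB holds 5 more, so 12 DSDs
 * need 1 + (12 - 2) / 5 = 3 IOCB entries.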
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
        uint16_t iocbs;

        iocbs = 1;
        if (dsds > 2) {
                iocbs += (dsds - 2) / 5;
                if ((dsds - 2) % 5)
                        iocbs++;
        }
        return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
        cont_entry_t *cont_pkt;
        struct req_que *req = vha->req;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) =
            __constant_cpu_to_le32(CONTINUE_TYPE);

        return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 * @req: request queue
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
        cont_a64_entry_t *cont_pkt;

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else {
                req->ring_ptr++;
        }

        cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

        /* Load packet defaults. */
        *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
            __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
            __constant_cpu_to_le32(CONTINUE_A64_TYPE);

        return (cont_pkt);
}

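/**
 * qla24xx_configure_prot_mode() - Map the SCSI protection operation to
 * firmware protection options.
 * @sp: SRB command to process
 * @fw_prot_opts: returned PO_MODE_DIF_* firmware protection options
 *
 * Returns the number of protection scatter/gather entries for the command.
 */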
static inline int
qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t guard = scsi_host_get_guard(cmd->device->host);

        /* We always use DIF bundling for best performance */
        *fw_prot_opts = 0;

        /* Translate SCSI opcode to a protection opcode */
        switch (scsi_get_prot_op(cmd)) {
        case SCSI_PROT_READ_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_WRITE_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_READ_INSERT:
                *fw_prot_opts |= PO_MODE_DIF_INSERT;
                break;
        case SCSI_PROT_WRITE_STRIP:
                *fw_prot_opts |= PO_MODE_DIF_REMOVE;
                break;
        case SCSI_PROT_READ_PASS:
        case SCSI_PROT_WRITE_PASS:
                if (guard & SHOST_DIX_GUARD_IP)
                        *fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
                else
                        *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        default:        /* Normal Request */
                *fw_prot_opts |= PO_MODE_DIF_PASS;
                break;
        }

        return scsi_prot_sg_count(cmd);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 2 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Three DSDs are available in the Command Type 2 IOCB */
        avail_dsds = 3;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                cont_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }

                *cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 3 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_A64_TYPE);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

        /* Two DSDs are available in the Command Type 3 IOCB */
        avail_dsds = 2;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
        int             ret, nseg;
        unsigned long   flags;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        uint32_t        *clr_ptr;
        uint32_t        index;
        uint32_t        handle;
        cmd_entry_t     *cmd_pkt;
        uint16_t        cnt;
        uint16_t        req_cnt;
        uint16_t        tot_dsds;
        struct device_reg_2xxx __iomem *reg;
        struct qla_hw_data *ha;
        struct req_que *req;
        struct rsp_que *rsp;
        char            tag[2];

        /* Setup device pointers. */
        ret = 0;
        vha = sp->fcport->vha;
        ha = vha->hw;
        reg = &ha->iobase->isp;
        cmd = GET_CMD_SP(sp);
        req = ha->req_q_map[0];
        rsp = ha->rsp_q_map[0];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;

        /* Send marker if required */
        if (vha->marker_needed != 0) {
                if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
                    QLA_SUCCESS) {
                        return (QLA_FUNCTION_FAILED);
                }
                vha->marker_needed = 0;
        }

        /* Acquire ring specific lock */
        spin_lock_irqsave(&ha->hardware_lock, flags);

        /* Check for room in outstanding command list. */
        handle = req->current_outstanding_cmd;
        for (index = 1; index < req->num_outstanding_cmds; index++) {
                handle++;
                if (handle == req->num_outstanding_cmds)
                        handle = 1;
                if (!req->outstanding_cmds[handle])
                        break;
        }
        if (index == req->num_outstanding_cmds)
                goto queuing_error;

        /* Map the sg table so we have an accurate count of sg entries needed */
        if (scsi_sg_count(cmd)) {
                nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
                    scsi_sg_count(cmd), cmd->sc_data_direction);
                if (unlikely(!nseg))
                        goto queuing_error;
        } else
                nseg = 0;

        tot_dsds = nseg;

        /* Calculate the number of request entries needed. */
        req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
                cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
                        req->cnt = req->length -
                            (req->ring_index - cnt);
                /* If still no head room then bail out */
                if (req->cnt < (req_cnt + 2))
                        goto queuing_error;
        }

        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
        sp->handle = handle;
        cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;

        cmd_pkt = (cmd_entry_t *)req->ring_ptr;
        cmd_pkt->handle = handle;
        /* Zero out remaining portion of packet. */
        clr_ptr = (uint32_t *)cmd_pkt + 2;
        memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
        cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

        /* Set target ID and LUN number */
        SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
        cmd_pkt->lun = cpu_to_le16(cmd->device->lun);

        /* Update tagged queuing modifier */
        if (scsi_populate_tag_msg(cmd, tag)) {
                switch (tag[0]) {
                case HEAD_OF_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_HEAD_TAG);
                        break;
                case ORDERED_QUEUE_TAG:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_ORDERED_TAG);
                        break;
                default:
                        cmd_pkt->control_flags =
                            __constant_cpu_to_le16(CF_SIMPLE_TAG);
                        break;
                }
        } else {
                cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
        }

        /* Load SCSI command packet. */
        memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
        cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

        /* Build IOCB segments */
        ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
        wmb();

        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
                req->ring_index = 0;
                req->ring_ptr = req->ring;
        } else
                req->ring_ptr++;

        sp->flags |= SRB_DMA_VALID;

        /* Set chip new ring index. */
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));     /* PCI Posting. */

        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
            rsp->ring_ptr->signature != RESPONSE_PROCESSED)
                qla2x00_process_response_queue(rsp);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return (QLA_SUCCESS);

queuing_error:
        if (tot_dsds)
                scsi_dma_unmap(cmd);

        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        return (QLA_FUNCTION_FAILED);
}

/**
 * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue
 */
void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
        struct qla_hw_data *ha = vha->hw;
        device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);

        if (IS_P3P_TYPE(ha)) {
                qla82xx_start_iocbs(vha);
        } else {
                /* Adjust ring index. */
                req->ring_index++;
                if (req->ring_index == req->length) {
                        req->ring_index = 0;
                        req->ring_ptr = req->ring;
                } else
                        req->ring_ptr++;

                /* Set chip new ring index. */
                if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                        WRT_REG_DWORD(req->req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
                } else if (IS_QLAFX00(ha)) {
                        WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
                        QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
                } else if (IS_FWI2_CAPABLE(ha)) {
                        WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
                        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
                } else {
                        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
                                req->ring_index);
                        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
                }
        }
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
static int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        struct rsp_que *rsp, uint16_t loop_id,
                        uint64_t lun, uint8_t type)
{
        mrk_entry_t *mrk;
        struct mrk_entry_24xx *mrk24 = NULL;

        struct qla_hw_data *ha = vha->hw;
        scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

        req = ha->req_q_map[0];
        mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
        if (mrk == NULL) {
                ql_log(ql_log_warn, base_vha, 0x3026,
                    "Failed to allocate Marker IOCB.\n");

                return (QLA_FUNCTION_FAILED);
        }

        mrk->entry_type = MARKER_TYPE;
        mrk->modifier = type;
        if (type != MK_SYNC_ALL) {
                if (IS_FWI2_CAPABLE(ha)) {
                        mrk24 = (struct mrk_entry_24xx *) mrk;
                        mrk24->nport_handle = cpu_to_le16(loop_id);
                        int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
                        mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16((uint16_t)lun);
                }
        }
        wmb();

        qla2x00_start_iocbs(vha, req);

        return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
                uint8_t type)
{
        int ret;
        unsigned long flags = 0;

        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        return (ret);
}

/*
 * qla2x00_issue_marker
 *
 * Issue marker
 * Caller CAN have hardware lock held as specified by ha_locked parameter.
 * Might release it, then reacquire.
 */
int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
{
        if (ha_locked) {
                if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        } else {
                if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
                                        MK_SYNC_ALL) != QLA_SUCCESS)
                        return QLA_FUNCTION_FAILED;
        }
        vha->marker_needed = 0;

        return QLA_SUCCESS;
}

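/**
 * qla24xx_build_scsi_type_6_iocbs() - Build IOCB command utilizing Command
 * Type 6 IOCB types.
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 6 IOCB
 * @tot_dsds: Total number of segments to transfer
 *
 * Command Type 6 IOCBs reference external DSD lists drawn from the global
 * pool (ha->gbl_dsd_list) rather than inlining the data segments.
 *
 * Returns zero on success.
 */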
static inline int
qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
        uint16_t tot_dsds)
{
        uint32_t *cur_dsd = NULL;
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct scsi_cmnd *cmd;
        struct scatterlist *cur_seg;
        uint32_t *dsd_seg;
        void *next_dsd;
        uint8_t avail_dsds;
        uint8_t first_iocb = 1;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct ct6_dsd *ctx;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 6 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
                __constant_cpu_to_le32(COMMAND_TYPE_6);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return 0;
        }

        vha = sp->fcport->vha;
        ha = vha->hw;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->control_flags =
                    __constant_cpu_to_le16(CF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        cur_seg = scsi_sglist(cmd);
        ctx = GET_CMD_CTX_SP(sp);

        while (tot_dsds) {
                avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
                    QLA_DSDS_PER_IOCB : tot_dsds;
                tot_dsds -= avail_dsds;
                dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;

                dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
                    struct dsd_dma, list);
                next_dsd = dsd_ptr->dsd_addr;
                list_del(&dsd_ptr->list);
                ha->gbl_dsd_avail--;
                list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
                ctx->dsd_use_cnt++;
                ha->gbl_dsd_inuse++;

                if (first_iocb) {
                        first_iocb = 0;
                        dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
                        *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
                } else {
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(dsd_list_len);
                }
                cur_dsd = (uint32_t *)next_dsd;
                while (avail_dsds) {
                        dma_addr_t      sle_dma;

                        sle_dma = sg_dma_address(cur_seg);
                        *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                        *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
                        cur_seg = sg_next(cur_seg);
                        avail_dsds--;
                }
        }

        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
        return 0;
}

/**
 * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of DSD lists needed to store @dsds.
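 *
 * A worked example (derived from the arithmetic below): with
 * QLA_DSDS_PER_IOCB descriptors per list, 2 * QLA_DSDS_PER_IOCB + 1
 * descriptors need 2 + 1 = 3 lists.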
 */
inline uint16_t
qla24xx_calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = 0;

        dsd_lists = (dsds / QLA_DSDS_PER_IOCB);
        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
        uint16_t        avail_dsds;
        uint32_t        *cur_dsd;
        scsi_qla_host_t *vha;
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;
        struct req_que *req;

        cmd = GET_CMD_SP(sp);

        /* Update entry type to indicate Command Type 7 IOCB */
        *((uint32_t *)(&cmd_pkt->entry_type)) =
            __constant_cpu_to_le32(COMMAND_TYPE_7);

        /* No data transfer */
        if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
                cmd_pkt->byte_count = __constant_cpu_to_le32(0);
                return;
        }

        vha = sp->fcport->vha;
        req = vha->req;

        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_WRITE_DATA);
                vha->qla_stats.output_bytes += scsi_bufflen(cmd);
                vha->qla_stats.output_requests++;
        } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
                cmd_pkt->task_mgmt_flags =
                    __constant_cpu_to_le16(TMF_READ_DATA);
                vha->qla_stats.input_bytes += scsi_bufflen(cmd);
                vha->qla_stats.input_requests++;
        }

        /* One DSD is available in the Command Type 7 IOCB */
        avail_dsds = 1;
        cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

        /* Load data segments */
        scsi_for_each_sg(cmd, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
                cont_a64_entry_t *cont_pkt;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        /*
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
                        cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }

                sle_dma = sg_dma_address(sg);
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
}

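/*
 * Firmware view of a T10 DIF context: the reference and application tags
 * plus their validation/replacement masks, as programmed into the CRC
 * context of a Command Type CRC_2 IOCB below.
 */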
struct fw_dif_context {
        uint32_t ref_tag;
        uint16_t app_tag;
        uint8_t ref_tag_mask[4];        /* Validation/Replacement Mask */
        uint8_t app_tag_mask[2];        /* Validation/Replacement Mask */
};

/*
 * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command.
 */
static inline void
qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
    unsigned int protcnt)
{
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);

        switch (scsi_get_prot_type(cmd)) {
        case SCSI_PROT_DIF_TYPE0:
                /*
                 * No check for ql2xenablehba_err_chk, as it would be an
                 * I/O error if hba tag generation is not done.
                 */
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /*
         * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
         * match LBA in CDB + N
         */
        case SCSI_PROT_DIF_TYPE2:
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;

        /* For Type 3 protection: 16 bit GUARD only */
        case SCSI_PROT_DIF_TYPE3:
                pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
                        pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
                                                                0x00;
                break;

        /*
         * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
         * 16 bit app tag.
         */
        case SCSI_PROT_DIF_TYPE1:
                pkt->ref_tag = cpu_to_le32((uint32_t)
                    (0xffffffff & scsi_get_lba(cmd)));
                pkt->app_tag = __constant_cpu_to_le16(0);
                pkt->app_tag_mask[0] = 0x0;
                pkt->app_tag_mask[1] = 0x0;

                if (!qla2x00_hba_err_chk_enabled(sp))
                        break;

                /* enable ALL bytes of the ref tag */
                pkt->ref_tag_mask[0] = 0xff;
                pkt->ref_tag_mask[1] = 0xff;
                pkt->ref_tag_mask[2] = 0xff;
                pkt->ref_tag_mask[3] = 0xff;
                break;
        }
}

struct qla2_sgx {
        dma_addr_t              dma_addr;       /* OUT */
        uint32_t                dma_len;        /* OUT */

        uint32_t                tot_bytes;      /* IN */
        struct scatterlist      *cur_sg;        /* IN */

        /* for bookkeeping, bzero on initial invocation */
        uint32_t                bytes_consumed;
        uint32_t                num_bytes;
        uint32_t                tot_partial;

        /* for debugging */
        uint32_t                num_sg;
        srb_t                   *sp;
};

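/*
 * qla24xx_get_one_block_sg() - Advance the scatter/gather walk by at most
 * one protection-interval block of @blk_sz bytes.
 *
 * On each call the next DMA address/length pair is returned in @sgx;
 * @partial is set when the current s/g element ends before a full block
 * has been consumed.  Returns 0 once all bytes have been walked, 1 while
 * more data remains.
 */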
static int
qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        uint32_t *partial)
{
        struct scatterlist *sg;
        uint32_t cumulative_partial, sg_len;
        dma_addr_t sg_dma_addr;

        if (sgx->num_bytes == sgx->tot_bytes)
                return 0;

        sg = sgx->cur_sg;
        cumulative_partial = sgx->tot_partial;

        sg_dma_addr = sg_dma_address(sg);
        sg_len = sg_dma_len(sg);

        sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;

        if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
                sgx->dma_len = (blk_sz - cumulative_partial);
                sgx->tot_partial = 0;
                sgx->num_bytes += blk_sz;
                *partial = 0;
        } else {
                sgx->dma_len = sg_len - sgx->bytes_consumed;
                sgx->tot_partial += sgx->dma_len;
                *partial = 1;
        }

        sgx->bytes_consumed += sgx->dma_len;

        if (sg_len == sgx->bytes_consumed) {
                sg = sg_next(sg);
                sgx->num_sg++;
                sgx->cur_sg = sg;
                sgx->bytes_consumed = 0;
        }

        return 1;
}

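/*
 * qla24xx_walk_and_build_sglist_no_difb() - Build the DSD lists for a
 * DIF command without DIF bundling, re-slicing the data s/g list on
 * protection-interval boundaries and appending an 8-byte DIF tuple
 * after every full interval.
 *
 * Exactly one of @sp (initiator command) or @tc (target command) must be
 * supplied.  Returns 0 on success, 1 on allocation failure.
 */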
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg_prot;
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;

        uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
        struct scsi_cmnd *cmd;
        struct scsi_qla_host *vha;

        memset(&sgx, 0, sizeof(struct qla2_sgx));
        if (sp) {
                vha = sp->fcport->vha;
                cmd = GET_CMD_SP(sp);
                prot_int = cmd->device->sector_size;

                sgx.tot_bytes = scsi_bufflen(cmd);
                sgx.cur_sg = scsi_sglist(cmd);
                sgx.sp = sp;

                sg_prot = scsi_prot_sglist(cmd);
        } else if (tc) {
                vha = tc->vha;
                prot_int      = tc->blk_sz;
                sgx.tot_bytes = tc->bufflen;
                sgx.cur_sg    = tc->sg;
                sg_prot       = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
                sle_dma = sgx.dma_addr;
                sle_dma_len = sgx.dma_len;
alloc_and_fill:
                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sle_dma_len);
                avail_dsds--;

                if (partial == 0) {
                        /* Got a full protection interval */
                        sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
                        sle_dma_len = 8;

                        tot_prot_dma_len += sle_dma_len;
                        if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
                                tot_prot_dma_len = 0;
                                sg_prot = sg_next(sg_prot);
                        }

                        partial = 1; /* So as to not re-enter this block */
                        goto alloc_and_fill;
                }
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

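/*
 * qla24xx_walk_and_build_sglist() - Build the chained DSD lists for the
 * data scatter/gather entries of an initiator (@sp) or target (@tc)
 * command.  Returns 0 on success, 1 on allocation failure.
 */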
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
        struct scsi_cmnd *cmd;
        struct scsi_qla_host *vha;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_sglist(cmd);
                vha = sp->fcport->vha;
        } else if (tc) {
                sgl = tc->sg;
                vha = tc->vha;
        } else {
                BUG();
                return 1;
        }

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                        QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

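/*
 * qla24xx_walk_and_build_prot_sglist() - Build the chained DSD lists for
 * the protection (DIF) scatter/gather entries of an initiator (@sp) or
 * target (@tc) command.  Returns 0 on success, 1 on allocation failure.
 */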
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
{
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
        struct scatterlist *sg, *sgl;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
        uint16_t used_dsds = tot_dsds;
        struct scsi_qla_host *vha;

        if (sp) {
                cmd = GET_CMD_SP(sp);
                sgl = scsi_prot_sglist(cmd);
                vha = sp->fcport->vha;
        } else if (tc) {
                vha = tc->vha;
                sgl = tc->prot_sg;
        } else {
                BUG();
                return 1;
        }

        ql_dbg(ql_dbg_tgt, vha, 0xe021,
                "%s: enter\n", __func__);

        for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;

                /* Allocate additional continuation packets? */
                if (avail_dsds == 0) {
                        avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
                                                QLA_DSDS_PER_IOCB : used_dsds;
                        dsd_list_len = (avail_dsds + 1) * 12;
                        used_dsds -= avail_dsds;

                        /* allocate tracking DS */
                        dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
                        if (!dsd_ptr)
                                return 1;

                        /* allocate new list */
                        dsd_ptr->dsd_addr = next_dsd =
                            dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
                                &dsd_ptr->dsd_list_dma);

                        if (!next_dsd) {
                                /*
                                 * Need to cleanup only this dsd_ptr, rest
                                 * will be done by sp_free_dma()
                                 */
                                kfree(dsd_ptr);
                                return 1;
                        }

                        if (sp) {
                                list_add_tail(&dsd_ptr->list,
                                    &((struct crc_context *)
                                            sp->u.scmd.ctx)->dsd_list);

                                sp->flags |= SRB_CRC_CTX_DSD_VALID;
                        } else {
                                list_add_tail(&dsd_ptr->list,
                                    &(tc->ctx->dsd_list));
                                tc->ctx_dsd_alloced = 1;
                        }

                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
                        *cur_dsd++ = dsd_list_len;
                        cur_dsd = (uint32_t *)next_dsd;
                }
                sle_dma = sg_dma_address(sg);

                *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
                *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));

                avail_dsds--;
        }
        /* Null termination */
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        *cur_dsd++ = 0;
        return 0;
}

/**
 * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
 * Type CRC_2 IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type CRC_2 IOCB
 * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: protection options for the firmware
 */
1247 static inline int
1248 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1249     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1250 {
1251         uint32_t                *cur_dsd, *fcp_dl;
1252         scsi_qla_host_t         *vha;
1253         struct scsi_cmnd        *cmd;
1254         int                     sgc;
1255         uint32_t                total_bytes = 0;
1256         uint32_t                data_bytes;
1257         uint32_t                dif_bytes;
1258         uint8_t                 bundling = 1;
1259         uint16_t                blk_size;
1260         uint8_t                 *clr_ptr;
1261         struct crc_context      *crc_ctx_pkt = NULL;
1262         struct qla_hw_data      *ha;
1263         uint8_t                 additional_fcpcdb_len;
1264         uint16_t                fcp_cmnd_len;
1265         struct fcp_cmnd         *fcp_cmnd;
1266         dma_addr_t              crc_ctx_dma;
1267         char                    tag[2];
1268
1269         cmd = GET_CMD_SP(sp);
1270
1271         sgc = 0;
1272         /* Update entry type to indicate Command Type CRC_2 IOCB */
1273         *((uint32_t *)(&cmd_pkt->entry_type)) =
1274             __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
1275
1276         vha = sp->fcport->vha;
1277         ha = vha->hw;
1278
1279         /* No data transfer */
1280         data_bytes = scsi_bufflen(cmd);
1281         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1282                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1283                 return QLA_SUCCESS;
1284         }
1285
1286         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1287
1288         /* Set transfer direction */
1289         if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1290                 cmd_pkt->control_flags =
1291                     __constant_cpu_to_le16(CF_WRITE_DATA);
1292         } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1293                 cmd_pkt->control_flags =
1294                     __constant_cpu_to_le16(CF_READ_DATA);
1295         }
1296
1297         if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1298             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1299             (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1300             (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1301                 bundling = 0;
1302
1303         /* Allocate CRC context from global pool */
1304         crc_ctx_pkt = sp->u.scmd.ctx =
1305             dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1306
1307         if (!crc_ctx_pkt)
1308                 goto crc_queuing_error;
1309
1310         /* Zero out CTX area. */
1311         clr_ptr = (uint8_t *)crc_ctx_pkt;
1312         memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1313
1314         crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1315
1316         sp->flags |= SRB_CRC_CTX_DMA_VALID;
1317
1318         /* Set handle */
1319         crc_ctx_pkt->handle = cmd_pkt->handle;
1320
1321         INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1322
1323         qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1324             &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1325
1326         cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1327         cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1328         cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1329
1330         /* Determine SCSI command length -- align to 4 byte boundary */
1331         if (cmd->cmd_len > 16) {
1332                 additional_fcpcdb_len = cmd->cmd_len - 16;
1333                 if ((cmd->cmd_len % 4) != 0) {
1334                         /* SCSI cmd > 16 bytes must be multiple of 4 */
1335                         goto crc_queuing_error;
1336                 }
1337                 fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1338         } else {
1339                 additional_fcpcdb_len = 0;
1340                 fcp_cmnd_len = 12 + 16 + 4;
1341         }
1342
1343         fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1344
1345         fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1346         if (cmd->sc_data_direction == DMA_TO_DEVICE)
1347                 fcp_cmnd->additional_cdb_len |= 1;
1348         else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1349                 fcp_cmnd->additional_cdb_len |= 2;
1350
1351         int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1352         memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1353         cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1354         cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1355             LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1356         cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1357             MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1358         fcp_cmnd->task_management = 0;
1359
1360         /*
1361          * Update tagged queuing modifier if using command tag queuing
1362          */
1363         if (scsi_populate_tag_msg(cmd, tag)) {
1364                 switch (tag[0]) {
1365                 case HEAD_OF_QUEUE_TAG:
1366                     fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
1367                     break;
1368                 case ORDERED_QUEUE_TAG:
1369                     fcp_cmnd->task_attribute = TSK_ORDERED;
1370                     break;
1371                 default:
1372                     fcp_cmnd->task_attribute = TSK_SIMPLE;
1373                     break;
1374                 }
1375         } else {
1376                 fcp_cmnd->task_attribute = TSK_SIMPLE;
1377         }
1378
1379         cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1380
1381         /* Compute dif len and adjust data len to incude protection */
1382         dif_bytes = 0;
1383         blk_size = cmd->device->sector_size;
1384         dif_bytes = (data_bytes / blk_size) * 8;
1385
1386         switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1387         case SCSI_PROT_READ_INSERT:
1388         case SCSI_PROT_WRITE_STRIP:
1389             total_bytes = data_bytes;
1390             data_bytes += dif_bytes;
1391             break;
1392
1393         case SCSI_PROT_READ_STRIP:
1394         case SCSI_PROT_WRITE_INSERT:
1395         case SCSI_PROT_READ_PASS:
1396         case SCSI_PROT_WRITE_PASS:
1397             total_bytes = data_bytes + dif_bytes;
1398             break;
1399         default:
1400             BUG();
1401         }
1402
1403         if (!qla2x00_hba_err_chk_enabled(sp))
1404                 fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1405         /* HBA error checking enabled */
1406         else if (IS_PI_UNINIT_CAPABLE(ha)) {
1407                 if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1408                     || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1409                         SCSI_PROT_DIF_TYPE2))
1410                         fw_prot_opts |= BIT_10;
1411                 else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1412                     SCSI_PROT_DIF_TYPE3)
1413                         fw_prot_opts |= BIT_11;
1414         }
1415
1416         if (!bundling) {
1417                 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1418         } else {
1419                 /*
1420                  * Configure Bundling if we need to fetch interlaving
1421                  * protection PCI accesses
1422                  */
1423                 fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1424                 crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1425                 crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1426                                                         tot_prot_dsds);
1427                 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1428         }
1429
1430         /* Finish the common fields of CRC pkt */
1431         crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1432         crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1433         crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1434         crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
1435         /* Fibre channel byte count */
1436         cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1437         fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1438             additional_fcpcdb_len);
1439         *fcp_dl = htonl(total_bytes);
1440
1441         if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1442                 cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1443                 return QLA_SUCCESS;
1444         }
1445         /* Walks data segments */
1446
1447         cmd_pkt->control_flags |=
1448             __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1449
1450         if (!bundling && tot_prot_dsds) {
1451                 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1452                         cur_dsd, tot_dsds, NULL))
1453                         goto crc_queuing_error;
1454         } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1455                         (tot_dsds - tot_prot_dsds), NULL))
1456                 goto crc_queuing_error;
1457
1458         if (bundling && tot_prot_dsds) {
1459                 /* Walks dif segments */
1460                 cmd_pkt->control_flags |=
1461                         __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1462                 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1463                 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1464                                 tot_prot_dsds, NULL))
1465                         goto crc_queuing_error;
1466         }
1467         return QLA_SUCCESS;
1468
1469 crc_queuing_error:
1470         /* Cleanup will be performed by the caller */
1471
1472         return QLA_FUNCTION_FAILED;
1473 }
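/*
 * Worked example for the byte counts above (a sketch, assuming the usual
 * 8-byte DIF tuple per 512-byte sector): a 4 KB write with protection
 * passed through has data_bytes = 4096 and dif_bytes = 8 * 8 = 64, so
 * total_bytes = 4160 is placed in the Fibre Channel byte count while
 * fcp_dl carries htonl(4160) at cdb + 16 + additional_fcpcdb_len.
 */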
1474
1475 /**
1476  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1477  * @sp: command to send to the ISP
1478  *
1479  * Returns non-zero if a failure occurred, else zero.
1480  */
1481 int
1482 qla24xx_start_scsi(srb_t *sp)
1483 {
1484         int             ret, nseg;
1485         unsigned long   flags;
1486         uint32_t        *clr_ptr;
1487         uint32_t        index;
1488         uint32_t        handle;
1489         struct cmd_type_7 *cmd_pkt;
1490         uint16_t        cnt;
1491         uint16_t        req_cnt;
1492         uint16_t        tot_dsds;
1493         struct req_que *req = NULL;
1494         struct rsp_que *rsp = NULL;
1495         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1496         struct scsi_qla_host *vha = sp->fcport->vha;
1497         struct qla_hw_data *ha = vha->hw;
1498         char            tag[2];
1499
1500         /* Setup device pointers. */
1501         ret = 0;
1502
1503         qla25xx_set_que(sp, &rsp);
1504         req = vha->req;
1505
1506         /* So we know we haven't pci_map'ed anything yet */
1507         tot_dsds = 0;
1508
1509         /* Send marker if required */
1510         if (vha->marker_needed != 0) {
1511                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1512                     QLA_SUCCESS)
1513                         return QLA_FUNCTION_FAILED;
1514                 vha->marker_needed = 0;
1515         }
1516
1517         /* Acquire ring specific lock */
1518         spin_lock_irqsave(&ha->hardware_lock, flags);
1519
1520         /* Check for room in outstanding command list. */
1521         handle = req->current_outstanding_cmd;
1522         for (index = 1; index < req->num_outstanding_cmds; index++) {
1523                 handle++;
1524                 if (handle == req->num_outstanding_cmds)
1525                         handle = 1;
1526                 if (!req->outstanding_cmds[handle])
1527                         break;
1528         }
1529         if (index == req->num_outstanding_cmds)
1530                 goto queuing_error;
1531
1532         /* Map the sg table so we have an accurate count of sg entries needed */
1533         if (scsi_sg_count(cmd)) {
1534                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1535                     scsi_sg_count(cmd), cmd->sc_data_direction);
1536                 if (unlikely(!nseg))
1537                         goto queuing_error;
1538         } else
1539                 nseg = 0;
1540
1541         tot_dsds = nseg;
1542         req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1543         if (req->cnt < (req_cnt + 2)) {
1544                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1545                     RD_REG_DWORD_RELAXED(req->req_q_out);
1546                 if (req->ring_index < cnt)
1547                         req->cnt = cnt - req->ring_index;
1548                 else
1549                         req->cnt = req->length -
1550                                 (req->ring_index - cnt);
1551                 if (req->cnt < (req_cnt + 2))
1552                         goto queuing_error;
1553         }
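        /*
         * Worked example for the free-slot math above (illustrative
         * numbers): with req->length = 2048, ring_index = 100 and a
         * hardware out pointer of 90, the producer has wrapped past the
         * consumer, so req->cnt = 2048 - (100 - 90) = 2038 entries
         * remain; queuing proceeds only if req_cnt + 2 of them are free.
         */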
1554
1555         /* Build command packet. */
1556         req->current_outstanding_cmd = handle;
1557         req->outstanding_cmds[handle] = sp;
1558         sp->handle = handle;
1559         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1560         req->cnt -= req_cnt;
1561
1562         cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1563         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1564
1565         /* Zero out remaining portion of packet. */
1566         /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1567         clr_ptr = (uint32_t *)cmd_pkt + 2;
1568         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1569         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1570
1571         /* Set NPORT-ID and LUN number*/
1572         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1573         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1574         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1575         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1576         cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1577
1578         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1579         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1580
1581         /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1582         if (scsi_populate_tag_msg(cmd, tag)) {
1583                 switch (tag[0]) {
1584                 case HEAD_OF_QUEUE_TAG:
1585                         cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1586                         break;
1587                 case ORDERED_QUEUE_TAG:
1588                         cmd_pkt->task = TSK_ORDERED;
1589                         break;
1590                 default:
1591                         cmd_pkt->task = TSK_SIMPLE;
1592                         break;
1593                 }
1594         } else {
1595                 cmd_pkt->task = TSK_SIMPLE;
1596         }
1597
1598         /* Load SCSI command packet. */
1599         memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1600         host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1601
1602         cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1603
1604         /* Build IOCB segments */
1605         qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1606
1607         /* Set total IOCB entry count. */
1608         cmd_pkt->entry_count = (uint8_t)req_cnt;
1609         /* Specify response queue number where completion should happen */
1610         cmd_pkt->entry_status = (uint8_t) rsp->id;
1611         wmb();
1612         /* Adjust ring index. */
1613         req->ring_index++;
1614         if (req->ring_index == req->length) {
1615                 req->ring_index = 0;
1616                 req->ring_ptr = req->ring;
1617         } else
1618                 req->ring_ptr++;
1619
1620         sp->flags |= SRB_DMA_VALID;
1621
1622         /* Set chip new ring index. */
1623         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1624         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1625
1626         /* Manage unprocessed RIO/ZIO commands in response queue. */
1627         if (vha->flags.process_response_queue &&
1628                 rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1629                 qla24xx_process_response_queue(vha, rsp);
1630
1631         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1632         return QLA_SUCCESS;
1633
1634 queuing_error:
1635         if (tot_dsds)
1636                 scsi_dma_unmap(cmd);
1637
1638         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1639
1640         return QLA_FUNCTION_FAILED;
1641 }
1642
1643 /**
1644  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1645  * @sp: command to send to the ISP
1646  *
1647  * Returns non-zero if a failure occurred, else zero.
1648  */
1649 int
1650 qla24xx_dif_start_scsi(srb_t *sp)
1651 {
1652         int                     nseg;
1653         unsigned long           flags;
1654         uint32_t                *clr_ptr;
1655         uint32_t                index;
1656         uint32_t                handle;
1657         uint16_t                cnt;
1658         uint16_t                req_cnt = 0;
1659         uint16_t                tot_dsds;
1660         uint16_t                tot_prot_dsds;
1661         uint16_t                fw_prot_opts = 0;
1662         struct req_que          *req = NULL;
1663         struct rsp_que          *rsp = NULL;
1664         struct scsi_cmnd        *cmd = GET_CMD_SP(sp);
1665         struct scsi_qla_host    *vha = sp->fcport->vha;
1666         struct qla_hw_data      *ha = vha->hw;
1667         struct cmd_type_crc_2   *cmd_pkt;
1668         uint32_t                status = 0;
1669
1670 #define QDSS_GOT_Q_SPACE        BIT_0
1671
1672         /* Only process protection operations or CDBs > 16 bytes here */
1673         if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1674                 if (cmd->cmd_len <= 16)
1675                         return qla24xx_start_scsi(sp);
1676         }
1677
1678         /* Setup device pointers. */
1679
1680         qla25xx_set_que(sp, &rsp);
1681         req = vha->req;
1682
1683         /* So we know we haven't pci_map'ed anything yet */
1684         tot_dsds = 0;
1685
1686         /* Send marker if required */
1687         if (vha->marker_needed != 0) {
1688                 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1689                     QLA_SUCCESS)
1690                         return QLA_FUNCTION_FAILED;
1691                 vha->marker_needed = 0;
1692         }
1693
1694         /* Acquire ring specific lock */
1695         spin_lock_irqsave(&ha->hardware_lock, flags);
1696
1697         /* Check for room in outstanding command list. */
1698         handle = req->current_outstanding_cmd;
1699         for (index = 1; index < req->num_outstanding_cmds; index++) {
1700                 handle++;
1701                 if (handle == req->num_outstanding_cmds)
1702                         handle = 1;
1703                 if (!req->outstanding_cmds[handle])
1704                         break;
1705         }
1706
1707         if (index == req->num_outstanding_cmds)
1708                 goto queuing_error;
1709
1710         /* Compute number of required data segments */
1711         /* Map the sg table so we have an accurate count of sg entries needed */
1712         if (scsi_sg_count(cmd)) {
1713                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1714                     scsi_sg_count(cmd), cmd->sc_data_direction);
1715                 if (unlikely(!nseg))
1716                         goto queuing_error;
1717                 else
1718                         sp->flags |= SRB_DMA_VALID;
1719
1720                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1721                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1722                         struct qla2_sgx sgx;
1723                         uint32_t        partial;
1724
1725                         memset(&sgx, 0, sizeof(struct qla2_sgx));
1726                         sgx.tot_bytes = scsi_bufflen(cmd);
1727                         sgx.cur_sg = scsi_sglist(cmd);
1728                         sgx.sp = sp;
1729
1730                         nseg = 0;
1731                         while (qla24xx_get_one_block_sg(
1732                             cmd->device->sector_size, &sgx, &partial))
1733                                 nseg++;
1734                 }
1735         } else
1736                 nseg = 0;
1737
1738         /* number of required data segments */
1739         tot_dsds = nseg;
1740
1741         /* Compute number of required protection segments */
1742         if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1743                 nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1744                     scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1745                 if (unlikely(!nseg))
1746                         goto queuing_error;
1747                 else
1748                         sp->flags |= SRB_CRC_PROT_DMA_VALID;
1749
1750                 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1751                     (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1752                         nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1753                 }
1754         } else {
1755                 nseg = 0;
1756         }
1757
1758         req_cnt = 1;
1759         /* Total Data and protection sg segment(s) */
1760         tot_prot_dsds = nseg;
1761         tot_dsds += nseg;
1762         if (req->cnt < (req_cnt + 2)) {
1763                 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1764                     RD_REG_DWORD_RELAXED(req->req_q_out);
1765                 if (req->ring_index < cnt)
1766                         req->cnt = cnt - req->ring_index;
1767                 else
1768                         req->cnt = req->length -
1769                                 (req->ring_index - cnt);
1770                 if (req->cnt < (req_cnt + 2))
1771                         goto queuing_error;
1772         }
1773
1774         status |= QDSS_GOT_Q_SPACE;
1775
1776         /* Build header part of command packet (excluding the OPCODE). */
1777         req->current_outstanding_cmd = handle;
1778         req->outstanding_cmds[handle] = sp;
1779         sp->handle = handle;
1780         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1781         req->cnt -= req_cnt;
1782
1783         /* Fill-in common area */
1784         cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1785         cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1786
1787         clr_ptr = (uint32_t *)cmd_pkt + 2;
1788         memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1789
1790         /* Set NPORT-ID and LUN number*/
1791         cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1792         cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1793         cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1794         cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1795
1796         int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1797         host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1798
1799         /* Total Data and protection segment(s) */
1800         cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1801
1802         /* Build IOCB segments and adjust for data protection segments */
1803         if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1804             req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1805                 QLA_SUCCESS)
1806                 goto queuing_error;
1807
1808         cmd_pkt->entry_count = (uint8_t)req_cnt;
1809         /* Specify response queue number where completion should happen */
1810         cmd_pkt->entry_status = (uint8_t) rsp->id;
1811         cmd_pkt->timeout = __constant_cpu_to_le16(0);
1812         wmb();
1813
1814         /* Adjust ring index. */
1815         req->ring_index++;
1816         if (req->ring_index == req->length) {
1817                 req->ring_index = 0;
1818                 req->ring_ptr = req->ring;
1819         } else
1820                 req->ring_ptr++;
1821
1822         /* Set chip new ring index. */
1823         WRT_REG_DWORD(req->req_q_in, req->ring_index);
1824         RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1825
1826         /* Manage unprocessed RIO/ZIO commands in response queue. */
1827         if (vha->flags.process_response_queue &&
1828             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1829                 qla24xx_process_response_queue(vha, rsp);
1830
1831         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1832
1833         return QLA_SUCCESS;
1834
1835 queuing_error:
1836         if (status & QDSS_GOT_Q_SPACE) {
1837                 req->outstanding_cmds[handle] = NULL;
1838                 req->cnt += req_cnt;
1839         }
1840         /* Cleanup will be performed by the caller (queuecommand) */
1841
1842         spin_unlock_irqrestore(&ha->hardware_lock, flags);
1843         return QLA_FUNCTION_FAILED;
1844 }
1845
1846
1847 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1848 {
1849         struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1850         struct qla_hw_data *ha = sp->fcport->vha->hw;
1851         int affinity = cmd->request->cpu;
1852
1853         if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1854                 affinity < ha->max_rsp_queues - 1)
1855                 *rsp = ha->rsp_q_map[affinity + 1];
1856          else
1857                 *rsp = ha->rsp_q_map[0];
1858 }
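/*
 * Example (illustrative): with cpu_affinity_enabled and
 * max_rsp_queues = 4, commands issued from CPUs 0..2 complete on
 * rsp_q_map[1..3]; any other CPU (or affinity < 0) falls back to the
 * default response queue, rsp_q_map[0].
 */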
1859
1860 /* Generic Control-SRB manipulation functions. */
1861 void *
1862 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1863 {
1864         struct qla_hw_data *ha = vha->hw;
1865         struct req_que *req = ha->req_q_map[0];
1866         device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1867         uint32_t index, handle;
1868         request_t *pkt;
1869         uint16_t cnt, req_cnt;
1870
1871         pkt = NULL;
1872         req_cnt = 1;
1873         handle = 0;
1874
1875         if (!sp)
1876                 goto skip_cmd_array;
1877
1878         /* Check for room in outstanding command list. */
1879         handle = req->current_outstanding_cmd;
1880         for (index = 1; index < req->num_outstanding_cmds; index++) {
1881                 handle++;
1882                 if (handle == req->num_outstanding_cmds)
1883                         handle = 1;
1884                 if (!req->outstanding_cmds[handle])
1885                         break;
1886         }
1887         if (index == req->num_outstanding_cmds) {
1888                 ql_log(ql_log_warn, vha, 0x700b,
1889                     "No room on outstanding cmd array.\n");
1890                 goto queuing_error;
1891         }
1892
1893         /* Prep command array. */
1894         req->current_outstanding_cmd = handle;
1895         req->outstanding_cmds[handle] = sp;
1896         sp->handle = handle;
1897
1898         /* Adjust entry-counts as needed. */
1899         if (sp->type != SRB_SCSI_CMD)
1900                 req_cnt = sp->iocbs;
1901
1902 skip_cmd_array:
1903         /* Check for room on request queue. */
1904         if (req->cnt < req_cnt) {
1905                 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
1906                         cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1907                 else if (IS_P3P_TYPE(ha))
1908                         cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1909                 else if (IS_FWI2_CAPABLE(ha))
1910                         cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1911                 else if (IS_QLAFX00(ha))
1912                         cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
1913                 else
1914                         cnt = qla2x00_debounce_register(
1915                             ISP_REQ_Q_OUT(ha, &reg->isp));
1916
1917                 if  (req->ring_index < cnt)
1918                         req->cnt = cnt - req->ring_index;
1919                 else
1920                         req->cnt = req->length -
1921                             (req->ring_index - cnt);
1922         }
1923         if (req->cnt < req_cnt)
1924                 goto queuing_error;
1925
1926         /* Prep packet */
1927         req->cnt -= req_cnt;
1928         pkt = req->ring_ptr;
1929         memset(pkt, 0, REQUEST_ENTRY_SIZE);
1930         if (IS_QLAFX00(ha)) {
1931                 WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
1932                 WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
1933         } else {
1934                 pkt->entry_count = req_cnt;
1935                 pkt->handle = handle;
1936         }
1937
1938 queuing_error:
1939         return pkt;
1940 }
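/*
 * Typical call pattern (see qla2x00_start_sp() below): the caller takes
 * ha->hardware_lock, calls qla2x00_alloc_iocbs() to reserve a handle and
 * a zeroed request entry, fills in the type-specific fields, and finally
 * rings the doorbell via qla2x00_start_iocbs().
 */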
1941
1942 static void
1943 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1944 {
1945         struct srb_iocb *lio = &sp->u.iocb_cmd;
1946
1947         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1948         logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1949         if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1950                 logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1951         if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1952                 logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1953         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1954         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1955         logio->port_id[1] = sp->fcport->d_id.b.area;
1956         logio->port_id[2] = sp->fcport->d_id.b.domain;
1957         logio->vp_index = sp->fcport->vha->vp_idx;
1958 }
1959
1960 static void
1961 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1962 {
1963         struct qla_hw_data *ha = sp->fcport->vha->hw;
1964         struct srb_iocb *lio = &sp->u.iocb_cmd;
1965         uint16_t opts;
1966
1967         mbx->entry_type = MBX_IOCB_TYPE;
1968         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1969         mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1970         opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1971         opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1972         if (HAS_EXTENDED_IDS(ha)) {
1973                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1974                 mbx->mb10 = cpu_to_le16(opts);
1975         } else {
1976                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1977         }
1978         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1979         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1980             sp->fcport->d_id.b.al_pa);
1981         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1982 }
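/*
 * Example of the mb1 packing above (illustrative values): without
 * extended IDs, loop_id 0x05 with the conditional-PLOGI option
 * (opts = BIT_0) yields mb1 = (0x05 << 8) | 0x01 = 0x0501; with
 * extended IDs the loop_id and the options travel separately in mb1
 * and mb10.
 */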
1983
1984 static void
1985 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1986 {
1987         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1988         logio->control_flags =
1989             cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1990         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1991         logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1992         logio->port_id[1] = sp->fcport->d_id.b.area;
1993         logio->port_id[2] = sp->fcport->d_id.b.domain;
1994         logio->vp_index = sp->fcport->vha->vp_idx;
1995 }
1996
1997 static void
1998 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1999 {
2000         struct qla_hw_data *ha = sp->fcport->vha->hw;
2001
2002         mbx->entry_type = MBX_IOCB_TYPE;
2003         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2004         mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2005         mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2006             cpu_to_le16(sp->fcport->loop_id):
2007             cpu_to_le16(sp->fcport->loop_id << 8);
2008         mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2009         mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2010             sp->fcport->d_id.b.al_pa);
2011         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
2012         /* Implicit: mbx->mb10 = 0. */
2013 }
2014
2015 static void
2016 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2017 {
2018         logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2019         logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2020         logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2021         logio->vp_index = sp->fcport->vha->vp_idx;
2022 }
2023
2024 static void
2025 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2026 {
2027         struct qla_hw_data *ha = sp->fcport->vha->hw;
2028
2029         mbx->entry_type = MBX_IOCB_TYPE;
2030         SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2031         mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2032         if (HAS_EXTENDED_IDS(ha)) {
2033                 mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2034                 mbx->mb10 = cpu_to_le16(BIT_0);
2035         } else {
2036                 mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2037         }
2038         mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2039         mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2040         mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2041         mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2042         mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
2043 }
2044
2045 static void
2046 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2047 {
2048         uint32_t flags;
2049         uint64_t lun;
2050         struct fc_port *fcport = sp->fcport;
2051         scsi_qla_host_t *vha = fcport->vha;
2052         struct qla_hw_data *ha = vha->hw;
2053         struct srb_iocb *iocb = &sp->u.iocb_cmd;
2054         struct req_que *req = vha->req;
2055
2056         flags = iocb->u.tmf.flags;
2057         lun = iocb->u.tmf.lun;
2058
2059         tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2060         tsk->entry_count = 1;
2061         tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2062         tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2063         tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2064         tsk->control_flags = cpu_to_le32(flags);
2065         tsk->port_id[0] = fcport->d_id.b.al_pa;
2066         tsk->port_id[1] = fcport->d_id.b.area;
2067         tsk->port_id[2] = fcport->d_id.b.domain;
2068         tsk->vp_index = fcport->vha->vp_idx;
2069
2070         if (flags == TCF_LUN_RESET) {
2071                 int_to_scsilun(lun, &tsk->lun);
2072                 host_to_fcp_swap((uint8_t *)&tsk->lun,
2073                         sizeof(tsk->lun));
2074         }
2075 }
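/*
 * Timeout sketch for the IOCB above (assuming r_a_tov is kept in 100 ms
 * units, so dividing by 10 converts to seconds): r_a_tov = 100 gives
 * tsk->timeout = 100 / 10 * 2 = 20 seconds, i.e. twice R_A_TOV.
 */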
2076
2077 static void
2078 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2079 {
2080         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2081
2082         els_iocb->entry_type = ELS_IOCB_TYPE;
2083         els_iocb->entry_count = 1;
2084         els_iocb->sys_define = 0;
2085         els_iocb->entry_status = 0;
2086         els_iocb->handle = sp->handle;
2087         els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2088         els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2089         els_iocb->vp_index = sp->fcport->vha->vp_idx;
2090         els_iocb->sof_type = EST_SOFI3;
2091         els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2092
2093         els_iocb->opcode =
2094             sp->type == SRB_ELS_CMD_RPT ?
2095             bsg_job->request->rqst_data.r_els.els_code :
2096             bsg_job->request->rqst_data.h_els.command_code;
2097         els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2098         els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2099         els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2100         els_iocb->control_flags = 0;
2101         els_iocb->rx_byte_count =
2102             cpu_to_le32(bsg_job->reply_payload.payload_len);
2103         els_iocb->tx_byte_count =
2104             cpu_to_le32(bsg_job->request_payload.payload_len);
2105
2106         els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2107             (bsg_job->request_payload.sg_list)));
2108         els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2109             (bsg_job->request_payload.sg_list)));
2110         els_iocb->tx_len = cpu_to_le32(sg_dma_len
2111             (bsg_job->request_payload.sg_list));
2112
2113         els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2114             (bsg_job->reply_payload.sg_list)));
2115         els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2116             (bsg_job->reply_payload.sg_list)));
2117         els_iocb->rx_len = cpu_to_le32(sg_dma_len
2118             (bsg_job->reply_payload.sg_list));
2119
2120         sp->fcport->vha->qla_stats.control_requests++;
2121 }
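/*
 * Example of the 64-bit DSD split above (illustrative address): a
 * request payload mapped at DMA address 0x0000001234567000 is written
 * out as tx_address[0] = LSD(...) = 0x34567000 and
 * tx_address[1] = MSD(...) = 0x00000012.
 */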
2122
2123 static void
2124 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2125 {
2126         uint16_t        avail_dsds;
2127         uint32_t        *cur_dsd;
2128         struct scatterlist *sg;
2129         int index;
2130         uint16_t tot_dsds;
2131         scsi_qla_host_t *vha = sp->fcport->vha;
2132         struct qla_hw_data *ha = vha->hw;
2133         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2134         int loop_iteration = 0;
2135         int cont_iocb_prsnt = 0;
2136         int entry_count = 1;
2137
2138         memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2139         ct_iocb->entry_type = CT_IOCB_TYPE;
2140         ct_iocb->entry_status = 0;
2141         ct_iocb->handle1 = sp->handle;
2142         SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2143         ct_iocb->status = __constant_cpu_to_le16(0);
2144         ct_iocb->control_flags = __constant_cpu_to_le16(0);
2145         ct_iocb->timeout = 0;
2146         ct_iocb->cmd_dsd_count =
2147             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2148         ct_iocb->total_dsd_count =
2149             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2150         ct_iocb->req_bytecount =
2151             cpu_to_le32(bsg_job->request_payload.payload_len);
2152         ct_iocb->rsp_bytecount =
2153             cpu_to_le32(bsg_job->reply_payload.payload_len);
2154
2155         ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2156             (bsg_job->request_payload.sg_list)));
2157         ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2158             (bsg_job->request_payload.sg_list)));
2159         ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2160
2161         ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2162             (bsg_job->reply_payload.sg_list)));
2163         ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2164             (bsg_job->reply_payload.sg_list)));
2165         ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2166
2167         avail_dsds = 1;
2168         cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2169         index = 0;
2170         tot_dsds = bsg_job->reply_payload.sg_cnt;
2171
2172         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2173                 dma_addr_t       sle_dma;
2174                 cont_a64_entry_t *cont_pkt;
2175
2176                 /* Allocate additional continuation packets? */
2177                 if (avail_dsds == 0) {
2178                         /*
2179                          * Five DSDs are available in the Cont.
2180                          * Type 1 IOCB.
2181                          */
2182                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2183                             vha->hw->req_q_map[0]);
2184                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2185                         avail_dsds = 5;
2186                         cont_iocb_prsnt = 1;
2187                         entry_count++;
2188                 }
2189
2190                 sle_dma = sg_dma_address(sg);
2191                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2192                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2193                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2194                 loop_iteration++;
2195                 avail_dsds--;
2196         }
2197         ct_iocb->entry_count = entry_count;
2198
2199         sp->fcport->vha->qla_stats.control_requests++;
2200 }
2201
2202 static void
2203 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2204 {
2205         uint16_t        avail_dsds;
2206         uint32_t        *cur_dsd;
2207         struct scatterlist *sg;
2208         int index;
2209         uint16_t tot_dsds;
2210         scsi_qla_host_t *vha = sp->fcport->vha;
2211         struct qla_hw_data *ha = vha->hw;
2212         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2213         int loop_iteration = 0;
2214         int cont_iocb_prsnt = 0;
2215         int entry_count = 1;
2216
2217         ct_iocb->entry_type = CT_IOCB_TYPE;
2218         ct_iocb->entry_status = 0;
2219         ct_iocb->sys_define = 0;
2220         ct_iocb->handle = sp->handle;
2221
2222         ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2223         ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2224         ct_iocb->comp_status = __constant_cpu_to_le16(0);
2225
2226         ct_iocb->cmd_dsd_count =
2227             __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2228         ct_iocb->timeout = 0;
2229         ct_iocb->rsp_dsd_count =
2230             __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2231         ct_iocb->rsp_byte_count =
2232             cpu_to_le32(bsg_job->reply_payload.payload_len);
2233         ct_iocb->cmd_byte_count =
2234             cpu_to_le32(bsg_job->request_payload.payload_len);
2235         ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2236             (bsg_job->request_payload.sg_list)));
2237         ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2238            (bsg_job->request_payload.sg_list)));
2239         ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2240             (bsg_job->request_payload.sg_list));
2241
2242         avail_dsds = 1;
2243         cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2244         index = 0;
2245         tot_dsds = bsg_job->reply_payload.sg_cnt;
2246
2247         for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2248                 dma_addr_t       sle_dma;
2249                 cont_a64_entry_t *cont_pkt;
2250
2251                 /* Allocate additional continuation packets? */
2252                 if (avail_dsds == 0) {
2253                         /*
2254                          * Five DSDs are available in the Cont.
2255                          * Type 1 IOCB.
2256                          */
2257                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2258                             ha->req_q_map[0]);
2259                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2260                         avail_dsds = 5;
2261                         cont_iocb_prsnt = 1;
2262                         entry_count++;
2263                 }
2264
2265                 sle_dma = sg_dma_address(sg);
2266                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2267                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2268                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2269                 loop_iteration++;
2270                 avail_dsds--;
2271         }
2272         ct_iocb->entry_count = entry_count;
2273 }
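/*
 * Entry-count sketch for the loop above: the base IOCB carries one
 * reply DSD (avail_dsds = 1) and each Continuation Type 1 IOCB adds
 * five more, so a reply payload of 11 SG entries needs
 * 1 + DIV_ROUND_UP(10, 5) = 3 ring entries in total.
 */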
2274
2275 /**
2276  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2277  * @sp: command to send to the ISP
2278  *
2279  * Returns non-zero if a failure occurred, else zero.
2280  */
2281 int
2282 qla82xx_start_scsi(srb_t *sp)
2283 {
2284         int             ret, nseg;
2285         unsigned long   flags;
2286         struct scsi_cmnd *cmd;
2287         uint32_t        *clr_ptr;
2288         uint32_t        index;
2289         uint32_t        handle;
2290         uint16_t        cnt;
2291         uint16_t        req_cnt;
2292         uint16_t        tot_dsds;
2293         struct device_reg_82xx __iomem *reg;
2294         uint32_t dbval;
2295         uint32_t *fcp_dl;
2296         uint8_t additional_cdb_len;
2297         struct ct6_dsd *ctx;
2298         struct scsi_qla_host *vha = sp->fcport->vha;
2299         struct qla_hw_data *ha = vha->hw;
2300         struct req_que *req = NULL;
2301         struct rsp_que *rsp = NULL;
2302         char tag[2];
2303
2304         /* Setup device pointers. */
2305         ret = 0;
2306         reg = &ha->iobase->isp82;
2307         cmd = GET_CMD_SP(sp);
2308         req = vha->req;
2309         rsp = ha->rsp_q_map[0];
2310
2311         /* So we know we haven't pci_map'ed anything yet */
2312         tot_dsds = 0;
2313
2314         dbval = 0x04 | (ha->portnum << 5);
2315
2316         /* Send marker if required */
2317         if (vha->marker_needed != 0) {
2318                 if (qla2x00_marker(vha, req,
2319                         rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2320                         ql_log(ql_log_warn, vha, 0x300c,
2321                             "qla2x00_marker failed for cmd=%p.\n", cmd);
2322                         return QLA_FUNCTION_FAILED;
2323                 }
2324                 vha->marker_needed = 0;
2325         }
2326
2327         /* Acquire ring specific lock */
2328         spin_lock_irqsave(&ha->hardware_lock, flags);
2329
2330         /* Check for room in outstanding command list. */
2331         handle = req->current_outstanding_cmd;
2332         for (index = 1; index < req->num_outstanding_cmds; index++) {
2333                 handle++;
2334                 if (handle == req->num_outstanding_cmds)
2335                         handle = 1;
2336                 if (!req->outstanding_cmds[handle])
2337                         break;
2338         }
2339         if (index == req->num_outstanding_cmds)
2340                 goto queuing_error;
2341
2342         /* Map the sg table so we have an accurate count of sg entries needed */
2343         if (scsi_sg_count(cmd)) {
2344                 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2345                     scsi_sg_count(cmd), cmd->sc_data_direction);
2346                 if (unlikely(!nseg))
2347                         goto queuing_error;
2348         } else
2349                 nseg = 0;
2350
2351         tot_dsds = nseg;
2352
2353         if (tot_dsds > ql2xshiftctondsd) {
2354                 struct cmd_type_6 *cmd_pkt;
2355                 uint16_t more_dsd_lists = 0;
2356                 struct dsd_dma *dsd_ptr;
2357                 uint16_t i;
2358
2359                 more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2360                 if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2361                         ql_dbg(ql_dbg_io, vha, 0x300d,
2362                             "Num of DSD list %d is more than %d for cmd=%p.\n",
2363                             more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2364                             cmd);
2365                         goto queuing_error;
2366                 }
2367
2368                 if (more_dsd_lists <= ha->gbl_dsd_avail)
2369                         goto sufficient_dsds;
2370                 else
2371                         more_dsd_lists -= ha->gbl_dsd_avail;
2372
2373                 for (i = 0; i < more_dsd_lists; i++) {
2374                         dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2375                         if (!dsd_ptr) {
2376                                 ql_log(ql_log_fatal, vha, 0x300e,
2377                                     "Failed to allocate memory for dsd_dma "
2378                                     "for cmd=%p.\n", cmd);
2379                                 goto queuing_error;
2380                         }
2381
2382                         dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2383                                 GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2384                         if (!dsd_ptr->dsd_addr) {
2385                                 kfree(dsd_ptr);
2386                                 ql_log(ql_log_fatal, vha, 0x300f,
2387                                     "Failed to allocate memory for dsd_addr "
2388                                     "for cmd=%p.\n", cmd);
2389                                 goto queuing_error;
2390                         }
2391                         list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2392                         ha->gbl_dsd_avail++;
2393                 }
2394
2395 sufficient_dsds:
2396                 req_cnt = 1;
2397
2398                 if (req->cnt < (req_cnt + 2)) {
2399                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2400                                 &reg->req_q_out[0]);
2401                         if (req->ring_index < cnt)
2402                                 req->cnt = cnt - req->ring_index;
2403                         else
2404                                 req->cnt = req->length -
2405                                         (req->ring_index - cnt);
2406                         if (req->cnt < (req_cnt + 2))
2407                                 goto queuing_error;
2408                 }
2409
2410                 ctx = sp->u.scmd.ctx =
2411                     mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2412                 if (!ctx) {
2413                         ql_log(ql_log_fatal, vha, 0x3010,
2414                             "Failed to allocate ctx for cmd=%p.\n", cmd);
2415                         goto queuing_error;
2416                 }
2417
2418                 memset(ctx, 0, sizeof(struct ct6_dsd));
2419                 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2420                         GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2421                 if (!ctx->fcp_cmnd) {
2422                         ql_log(ql_log_fatal, vha, 0x3011,
2423                             "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2424                         goto queuing_error;
2425                 }
2426
2427                 /* Initialize the DSD list and dma handle */
2428                 INIT_LIST_HEAD(&ctx->dsd_list);
2429                 ctx->dsd_use_cnt = 0;
2430
2431                 if (cmd->cmd_len > 16) {
2432                         additional_cdb_len = cmd->cmd_len - 16;
2433                         if ((cmd->cmd_len % 4) != 0) {
2434                                 /* SCSI commands longer than 16 bytes
2435                                  * must be a multiple of 4 bytes.
2436                                  */
2437                                 ql_log(ql_log_warn, vha, 0x3012,
2438                                     "scsi cmd len %d not multiple of 4 "
2439                                     "for cmd=%p.\n", cmd->cmd_len, cmd);
2440                                 goto queuing_error_fcp_cmnd;
2441                         }
2442                         ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2443                 } else {
2444                         additional_cdb_len = 0;
2445                         ctx->fcp_cmnd_len = 12 + 16 + 4;
2446                 }
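                /* Example (illustrative): a 32-byte CDB gives
                 * additional_cdb_len = 16 and
                 * fcp_cmnd_len = 12 + 32 + 4 = 48, while a 16-byte (or
                 * shorter) CDB always uses the fixed 12 + 16 + 4 = 32
                 * byte FCP_CMND.
                 */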
2447
2448                 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2449                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2450
2451                 /* Zero out remaining portion of packet. */
2452                 /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2453                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2454                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2455                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2456
2457                 /* Set NPORT-ID and LUN number*/
2458                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2459                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2460                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2461                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2462                 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2463
2464                 /* Build IOCB segments */
2465                 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2466                         goto queuing_error_fcp_cmnd;
2467
2468                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2469                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2470
2471                 /* build FCP_CMND IU */
2472                 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2473                 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2474                 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2475
2476                 if (cmd->sc_data_direction == DMA_TO_DEVICE)
2477                         ctx->fcp_cmnd->additional_cdb_len |= 1;
2478                 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2479                         ctx->fcp_cmnd->additional_cdb_len |= 2;
2480
2481                 /*
2482                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2483                  */
2484                 if (scsi_populate_tag_msg(cmd, tag)) {
2485                         switch (tag[0]) {
2486                         case HEAD_OF_QUEUE_TAG:
2487                                 ctx->fcp_cmnd->task_attribute =
2488                                     TSK_HEAD_OF_QUEUE;
2489                                 break;
2490                         case ORDERED_QUEUE_TAG:
2491                                 ctx->fcp_cmnd->task_attribute =
2492                                     TSK_ORDERED;
2493                                 break;
2494                         }
2495                 }
2496
2497                 /* Populate the FCP_PRIO. */
2498                 if (ha->flags.fcp_prio_enabled)
2499                         ctx->fcp_cmnd->task_attribute |=
2500                             sp->fcport->fcp_prio << 3;
2501
2502                 memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2503
2504                 fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2505                     additional_cdb_len);
2506                 *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2507
2508                 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2509                 cmd_pkt->fcp_cmnd_dseg_address[0] =
2510                     cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2511                 cmd_pkt->fcp_cmnd_dseg_address[1] =
2512                     cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2513
2514                 sp->flags |= SRB_FCP_CMND_DMA_VALID;
2515                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2516                 /* Set total IOCB entry count. */
2517                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2518                 /* Specify response queue number where
2519                  * completion should happen
2520                  */
2521                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2522         } else {
2523                 struct cmd_type_7 *cmd_pkt;
2524                 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2525                 if (req->cnt < (req_cnt + 2)) {
2526                         cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2527                             &reg->req_q_out[0]);
2528                         if (req->ring_index < cnt)
2529                                 req->cnt = cnt - req->ring_index;
2530                         else
2531                                 req->cnt = req->length -
2532                                         (req->ring_index - cnt);
2533                 }
2534                 if (req->cnt < (req_cnt + 2))
2535                         goto queuing_error;
2536
2537                 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2538                 cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2539
2540                 /* Zero out remaining portion of packet. */
2541                 /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2542                 clr_ptr = (uint32_t *)cmd_pkt + 2;
2543                 memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2544                 cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2545
2546                 /* Set NPORT-ID and LUN number*/
2547                 cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2548                 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2549                 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2550                 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2551                 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2552
2553                 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2554                 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2555                     sizeof(cmd_pkt->lun));
2556
2557                 /*
2558                  * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2559                  */
2560                 if (scsi_populate_tag_msg(cmd, tag)) {
2561                         switch (tag[0]) {
2562                         case HEAD_OF_QUEUE_TAG:
2563                                 cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2564                                 break;
2565                         case ORDERED_QUEUE_TAG:
2566                                 cmd_pkt->task = TSK_ORDERED;
2567                                 break;
2568                         }
2569                 }
2570
2571                 /* Populate the FCP_PRIO. */
2572                 if (ha->flags.fcp_prio_enabled)
2573                         cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2574
2575                 /* Load SCSI command packet. */
2576                 memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2577                 host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2578
2579                 cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2580
2581                 /* Build IOCB segments */
2582                 qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2583
2584                 /* Set total IOCB entry count. */
2585                 cmd_pkt->entry_count = (uint8_t)req_cnt;
2586                 /* Specify response queue number where
2587                  * completion should happen.
2588                  */
2589                 cmd_pkt->entry_status = (uint8_t) rsp->id;
2590
2591         }
2592         /* Build command packet. */
2593         req->current_outstanding_cmd = handle;
2594         req->outstanding_cmds[handle] = sp;
2595         sp->handle = handle;
2596         cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2597         req->cnt -= req_cnt;
2598         wmb();
2599
2600         /* Adjust ring index. */
2601         req->ring_index++;
2602         if (req->ring_index == req->length) {
2603                 req->ring_index = 0;
2604                 req->ring_ptr = req->ring;
2605         } else
2606                 req->ring_ptr++;
2607
2608         sp->flags |= SRB_DMA_VALID;
2609
2610         /* Set chip new ring index. */
2611         /* write, read and verify logic */
2612         dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2613         if (ql2xdbwr)
2614                 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2615         else {
2616                 WRT_REG_DWORD(
2617                         (unsigned long __iomem *)ha->nxdb_wr_ptr,
2618                         dbval);
2619                 wmb();
2620                 while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
2621                         WRT_REG_DWORD(
2622                                 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2623                                 dbval);
2624                         wmb();
2625                 }
2626         }
2627
2628         /* Manage unprocessed RIO/ZIO commands in response queue. */
2629         if (vha->flags.process_response_queue &&
2630             rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2631                 qla24xx_process_response_queue(vha, rsp);
2632
2633         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2634         return QLA_SUCCESS;
2635
2636 queuing_error_fcp_cmnd:
2637         dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2638 queuing_error:
2639         if (tot_dsds)
2640                 scsi_dma_unmap(cmd);
2641
2642         if (sp->u.scmd.ctx) {
2643                 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2644                 sp->u.scmd.ctx = NULL;
2645         }
2646         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2647
2648         return QLA_FUNCTION_FAILED;
2649 }
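/*
 * Doorbell value sketch for the write-back logic above (illustrative
 * numbers): portnum = 1, req->id = 0 and ring_index = 0x10 give
 * dbval = 0x04 | (1 << 5) | (0 << 8) | (0x10 << 16) = 0x00100024, which
 * is written to the 82xx doorbell and, unless ql2xdbwr is set, read
 * back and rewritten until it sticks.
 */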
2650
2651 void
2652 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
2653 {
2654         struct srb_iocb *aio = &sp->u.iocb_cmd;
2655         scsi_qla_host_t *vha = sp->fcport->vha;
2656         struct req_que *req = vha->req;
2657
2658         memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
2659         abt_iocb->entry_type = ABORT_IOCB_TYPE;
2660         abt_iocb->entry_count = 1;
2661         abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
2662         abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2663         abt_iocb->handle_to_abort =
2664             cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
2665         abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2666         abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
2667         abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2668         abt_iocb->vp_index = vha->vp_idx;
2669         abt_iocb->req_que_no = cpu_to_le16(req->id);
2670         /* Send the command to the firmware */
2671         wmb();
2672 }
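/*
 * Handle packing sketch (assuming MAKE_HANDLE() places the request
 * queue id in the upper 16 bits): req->id = 1 and sp->handle = 0x2a
 * produce abt_iocb->handle = cpu_to_le32(0x0001002a);
 * handle_to_abort is packed the same way from the handle of the
 * command being aborted.
 */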
2673
2674 int
2675 qla2x00_start_sp(srb_t *sp)
2676 {
2677         int rval;
2678         struct qla_hw_data *ha = sp->fcport->vha->hw;
2679         void *pkt;
2680         unsigned long flags;
2681
2682         rval = QLA_FUNCTION_FAILED;
2683         spin_lock_irqsave(&ha->hardware_lock, flags);
2684         pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2685         if (!pkt) {
2686                 ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2687                     "qla2x00_alloc_iocbs failed.\n");
2688                 goto done;
2689         }
2690
2691         rval = QLA_SUCCESS;
2692         switch (sp->type) {
2693         case SRB_LOGIN_CMD:
2694                 IS_FWI2_CAPABLE(ha) ?
2695                     qla24xx_login_iocb(sp, pkt) :
2696                     qla2x00_login_iocb(sp, pkt);
2697                 break;
2698         case SRB_LOGOUT_CMD:
2699                 IS_FWI2_CAPABLE(ha) ?
2700                     qla24xx_logout_iocb(sp, pkt) :
2701                     qla2x00_logout_iocb(sp, pkt);
2702                 break;
2703         case SRB_ELS_CMD_RPT:
2704         case SRB_ELS_CMD_HST:
2705                 qla24xx_els_iocb(sp, pkt);
2706                 break;
2707         case SRB_CT_CMD:
2708                 IS_FWI2_CAPABLE(ha) ?
2709                     qla24xx_ct_iocb(sp, pkt) :
2710                     qla2x00_ct_iocb(sp, pkt);
2711                 break;
2712         case SRB_ADISC_CMD:
2713                 IS_FWI2_CAPABLE(ha) ?
2714                     qla24xx_adisc_iocb(sp, pkt) :
2715                     qla2x00_adisc_iocb(sp, pkt);
2716                 break;
2717         case SRB_TM_CMD:
2718                 IS_QLAFX00(ha) ?
2719                     qlafx00_tm_iocb(sp, pkt) :
2720                     qla24xx_tm_iocb(sp, pkt);
2721                 break;
2722         case SRB_FXIOCB_DCMD:
2723         case SRB_FXIOCB_BCMD:
2724                 qlafx00_fxdisc_iocb(sp, pkt);
2725                 break;
2726         case SRB_ABT_CMD:
2727                 IS_QLAFX00(ha) ?
2728                         qlafx00_abort_iocb(sp, pkt) :
2729                         qla24xx_abort_iocb(sp, pkt);
2730                 break;
2731         default:
2732                 break;
2733         }
2734
2735         wmb();
2736         qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2737 done:
2738         spin_unlock_irqrestore(&ha->hardware_lock, flags);
2739         return rval;
2740 }
2741
2742 static void
2743 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2744                                 struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
2745 {
2746         uint16_t avail_dsds;
2747         uint32_t *cur_dsd;
2748         uint32_t req_data_len = 0;
2749         uint32_t rsp_data_len = 0;
2750         struct scatterlist *sg;
2751         int index;
2752         int entry_count = 1;
2753         struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2754
2755         /* Update entry type to indicate a bidirectional command */
2756         *((uint32_t *)(&cmd_pkt->entry_type)) =
2757                 __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
2758
2759         /* Set the transfer direction; in this case set both flags.
2760          * Also set the BD_WRAP_BACK flag; the firmware takes care of
2761          * assigning DID=SID for outgoing packets.
2762          */
2763         cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2764         cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2765         cmd_pkt->control_flags =
2766                         __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2767                                                         BD_WRAP_BACK);
2768
2769         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2770         cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
2771         cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
2772         cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
2773
2774         vha->bidi_stats.transfer_bytes += req_data_len;
2775         vha->bidi_stats.io_count++;
2776
2777         vha->qla_stats.output_bytes += req_data_len;
2778         vha->qla_stats.output_requests++;
2779
2780         /* Only one DSD is available in the bidirectional IOCB; the
2781          * remaining DSDs are bundled in continuation IOCBs.
2782          */
2783         avail_dsds = 1;
2784         cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2785
2786         index = 0;
2787
2788         for_each_sg(bsg_job->request_payload.sg_list, sg,
2789                                 bsg_job->request_payload.sg_cnt, index) {
2790                 dma_addr_t sle_dma;
2791                 cont_a64_entry_t *cont_pkt;
2792
2793                 /* Allocate additional continuation packets */
2794                 if (avail_dsds == 0) {
2795                         /* A Continuation Type 1 IOCB can
2796                          * accommodate 5 DSDs.
2797                          */
2798                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2799                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2800                         avail_dsds = 5;
2801                         entry_count++;
2802                 }
2803                 sle_dma = sg_dma_address(sg);
2804                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2805                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2806                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2807                 avail_dsds--;
2808         }
2809         /* For read request DSD will always goes to continuation IOCB
2810          * and follow the write DSD. If there is room on the current IOCB
2811          * then it is added to that IOCB else new continuation IOCB is
2812          * allocated.
2813          */
2814         for_each_sg(bsg_job->reply_payload.sg_list, sg,
2815                                 bsg_job->reply_payload.sg_cnt, index) {
2816                 dma_addr_t sle_dma;
2817                 cont_a64_entry_t *cont_pkt;
2818
2819                 /* Allocate additional continuation packets */
2820                 if (avail_dsds == 0) {
2821                         /* Continuation type 1 IOCB can accomodate
2822                          * 5 DSDS
2823                          */
2824                         cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2825                         cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2826                         avail_dsds = 5;
2827                         entry_count++;
2828                 }
2829                 sle_dma = sg_dma_address(sg);
2830                 *cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2831                 *cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2832                 *cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2833                 avail_dsds--;
2834         }
2835         /* This value should be same as number of IOCB required for this cmd */
2836         cmd_pkt->entry_count = entry_count;
2837 }
2838
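/*
 * Worked example (illustrative sketch only; nothing in the driver calls
 * this): with one DSD slot in the bidirectional command IOCB and five
 * slots per Continuation Type 1 IOCB, the entry count produced by
 * qla25xx_build_bidir_iocb() for @dsds total write plus read segments
 * is 1 + ceil((dsds - 1) / 5).
 */
static inline uint16_t
qla25xx_calc_bidir_iocbs_example(uint16_t dsds)
{
	uint16_t iocbs = 1;		/* command IOCB holds one DSD */

	if (dsds > 1)			/* five more DSDs per continuation */
		iocbs += DIV_ROUND_UP(dsds - 1, 5);
	return iocbs;
}

/**
 * qla2x00_start_bidir() - Issue a bidirectional (wrap-back) command.
 * @sp: SRB carrying the bsg_job
 * @vha: HA context
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns EXT_STATUS_OK on success, EXT_STATUS_MAILBOX if the marker
 * IOCB could not be sent, or EXT_STATUS_BUSY if no handle or request
 * queue space is available.
 */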
int
qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
{
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	uint32_t handle;
	uint32_t index;
	uint16_t req_cnt;
	uint16_t cnt;
	uint32_t *clr_ptr;
	struct cmd_bidir *cmd_pkt = NULL;
	struct rsp_que *rsp;
	struct req_que *req;
	int rval = EXT_STATUS_OK;

	rsp = ha->rsp_q_map[0];
	req = vha->req;

	/* Send marker if required. */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req,
			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return EXT_STATUS_MAILBOX;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list; handle 0 is never
	 * used, so the search wraps from the end of the array to slot 1.
	 */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}

	if (index == req->num_outstanding_cmds) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	/* Calculate the number of IOCBs required. */
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

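	/* The firmware's consumer index tells us how much ring space is
	 * free.  On ISPs with shadow registers the firmware DMAs this
	 * index into host memory (*req->out_ptr); otherwise it is read
	 * from the chip.  A small cushion of entries is kept so the ring
	 * can never fill completely.
	 */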
	/* Check for room on request queue. */
	if (req->cnt < req_cnt + 2) {
		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
		    RD_REG_DWORD_RELAXED(req->req_q_out);
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < req_cnt + 2) {
		rval = EXT_STATUS_BUSY;
		goto queuing_error;
	}

	/* Build command packet. */
	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	/* Set N_Port ID (of vha). */
	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
	cmd_pkt->port_id[1] = vha->d_id.b.area;
	cmd_pkt->port_id[2] = vha->d_id.b.domain;

	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
	/* entry_status is used here to carry the response queue id. */
	cmd_pkt->entry_status = (uint8_t) rsp->id;

	/* Book-keep this command as outstanding. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	/* Send the command to the firmware. */
	wmb();
	qla2x00_start_iocbs(vha, req);
queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
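
/*
 * Note: the EXT_STATUS_* codes returned above are consumed by the bsg
 * pass-through path (see qla_bsg.c), which reports them back through
 * the bsg reply.
 */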