1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
30
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
38
39 #include <linux/nvme.h>
40 #include <linux/nvme-fc-driver.h>
41 #include <linux/nvme-fc.h>
42 #include "lpfc_version.h"
43 #include "lpfc_hw4.h"
44 #include "lpfc_hw.h"
45 #include "lpfc_sli.h"
46 #include "lpfc_sli4.h"
47 #include "lpfc_nl.h"
48 #include "lpfc_disc.h"
49 #include "lpfc.h"
50 #include "lpfc_nvme.h"
51 #include "lpfc_scsi.h"
52 #include "lpfc_logmsg.h"
53 #include "lpfc_crtn.h"
54 #include "lpfc_vport.h"
55 #include "lpfc_debugfs.h"
56
57 /* NVME initiator-based functions */
58
59 static struct lpfc_io_buf *
60 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
61                   int idx, int expedite);
62
63 static void
64 lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);
65
66 static struct nvme_fc_port_template lpfc_nvme_template;
67
68 static union lpfc_wqe128 lpfc_iread_cmd_template;
69 static union lpfc_wqe128 lpfc_iwrite_cmd_template;
70 static union lpfc_wqe128 lpfc_icmnd_cmd_template;
71
72 /* Setup WQE templates for NVME IOs */
73 void
74 lpfc_nvme_cmd_template(void)
75 {
76         union lpfc_wqe128 *wqe;
77
78         /* IREAD template */
79         wqe = &lpfc_iread_cmd_template;
80         memset(wqe, 0, sizeof(union lpfc_wqe128));
81
82         /* Word 0, 1, 2 - BDE is variable */
83
84         /* Word 3 - cmd_buff_len, payload_offset_len is zero */
85
86         /* Word 4 - total_xfer_len is variable */
87
88         /* Word 5 - is zero */
89
90         /* Word 6 - ctxt_tag, xri_tag is variable */
91
92         /* Word 7 */
93         bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
94         bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
95         bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
96         bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
97
98         /* Word 8 - abort_tag is variable */
99
100         /* Word 9  - reqtag is variable */
101
102         /* Word 10 - dbde, wqes is variable */
103         bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
104         bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);
105         bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
106         bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
107         bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
108         bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
109
110         /* Word 11 - pbde is variable */
111         bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, NVME_READ_CMD);
112         bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
113         bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
114
115         /* Word 12 - is zero */
116
117         /* Word 13, 14, 15 - PBDE is variable */
118
119         /* IWRITE template */
120         wqe = &lpfc_iwrite_cmd_template;
121         memset(wqe, 0, sizeof(union lpfc_wqe128));
122
123         /* Word 0, 1, 2 - BDE is variable */
124
125         /* Word 3 - cmd_buff_len, payload_offset_len is zero */
126
127         /* Word 4 - total_xfer_len is variable */
128
129         /* Word 5 - initial_xfer_len is variable */
130
131         /* Word 6 - ctxt_tag, xri_tag is variable */
132
133         /* Word 7 */
134         bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
135         bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
136         bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
137         bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
138
139         /* Word 8 - abort_tag is variable */
140
141         /* Word 9  - reqtag is variable */
142
143         /* Word 10 - dbde, wqes is variable */
144         bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
145         bf_set(wqe_nvme, &wqe->fcp_iwrite.wqe_com, 1);
146         bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
147         bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
148         bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
149         bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
150
151         /* Word 11 - pbde is variable */
152         bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, NVME_WRITE_CMD);
153         bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
154         bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
155
156         /* Word 12 - is zero */
157
158         /* Word 13, 14, 15 - PBDE is variable */
159
160         /* ICMND template */
161         wqe = &lpfc_icmnd_cmd_template;
162         memset(wqe, 0, sizeof(union lpfc_wqe128));
163
164         /* Word 0, 1, 2 - BDE is variable */
165
166         /* Word 3 - payload_offset_len is variable */
167
168         /* Word 4, 5 - is zero */
169
170         /* Word 6 - ctxt_tag, xri_tag is variable */
171
172         /* Word 7 */
173         bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
174         bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
175         bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
176         bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
177
178         /* Word 8 - abort_tag is variable */
179
180         /* Word 9  - reqtag is variable */
181
182         /* Word 10 - dbde, wqes is variable */
183         bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
184         bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
185         bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
186         bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
187         bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
188         bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
189
190         /* Word 11 */
191         bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, FCP_COMMAND);
192         bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
193         bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
194
195         /* Word 12, 13, 14, 15 - is zero */
196 }
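/*
 * The templates above are consumed in lpfc_nvme_prep_io_cmd(): words 7-11
 * (iread/iwrite) or words 4-11 (icmnd) are copied verbatim into each IO's
 * WQE, so only the variable words (BDE, transfer lengths, tags, PBDE) need
 * to be filled in on the submission path.
 */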
197
198 /**
199  * lpfc_nvme_create_queue - Allocate and bind a driver queue handle
200  * @pnvme_lport: Pointer to the driver's local port data
201  * @qidx: A CPU index used to affinitize IO queues and MSIX vectors.
202  * @handle: An opaque driver handle used in follow-up calls.
203  *
204  * Driver registers this routine to preallocate and initialize any
205  * internal data structures to bind the @qidx to its internal IO queues.
206  * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
207  *
208  * Return value :
209  *   0 - Success
210  *   -EINVAL - Unsupported input value.
211  *   -ENOMEM - Could not alloc necessary memory
212  **/
213 static int
214 lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
215                        unsigned int qidx, u16 qsize,
216                        void **handle)
217 {
218         struct lpfc_nvme_lport *lport;
219         struct lpfc_vport *vport;
220         struct lpfc_nvme_qhandle *qhandle;
221         char *str;
222
223         if (!pnvme_lport->private)
224                 return -ENOMEM;
225
226         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
227         vport = lport->vport;
228         qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
229         if (qhandle == NULL)
230                 return -ENOMEM;
231
232         qhandle->cpu_id = raw_smp_processor_id();
233         qhandle->qidx = qidx;
234         /*
235          * NVME qidx == 0 is the admin queue, so both admin queue
236          * and first IO queue will use MSI-X vector and associated
237          * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
238          */
239         if (qidx) {
240                 str = "IO ";  /* IO queue */
241                 qhandle->index = ((qidx - 1) %
242                         lpfc_nvme_template.max_hw_queues);
243         } else {
244                 str = "ADM";  /* Admin queue */
245                 qhandle->index = qidx;
246         }
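        /*
         * Illustrative mapping (values assumed): with max_hw_queues == 4,
         * IO qidx 1..4 bind to hdw_queue index 0..3 and qidx 5 wraps back
         * to index 0, while the admin queue (qidx 0) also uses index 0.
         */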
247
248         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
249                          "6073 Binding %s HdwQueue %d  (cpu %d) to "
250                          "hdw_queue %d qhandle %p\n", str,
251                          qidx, qhandle->cpu_id, qhandle->index, qhandle);
252         *handle = (void *)qhandle;
253         return 0;
254 }
255
256 /**
257  * lpfc_nvme_delete_queue - Free the driver queue handle for an IO queue
258  * @pnvme_lport: Pointer to the driver's local port data
259  * @qidx: A CPU index used to affinitize IO queues and MSIX vectors.
260  * @handle: An opaque driver handle from lpfc_nvme_create_queue
261  *
262  * Driver registers this routine to free
263  * any internal data structures to bind the @qidx to its internal
264  * IO queues.
265  *
266  * Return value :
267  *   None
269  **/
270 static void
271 lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
272                        unsigned int qidx,
273                        void *handle)
274 {
275         struct lpfc_nvme_lport *lport;
276         struct lpfc_vport *vport;
277
278         if (!pnvme_lport->private)
279                 return;
280
281         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
282         vport = lport->vport;
283
284         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
285                         "6001 ENTER.  lpfc_pnvme %p, qidx x%x qhandle %p\n",
286                         lport, qidx, handle);
287         kfree(handle);
288 }
289
290 static void
291 lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
292 {
293         struct lpfc_nvme_lport *lport = localport->private;
294
295         lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
296                          "6173 localport %p delete complete\n",
297                          lport);
298
299         /* release any threads waiting for the unreg to complete */
300         if (lport->vport->localport)
301                 complete(lport->lport_unreg_cmp);
302 }
303
304 /* lpfc_nvme_remoteport_delete
305  *
306  * @remoteport: Pointer to an nvme transport remoteport instance.
307  *
308  * This is a template downcall.  NVME transport calls this function
309  * when it has completed the unregistration of a previously
310  * registered remoteport.
311  *
312  * Return value :
313  * None
314  */
315 static void
316 lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
317 {
318         struct lpfc_nvme_rport *rport = remoteport->private;
319         struct lpfc_vport *vport;
320         struct lpfc_nodelist *ndlp;
321
322         ndlp = rport->ndlp;
323         if (!ndlp)
324                 goto rport_err;
325
326         vport = ndlp->vport;
327         if (!vport)
328                 goto rport_err;
329
330         /* Remove this rport from the lport's list - memory is owned by the
331          * transport. Remove the ndlp reference for the NVME transport before
332          * calling state machine to remove the node.
333          */
334         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
335                         "6146 remoteport delete of remoteport %p\n",
336                         remoteport);
337         spin_lock_irq(&vport->phba->hbalock);
338
339         /* The register rebind might have occurred before the delete
340          * downcall.  Guard against this race.
341          */
342         if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
343                 ndlp->nrport = NULL;
344                 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
345         }
346         spin_unlock_irq(&vport->phba->hbalock);
347
348         /* Remove original register reference. The host transport
349          * won't reference this rport/remoteport any further.
350          */
351         lpfc_nlp_put(ndlp);
352
353  rport_err:
354         return;
355 }
356
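/* lpfc_nvme_cmpl_gen_req - Completion handler for the LS GEN_REQUEST WQE.
 * At submit time context1 holds the ndlp reference, context2 the
 * nvmefc_ls_req and context3 the BPL dmabuf wrapper.  This routine frees
 * the BPL wrapper, invokes the LS request's done() callback, drops the
 * ndlp reference and releases the iocbq.
 */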
357 static void
358 lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
359                        struct lpfc_wcqe_complete *wcqe)
360 {
361         struct lpfc_vport *vport = cmdwqe->vport;
362         struct lpfc_nvme_lport *lport;
363         uint32_t status;
364         struct nvmefc_ls_req *pnvme_lsreq;
365         struct lpfc_dmabuf *buf_ptr;
366         struct lpfc_nodelist *ndlp;
367
368         pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
369         status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
370
371         if (vport->localport) {
372                 lport = (struct lpfc_nvme_lport *)vport->localport->private;
373                 if (lport) {
374                         atomic_inc(&lport->fc4NvmeLsCmpls);
375                         if (status) {
376                                 if (bf_get(lpfc_wcqe_c_xb, wcqe))
377                                         atomic_inc(&lport->cmpl_ls_xb);
378                                 atomic_inc(&lport->cmpl_ls_err);
379                         }
380                 }
381         }
382
383         ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
384         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
385                          "6047 nvme cmpl Enter "
386                          "Data %p DID %x Xri: %x status %x reason x%x cmd:%p "
387                          "lsreg:%p bmp:%p ndlp:%p\n",
388                          pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
389                          cmdwqe->sli4_xritag, status,
390                          (wcqe->parameter & 0xffff),
391                          cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);
392
393         lpfc_nvmeio_data(phba, "NVME LS  CMPL: xri x%x stat x%x parm x%x\n",
394                          cmdwqe->sli4_xritag, status, wcqe->parameter);
395
396         if (cmdwqe->context3) {
397                 buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
398                 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
399                 kfree(buf_ptr);
400                 cmdwqe->context3 = NULL;
401         }
402         if (pnvme_lsreq->done)
403                 pnvme_lsreq->done(pnvme_lsreq, status);
404         else
405                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
406                                  "6046 nvme cmpl without done call back? "
407                                  "Data %p DID %x Xri: %x status %x\n",
408                                 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
409                                 cmdwqe->sli4_xritag, status);
410         if (ndlp) {
411                 lpfc_nlp_put(ndlp);
412                 cmdwqe->context1 = NULL;
413         }
414         lpfc_sli_release_iocbq(phba, cmdwqe);
415 }
416
417 static int
418 lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
419                   struct lpfc_dmabuf *inp,
420                   struct nvmefc_ls_req *pnvme_lsreq,
421                   void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
422                                struct lpfc_wcqe_complete *),
423                   struct lpfc_nodelist *ndlp, uint32_t num_entry,
424                   uint32_t tmo, uint8_t retry)
425 {
426         struct lpfc_hba *phba = vport->phba;
427         union lpfc_wqe128 *wqe;
428         struct lpfc_iocbq *genwqe;
429         struct ulp_bde64 *bpl;
430         struct ulp_bde64 bde;
431         int i, rc, xmit_len, first_len;
432
433         /* Allocate buffer for  command WQE */
434         genwqe = lpfc_sli_get_iocbq(phba);
435         if (genwqe == NULL)
436                 return 1;
437
438         wqe = &genwqe->wqe;
439         memset(wqe, 0, sizeof(union lpfc_wqe));
440
441         genwqe->context3 = (uint8_t *)bmp;
442         genwqe->iocb_flag |= LPFC_IO_NVME_LS;
443
444         /* Save for completion so we can release these resources */
445         genwqe->context1 = lpfc_nlp_get(ndlp);
446         genwqe->context2 = (uint8_t *)pnvme_lsreq;
447         /* Fill in payload, bp points to frame payload */
448
449         if (!tmo)
450                 /* FC spec states we need 3 * ratov for CT requests */
451                 tmo = (3 * phba->fc_ratov);
452
453         /* For this command calculate the xmit length of the request bde. */
454         xmit_len = 0;
455         first_len = 0;
456         bpl = (struct ulp_bde64 *)bmp->virt;
457         for (i = 0; i < num_entry; i++) {
458                 bde.tus.w = bpl[i].tus.w;
459                 if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
460                         break;
461                 xmit_len += bde.tus.f.bdeSize;
462                 if (i == 0)
463                         first_len = xmit_len;
464         }
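        /* xmit_len now covers every 64-bit BDE in the BPL, while first_len
         * is just the first (command) buffer; first_len is what gets
         * programmed into the inline BDE in WQE words 0-3 below.
         */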
465
466         genwqe->rsvd2 = num_entry;
467         genwqe->hba_wqidx = 0;
468
469         /* Words 0 - 2 */
470         wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
471         wqe->generic.bde.tus.f.bdeSize = first_len;
472         wqe->generic.bde.addrLow = bpl[0].addrLow;
473         wqe->generic.bde.addrHigh = bpl[0].addrHigh;
474
475         /* Word 3 */
476         wqe->gen_req.request_payload_len = first_len;
477
478         /* Word 4 */
479
480         /* Word 5 */
481         bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
482         bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
483         bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
484         bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
485         bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);
486
487         /* Word 6 */
488         bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
489                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
490         bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);
491
492         /* Word 7 */
493         bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
494         bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
495         bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
496         bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);
497
498         /* Word 8 */
499         wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;
500
501         /* Word 9 */
502         bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);
503
504         /* Word 10 */
505         bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
506         bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
507         bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
508         bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
509         bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
510
511         /* Word 11 */
512         bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
513         bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);
514
515
516         /* Issue GEN REQ WQE for NPORT <did> */
517         lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
518                          "6050 Issue GEN REQ WQE to NPORT x%x "
519                          "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
520                          ndlp->nlp_DID, genwqe->iotag,
521                          vport->port_state,
522                         genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
523         genwqe->wqe_cmpl = cmpl;
524         genwqe->iocb_cmpl = NULL;
525         genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
526         genwqe->vport = vport;
527         genwqe->retry = retry;
528
529         lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
530                          genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);
531
532         rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
533         if (rc) {
534                 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
535                                  "6045 Issue GEN REQ WQE to NPORT x%x "
536                                  "Data: x%x x%x\n",
537                                  ndlp->nlp_DID, genwqe->iotag,
538                                  vport->port_state);
539                 lpfc_sli_release_iocbq(phba, genwqe);
540                 return 1;
541         }
542         return 0;
543 }
544
545 /**
546  * lpfc_nvme_ls_req - Issue a Link Service request
547  * @pnvme_lport: Pointer to the driver's local port data
548  * @pnvme_rport: Pointer to the rport receiving the request
549  * @pnvme_lsreq: Pointer to the link service request from the transport
550  *
551  * Driver registers this routine to handle any link service request
552  * from the nvme_fc transport to a remote nvme-aware port.
553  *
554  * Return value :
555  *   0 - Success
556  *   non-zero - Failure (invalid port or node, no DMA buffer, or WQE issue error)
557  **/
558 static int
559 lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
560                  struct nvme_fc_remote_port *pnvme_rport,
561                  struct nvmefc_ls_req *pnvme_lsreq)
562 {
563         int ret = 0;
564         struct lpfc_nvme_lport *lport;
565         struct lpfc_nvme_rport *rport;
566         struct lpfc_vport *vport;
567         struct lpfc_nodelist *ndlp;
568         struct ulp_bde64 *bpl;
569         struct lpfc_dmabuf *bmp;
570         uint16_t ntype, nstate;
571
572         /* There are two DMA buffers in the request; in practice there is one,
573          * and the second is just the start address plus the command size.
574          * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
575          * in a lpfc_dmabuf struct. When freeing we just free the wrapper
576          * because the nvme layer owns the data buffers.
577          * We do not have to break these packets open; we don't care what is in
578          * them. Nor do we have to look at the response data; we only care
579          * that we got a response. All of that handling happens in the
580          * nvme-fc layer.
581          */
582
583         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
584         rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
585         if (unlikely(!lport) || unlikely(!rport))
586                 return -EINVAL;
587
588         vport = lport->vport;
589
590         if (vport->load_flag & FC_UNLOADING)
591                 return -ENODEV;
592
593         /* Need the ndlp.  It is stored in the driver's rport. */
594         ndlp = rport->ndlp;
595         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
596                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
597                                  "6051 Remoteport %p, rport has invalid ndlp. "
598                                  "Failing LS Req\n", pnvme_rport);
599                 return -ENODEV;
600         }
601
602         /* The remote node has to be a mapped nvme target or an
603          * unmapped nvme initiator or it's an error.
604          */
605         ntype = ndlp->nlp_type;
606         nstate = ndlp->nlp_state;
607         if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
608             (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
609                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
610                                  "6088 DID x%06x not ready for "
611                                  "IO. State x%x, Type x%x\n",
612                                  pnvme_rport->port_id,
613                                  ndlp->nlp_state, ndlp->nlp_type);
614                 return -ENODEV;
615         }
616         bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
617         if (!bmp) {
618
619                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
620                                  "6044 Could not find node for DID %x\n",
621                                  pnvme_rport->port_id);
622                 return 2;
623         }
624         INIT_LIST_HEAD(&bmp->list);
625         bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
626         if (!bmp->virt) {
627                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
628                                  "6042 Could not find node for DID %x\n",
629                                  pnvme_rport->port_id);
630                 kfree(bmp);
631                 return 3;
632         }
633         bpl = (struct ulp_bde64 *)bmp->virt;
634         bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
635         bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
636         bpl->tus.f.bdeFlags = 0;
637         bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
638         bpl->tus.w = le32_to_cpu(bpl->tus.w);
639         bpl++;
640
641         bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
642         bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
643         bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
644         bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
645         bpl->tus.w = le32_to_cpu(bpl->tus.w);
646
647         /* Expand print to include key fields. */
648         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
649                          "6149 Issue LS Req to DID 0x%06x lport %p, rport %p "
650                          "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n",
651                          ndlp->nlp_DID,
652                          pnvme_lport, pnvme_rport,
653                          pnvme_lsreq, pnvme_lsreq->rqstlen,
654                          pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
655                          &pnvme_lsreq->rspdma);
656
657         atomic_inc(&lport->fc4NvmeLsRequests);
658
659         /* Hardcode the wait to 30 seconds.  Connections are failing otherwise.
660          * This code allows it all to work.
661          */
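        /* The gen_req call below passes num_entry == 2 (the request and
         * response BDEs built above), the 30 second timeout noted here,
         * and retry == 0.
         */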
662         ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
663                                 pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
664                                 ndlp, 2, 30, 0);
665         if (ret != WQE_SUCCESS) {
666                 atomic_inc(&lport->xmt_ls_err);
667                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
668                                  "6052 EXIT. issue ls wqe failed lport %p, "
669                                  "rport %p lsreq%p Status %x DID %x\n",
670                                  pnvme_lport, pnvme_rport, pnvme_lsreq,
671                                  ret, ndlp->nlp_DID);
672                 lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
673                 kfree(bmp);
674                 return ret;
675         }
676
677         /* Stub in routine and return 0 for now. */
678         return ret;
679 }
680
681 /**
682  * lpfc_nvme_ls_abort - Abort a prior Link Service request
683  * @pnvme_lport: Pointer to the driver's local port data
684  * @pnvme_rport: Pointer to the rport the request was issued to
685  * @pnvme_lsreq: Pointer to the link service request being aborted
686  *
687  * Driver registers this routine to abort a link service request
688  * previously issued to a remote nvme-aware port.
689  *
690  * Return value :
691  *   None
693  **/
694 static void
695 lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
696                    struct nvme_fc_remote_port *pnvme_rport,
697                    struct nvmefc_ls_req *pnvme_lsreq)
698 {
699         struct lpfc_nvme_lport *lport;
700         struct lpfc_vport *vport;
701         struct lpfc_hba *phba;
702         struct lpfc_nodelist *ndlp;
703         LIST_HEAD(abort_list);
704         struct lpfc_sli_ring *pring;
705         struct lpfc_iocbq *wqe, *next_wqe;
706
707         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
708         if (unlikely(!lport))
709                 return;
710         vport = lport->vport;
711         phba = vport->phba;
712
713         if (vport->load_flag & FC_UNLOADING)
714                 return;
715
716         ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
717         if (!ndlp) {
718                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
719                                  "6049 Could not find node for DID %x\n",
720                                  pnvme_rport->port_id);
721                 return;
722         }
723
724         /* Expand print to include key fields. */
725         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
726                          "6040 ENTER.  lport %p, rport %p lsreq %p rqstlen:%d "
727                          "rsplen:%d %pad %pad\n",
728                          pnvme_lport, pnvme_rport,
729                          pnvme_lsreq, pnvme_lsreq->rqstlen,
730                          pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
731                          &pnvme_lsreq->rspdma);
732
733         /*
734          * Lock the ELS ring txcmplq and build a local list of all ELS IOs
735          * that need an ABTS.  The IOs need to stay on the txcmplq so that
736          * the abort operation completes them successfully.
737          */
738         pring = phba->sli4_hba.nvmels_wq->pring;
739         spin_lock_irq(&phba->hbalock);
740         spin_lock(&pring->ring_lock);
741         list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
742                 /* Add to abort_list on NDLP match. */
743                 if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
744                         wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
745                         list_add_tail(&wqe->dlist, &abort_list);
746                 }
747         }
748         spin_unlock(&pring->ring_lock);
749         spin_unlock_irq(&phba->hbalock);
750
751         /* Abort the targeted IOs and remove them from the abort list. */
752         list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
753                 atomic_inc(&lport->xmt_ls_abort);
754                 spin_lock_irq(&phba->hbalock);
755                 list_del_init(&wqe->dlist);
756                 lpfc_sli_issue_abort_iotag(phba, pring, wqe);
757                 spin_unlock_irq(&phba->hbalock);
758         }
759 }
760
761 /* Fix up the existing sgls for NVME IO. */
762 static inline void
763 lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
764                        struct lpfc_io_buf *lpfc_ncmd,
765                        struct nvmefc_fcp_req *nCmd)
766 {
767         struct lpfc_hba  *phba = vport->phba;
768         struct sli4_sge *sgl;
769         union lpfc_wqe128 *wqe;
770         uint32_t *wptr, *dptr;
771
772         /*
773          * Get a local pointer to the built-in wqe and correct
774          * the cmd size to match NVME's 96 bytes and fix
775          * the dma address.
776          */
777
778         wqe = &lpfc_ncmd->cur_iocbq.wqe;
779
780         /*
781          * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
782          * match NVME.  NVME sends 96 bytes. Also, use the
783          * nvme command's command and response DMA addresses
784          * rather than the virtual memory to ease the restore
785          * operation.
786          */
787         sgl = lpfc_ncmd->dma_sgl;
788         sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
789         if (phba->cfg_nvme_embed_cmd) {
790                 sgl->addr_hi = 0;
791                 sgl->addr_lo = 0;
792
793                 /* Word 0-2 - NVME CMND IU (embedded payload) */
794                 wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
795                 wqe->generic.bde.tus.f.bdeSize = 56;
796                 wqe->generic.bde.addrHigh = 0;
797                 wqe->generic.bde.addrLow =  64;  /* Word 16 */
798
799                 /* Word 10  - dbde is 0, wqes is 1 in template */
800
801                 /*
802                  * Embed the payload in the last half of the WQE
803                  * WQE words 16-30 get the NVME CMD IU payload
804                  *
805                  * WQE words 16-19 get payload Words 1-4
806                  * WQE words 20-21 get payload Words 6-7
807                  * WQE words 22-29 get payload Words 16-23
808                  */
809                 wptr = &wqe->words[16];  /* WQE ptr */
810                 dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
811                 dptr++;                 /* Skip Word 0 in payload */
812
813                 *wptr++ = *dptr++;      /* Word 1 */
814                 *wptr++ = *dptr++;      /* Word 2 */
815                 *wptr++ = *dptr++;      /* Word 3 */
816                 *wptr++ = *dptr++;      /* Word 4 */
817                 dptr++;                 /* Skip Word 5 in payload */
818                 *wptr++ = *dptr++;      /* Word 6 */
819                 *wptr++ = *dptr++;      /* Word 7 */
820                 dptr += 8;              /* Skip Words 8-15 in payload */
821                 *wptr++ = *dptr++;      /* Word 16 */
822                 *wptr++ = *dptr++;      /* Word 17 */
823                 *wptr++ = *dptr++;      /* Word 18 */
824                 *wptr++ = *dptr++;      /* Word 19 */
825                 *wptr++ = *dptr++;      /* Word 20 */
826                 *wptr++ = *dptr++;      /* Word 21 */
827                 *wptr++ = *dptr++;      /* Word 22 */
828                 *wptr   = *dptr;        /* Word 23 */
829         } else {
830                 sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
831                 sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));
832
833                 /* Word 0-2 - NVME CMND IU Inline BDE */
834                 wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
835                 wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
836                 wqe->generic.bde.addrHigh = sgl->addr_hi;
837                 wqe->generic.bde.addrLow =  sgl->addr_lo;
838
839                 /* Word 10 */
840                 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
841                 bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
842         }
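        /* In both branches the WQE's inline BDE (words 0-2) describes the
         * NVME CMD IU: either embedded in the WQE itself starting at word
         * 16 (byte offset 64, BUFF_TYPE_BDE_IMMED) or referenced by its
         * DMA address (BUFF_TYPE_BDE_64).
         */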
843
844         sgl++;
845
846         /* Setup the physical region for the FCP RSP */
847         sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
848         sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
849         sgl->word2 = le32_to_cpu(sgl->word2);
850         if (nCmd->sg_cnt)
851                 bf_set(lpfc_sli4_sge_last, sgl, 0);
852         else
853                 bf_set(lpfc_sli4_sge_last, sgl, 1);
854         sgl->word2 = cpu_to_le32(sgl->word2);
855         sgl->sge_len = cpu_to_le32(nCmd->rsplen);
856 }
857
858 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
859 static void
860 lpfc_nvme_ktime(struct lpfc_hba *phba,
861                 struct lpfc_io_buf *lpfc_ncmd)
862 {
863         uint64_t seg1, seg2, seg3, seg4;
864         uint64_t segsum;
865
866         if (!lpfc_ncmd->ts_last_cmd ||
867             !lpfc_ncmd->ts_cmd_start ||
868             !lpfc_ncmd->ts_cmd_wqput ||
869             !lpfc_ncmd->ts_isr_cmpl ||
870             !lpfc_ncmd->ts_data_nvme)
871                 return;
872
873         if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
874                 return;
875         if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
876                 return;
877         if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
878                 return;
879         if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
880                 return;
881         if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
882                 return;
883         /*
884          * Segment 1 - Time from when the last FCP command cmpl was handed
885          * off to the NVME layer to the start of the next command.
886          * Segment 2 - Time from when the driver receives an IO cmd start
887          * from the NVME layer to when the WQ put is done for the IO cmd.
888          * Segment 3 - Time from when the driver WQ put is done for the IO
889          * cmd to the MSI-X ISR for the IO cmpl.
890          * Segment 4 - Time from the MSI-X ISR for the IO cmpl to when the
891          * cmpl is handed off to the NVME layer.
892          */
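        /* The ts_* fields are nanosecond timestamps (ts_data_nvme, for
         * example, is taken with ktime_get_ns() in the completion path),
         * so the 5000000 clamp on seg1 below is 5 ms.
         */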
893         seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
894         if (seg1 > 5000000)  /* 5 ms - for sequential IOs only */
895                 seg1 = 0;
896
897         /* Calculate times relative to start of IO */
898         seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
899         segsum = seg2;
900         seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
901         if (segsum > seg3)
902                 return;
903         seg3 -= segsum;
904         segsum += seg3;
905
906         seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
907         if (segsum > seg4)
908                 return;
909         seg4 -= segsum;
910
911         phba->ktime_data_samples++;
912         phba->ktime_seg1_total += seg1;
913         if (seg1 < phba->ktime_seg1_min)
914                 phba->ktime_seg1_min = seg1;
915         else if (seg1 > phba->ktime_seg1_max)
916                 phba->ktime_seg1_max = seg1;
917         phba->ktime_seg2_total += seg2;
918         if (seg2 < phba->ktime_seg2_min)
919                 phba->ktime_seg2_min = seg2;
920         else if (seg2 > phba->ktime_seg2_max)
921                 phba->ktime_seg2_max = seg2;
922         phba->ktime_seg3_total += seg3;
923         if (seg3 < phba->ktime_seg3_min)
924                 phba->ktime_seg3_min = seg3;
925         else if (seg3 > phba->ktime_seg3_max)
926                 phba->ktime_seg3_max = seg3;
927         phba->ktime_seg4_total += seg4;
928         if (seg4 < phba->ktime_seg4_min)
929                 phba->ktime_seg4_min = seg4;
930         else if (seg4 > phba->ktime_seg4_max)
931                 phba->ktime_seg4_max = seg4;
932
933         lpfc_ncmd->ts_last_cmd = 0;
934         lpfc_ncmd->ts_cmd_start = 0;
935         lpfc_ncmd->ts_cmd_wqput  = 0;
936         lpfc_ncmd->ts_isr_cmpl = 0;
937         lpfc_ncmd->ts_data_nvme = 0;
938 }
939 #endif
940
941 /**
942  * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
943  * @phba: Pointer to the driver's HBA instance data
944  * @pwqeIn: Pointer to the command WQE that has completed
945  * @wcqe: Pointer to the work-queue completion entry for the WQE
946  *
947  * Driver registers this routine as its FCP IO completion handler.  It
948  * translates the WCQE status into an NVME response, rebuilding the ERSP
949  * IU when required, and hands the IO back to the NVME transport.
950  *
951  * Return value :
952  *   None
954  **/
955 static void
956 lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
957                           struct lpfc_wcqe_complete *wcqe)
958 {
959         struct lpfc_io_buf *lpfc_ncmd =
960                 (struct lpfc_io_buf *)pwqeIn->context1;
961         struct lpfc_vport *vport = pwqeIn->vport;
962         struct nvmefc_fcp_req *nCmd;
963         struct nvme_fc_ersp_iu *ep;
964         struct nvme_fc_cmd_iu *cp;
965         struct lpfc_nodelist *ndlp;
966         struct lpfc_nvme_fcpreq_priv *freqpriv;
967         struct lpfc_nvme_lport *lport;
968         uint32_t code, status, idx;
969         uint16_t cid, sqhd, data;
970         uint32_t *ptr;
971
972         /* Sanity check on return of outstanding command */
973         if (!lpfc_ncmd) {
974                 lpfc_printf_vlog(vport, KERN_ERR,
975                                  LOG_NODE | LOG_NVME_IOERR,
976                                  "6071 Null lpfc_ncmd pointer. No "
977                                  "release, skip completion\n");
978                 return;
979         }
980
981         /* Guard against abort handler being called at same time */
982         spin_lock(&lpfc_ncmd->buf_lock);
983
984         if (!lpfc_ncmd->nvmeCmd) {
985                 spin_unlock(&lpfc_ncmd->buf_lock);
986                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
987                                  "6066 Missing cmpl ptrs: lpfc_ncmd %p, "
988                                  "nvmeCmd %p\n",
989                                  lpfc_ncmd, lpfc_ncmd->nvmeCmd);
990
991                 /* Release the lpfc_ncmd regardless of the missing elements. */
992                 lpfc_release_nvme_buf(phba, lpfc_ncmd);
993                 return;
994         }
995         nCmd = lpfc_ncmd->nvmeCmd;
996         status = bf_get(lpfc_wcqe_c_status, wcqe);
997
998         idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
999         phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;
1000
1001         if (vport->localport) {
1002                 lport = (struct lpfc_nvme_lport *)vport->localport->private;
1003                 if (lport && status) {
1004                         if (bf_get(lpfc_wcqe_c_xb, wcqe))
1005                                 atomic_inc(&lport->cmpl_fcp_xb);
1006                         atomic_inc(&lport->cmpl_fcp_err);
1007                 }
1008         }
1009
1010         lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
1011                          lpfc_ncmd->cur_iocbq.sli4_xritag,
1012                          status, wcqe->parameter);
1013         /*
1014          * Catch race where our node has transitioned, but the
1015          * transport is still transitioning.
1016          */
1017         ndlp = lpfc_ncmd->ndlp;
1018         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1019                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
1020                                  "6062 Ignoring NVME cmpl.  No ndlp\n");
1021                 goto out_err;
1022         }
1023
1024         code = bf_get(lpfc_wcqe_c_code, wcqe);
1025         if (code == CQE_CODE_NVME_ERSP) {
1026                 /* For this type of CQE, we need to rebuild the rsp */
1027                 ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
1028
1029                 /*
1030                  * Get Command Id from cmd to plug into response. This
1031                  * code is not needed in the next NVME Transport drop.
1032                  */
1033                 cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
1034                 cid = cp->sqe.common.command_id;
1035
1036                 /*
1037                  * RSN is in CQE word 2
1038                  * SQHD is in CQE Word 3 bits 15:0
1039                  * Cmd Specific info is in CQE Word 1
1040                  * and in CQE Word 0 bits 15:0
1041                  */
1042                 sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);
1043
1044                 /* Now lets build the NVME ERSP IU */
1045                 ep->iu_len = cpu_to_be16(8);
1046                 ep->rsn = wcqe->parameter;
1047                 ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
1048                 ep->rsvd12 = 0;
1049                 ptr = (uint32_t *)&ep->cqe.result.u64;
1050                 *ptr++ = wcqe->total_data_placed;
1051                 data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
1052                 *ptr = (uint32_t)data;
1053                 ep->cqe.sq_head = sqhd;
1054                 ep->cqe.sq_id =  nCmd->sqid;
1055                 ep->cqe.command_id = cid;
1056                 ep->cqe.status = 0;
1057
1058                 lpfc_ncmd->status = IOSTAT_SUCCESS;
1059                 lpfc_ncmd->result = 0;
1060                 nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
1061                 nCmd->transferred_length = nCmd->payload_length;
1062         } else {
1063                 lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
1064                 lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
1065
1066                 /* For NVME, the only failure path that results in an
1067                  * IO error is when the adapter rejects it.  All other
1068                  * conditions are a success case and resolved by the
1069                  * transport.
1070                  * IOSTAT_FCP_RSP_ERROR means:
1071                  * 1. Length of data received doesn't match total
1072                  *    transfer length in WQE
1073                  * 2. If the RSP payload does NOT match these cases:
1074                  *    a. RSP length 12/24 bytes and all zeros
1075                  *    b. NVME ERSP
1076                  */
1077                 switch (lpfc_ncmd->status) {
1078                 case IOSTAT_SUCCESS:
1079                         nCmd->transferred_length = wcqe->total_data_placed;
1080                         nCmd->rcv_rsplen = 0;
1081                         nCmd->status = 0;
1082                         break;
1083                 case IOSTAT_FCP_RSP_ERROR:
1084                         nCmd->transferred_length = wcqe->total_data_placed;
1085                         nCmd->rcv_rsplen = wcqe->parameter;
1086                         nCmd->status = 0;
1087                         /* Sanity check */
1088                         if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
1089                                 break;
1090                         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
1091                                          "6081 NVME Completion Protocol Error: "
1092                                          "xri %x status x%x result x%x "
1093                                          "placed x%x\n",
1094                                          lpfc_ncmd->cur_iocbq.sli4_xritag,
1095                                          lpfc_ncmd->status, lpfc_ncmd->result,
1096                                          wcqe->total_data_placed);
1097                         break;
1098                 case IOSTAT_LOCAL_REJECT:
1099                         /* Let fall through to set command final state. */
1100                         if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
1101                                 lpfc_printf_vlog(vport, KERN_INFO,
1102                                          LOG_NVME_IOERR,
1103                                          "6032 Delay Aborted cmd %p "
1104                                          "nvme cmd %p, xri x%x, "
1105                                          "xb %d\n",
1106                                          lpfc_ncmd, nCmd,
1107                                          lpfc_ncmd->cur_iocbq.sli4_xritag,
1108                                          bf_get(lpfc_wcqe_c_xb, wcqe));
1109                         /* fall through */
1110                 default:
1111 out_err:
1112                         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1113                                          "6072 NVME Completion Error: xri %x "
1114                                          "status x%x result x%x [x%x] "
1115                                          "placed x%x\n",
1116                                          lpfc_ncmd->cur_iocbq.sli4_xritag,
1117                                          lpfc_ncmd->status, lpfc_ncmd->result,
1118                                          wcqe->parameter,
1119                                          wcqe->total_data_placed);
1120                         nCmd->transferred_length = 0;
1121                         nCmd->rcv_rsplen = 0;
1122                         nCmd->status = NVME_SC_INTERNAL;
1123                 }
1124         }
1125
1126         /* Pick up SLI4 exchange busy condition */
1127         if (bf_get(lpfc_wcqe_c_xb, wcqe))
1128                 lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
1129         else
1130                 lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
1131
1132         /* Update stats and complete the IO.  There is
1133          * no need for dma unprep because the nvme_transport
1134          * owns the dma address.
1135          */
1136 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1137         if (lpfc_ncmd->ts_cmd_start) {
1138                 lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
1139                 lpfc_ncmd->ts_data_nvme = ktime_get_ns();
1140                 phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
1141                 lpfc_nvme_ktime(phba, lpfc_ncmd);
1142         }
1143         if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
1144                 uint32_t cpu;
1145                 idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
1146                 cpu = raw_smp_processor_id();
1147                 if (cpu < LPFC_CHECK_CPU_CNT) {
1148                         if (lpfc_ncmd->cpu != cpu)
1149                                 lpfc_printf_vlog(vport,
1150                                                  KERN_INFO, LOG_NVME_IOERR,
1151                                                  "6701 CPU Check cmpl: "
1152                                                  "cpu %d expect %d\n",
1153                                                  cpu, lpfc_ncmd->cpu);
1154                         phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
1155                 }
1156         }
1157 #endif
1158
1159         /* NVME targets need completion held off until the abort exchange
1160          * completes unless the NVME Rport is getting unregistered.
1161          */
1162
1163         if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
1164                 freqpriv = nCmd->private;
1165                 freqpriv->nvme_buf = NULL;
1166                 lpfc_ncmd->nvmeCmd = NULL;
1167                 spin_unlock(&lpfc_ncmd->buf_lock);
1168                 nCmd->done(nCmd);
1169         } else
1170                 spin_unlock(&lpfc_ncmd->buf_lock);
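        /* Note that buf_lock is dropped before nCmd->done() above, so the
         * completion upcall into the NVME transport never runs with the
         * driver's buffer lock held.
         */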
1171
1172         /* Call release with XB=1 to queue the IO into the abort list. */
1173         lpfc_release_nvme_buf(phba, lpfc_ncmd);
1174 }
1175
1176
1177 /**
1178  * lpfc_nvme_prep_io_cmd - Prepare the WQE for an NVME-over-FCP IO
1179  * @vport: Pointer to the driver's virtual port data
1180  * @lpfc_ncmd: Pointer to the driver's IO buffer for this command
1181  * @pnode: Pointer to the node (rport) this IO is directed to
1182  * @cstat: Pointer to the FC4 control statistics for the hardware queue
1183  *
1184  * This routine initializes the command WQE from the appropriate
1185  * iread/iwrite/icmnd template and fills in the WQE fields that are
1186  * independent of the scatter-gather list.
1187  *
1188  * Return value :
1189  *   0 - Success
1190  *   -EINVAL - The node is not active
1192  **/
1193 static int
1194 lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
1195                       struct lpfc_io_buf *lpfc_ncmd,
1196                       struct lpfc_nodelist *pnode,
1197                       struct lpfc_fc4_ctrl_stat *cstat)
1198 {
1199         struct lpfc_hba *phba = vport->phba;
1200         struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
1201         struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
1202         union lpfc_wqe128 *wqe = &pwqeq->wqe;
1203         uint32_t req_len;
1204
1205         if (!NLP_CHK_NODE_ACT(pnode))
1206                 return -EINVAL;
1207
1208         /*
1209          * There are three possibilities here - use scatter-gather segment, use
1210          * the single mapping, or neither.
1211          */
1212         if (nCmd->sg_cnt) {
1213                 if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
1214                         /* From the iwrite template, initialize words 7 - 11 */
1215                         memcpy(&wqe->words[7],
1216                                &lpfc_iwrite_cmd_template.words[7],
1217                                sizeof(uint32_t) * 5);
1218
1219                         /* Word 4 */
1220                         wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;
1221
1222                         /* Word 5 */
1223                         if ((phba->cfg_nvme_enable_fb) &&
1224                             (pnode->nlp_flag & NLP_FIRSTBURST)) {
1225                                 req_len = lpfc_ncmd->nvmeCmd->payload_length;
1226                                 if (req_len < pnode->nvme_fb_size)
1227                                         wqe->fcp_iwrite.initial_xfer_len =
1228                                                 req_len;
1229                                 else
1230                                         wqe->fcp_iwrite.initial_xfer_len =
1231                                                 pnode->nvme_fb_size;
1232                         } else {
1233                                 wqe->fcp_iwrite.initial_xfer_len = 0;
1234                         }
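                        /* Illustrative first-burst sizing (values assumed):
                         * with nvme_fb_size == 2048, a 1024-byte write gets
                         * initial_xfer_len 1024 while a 4096-byte write is
                         * capped at 2048.
                         */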
1235                         cstat->output_requests++;
1236                 } else {
1237                         /* From the iread template, initialize words 7 - 11 */
1238                         memcpy(&wqe->words[7],
1239                                &lpfc_iread_cmd_template.words[7],
1240                                sizeof(uint32_t) * 5);
1241
1242                         /* Word 4 */
1243                         wqe->fcp_iread.total_xfer_len = nCmd->payload_length;
1244
1245                         /* Word 5 */
1246                         wqe->fcp_iread.rsrvd5 = 0;
1247
1248                         cstat->input_requests++;
1249                 }
1250         } else {
1251                 /* From the icmnd template, initialize words 4 - 11 */
1252                 memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
1253                        sizeof(uint32_t) * 8);
1254                 cstat->control_requests++;
1255         }
1256         /*
1257          * Finish initializing those WQE fields that are independent
1258          * of the nvme_cmnd request_buffer
1259          */
1260
1261         /* Word 3 */
1262         bf_set(payload_offset_len, &wqe->fcp_icmd,
1263                (nCmd->rsplen + nCmd->cmdlen));
1264
1265         /* Word 6 */
1266         bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
1267                phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
1268         bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
1269
1270         /* Word 8 */
1271         wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
1272
1273         /* Word 9 */
1274         bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
1275
1276         /* Words 13 14 15 are for PBDE support */
1277
1278         pwqeq->vport = vport;
1279         return 0;
1280 }
1281
1282
1283 /**
1284  * lpfc_nvme_prep_io_dma - Prepare the data SGL for an NVME-over-FCP IO
1285  * @vport: Pointer to the driver's virtual port data
1286  * @lpfc_ncmd: Pointer to the driver's IO buffer for this command
1287  *
1288  * This routine fixes up the command and response SGEs and then walks the
1289  * transport-supplied scatter-gather list to build the data SGEs for the
1290  * command WQE.
1291  *
1292  * Return value :
1293  *   0 - Success
1294  *   1 - Failure (too many segments or an invalid scatter-gather entry)
1298  **/
1299 static int
1300 lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
1301                       struct lpfc_io_buf *lpfc_ncmd)
1302 {
1303         struct lpfc_hba *phba = vport->phba;
1304         struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
1305         union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
1306         struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
1307         struct scatterlist *data_sg;
1308         struct sli4_sge *first_data_sgl;
1309         struct ulp_bde64 *bde;
1310         dma_addr_t physaddr;
1311         uint32_t num_bde = 0;
1312         uint32_t dma_len;
1313         uint32_t dma_offset = 0;
1314         int nseg, i;
1315
1316         /* Fix up the command and response DMA stuff. */
1317         lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
1318
1319         /*
1320          * Two possibilities here: the IO carries a scatter-gather payload,
1321          * or it carries no data at all.
1322          */
1323         if (nCmd->sg_cnt) {
1324                 /*
1325                  * Jump over the cmd and rsp SGEs.  The fix routine
1326                  * has already adjusted for this.
1327                  */
1328                 sgl += 2;
1329
1330                 first_data_sgl = sgl;
1331                 lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
1332                 if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
1333                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1334                                         "6058 Too many sg segments from "
1335                                         "NVME Transport.  Max %d, "
1336                                         "nvmeIO sg_cnt %d\n",
1337                                         phba->cfg_nvme_seg_cnt + 1,
1338                                         lpfc_ncmd->seg_cnt);
1339                         lpfc_ncmd->seg_cnt = 0;
1340                         return 1;
1341                 }
1342
1343                 /*
1344                  * The driver established a maximum scatter-gather segment count
1345                  * during probe that limits the number of sg elements in any
1346                  * single nvme command.  Just run through the seg_cnt and format
1347                  * the sge's.
1348                  */
1349                 nseg = nCmd->sg_cnt;
1350                 data_sg = nCmd->first_sgl;
1351                 for (i = 0; i < nseg; i++) {
1352                         if (data_sg == NULL) {
1353                                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1354                                                 "6059 dptr err %d, nseg %d\n",
1355                                                 i, nseg);
1356                                 lpfc_ncmd->seg_cnt = 0;
1357                                 return 1;
1358                         }
1359                         physaddr = data_sg->dma_address;
1360                         dma_len = data_sg->length;
1361                         sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
1362                         sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
1363                         sgl->word2 = le32_to_cpu(sgl->word2);
1364                         if ((num_bde + 1) == nseg)
1365                                 bf_set(lpfc_sli4_sge_last, sgl, 1);
1366                         else
1367                                 bf_set(lpfc_sli4_sge_last, sgl, 0);
1368                         bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
1369                         bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
1370                         sgl->word2 = cpu_to_le32(sgl->word2);
1371                         sgl->sge_len = cpu_to_le32(dma_len);
1372
1373                         dma_offset += dma_len;
1374                         data_sg = sg_next(data_sg);
1375                         sgl++;
1376                 }
1377                 if (phba->cfg_enable_pbde) {
1378                         /* Use PBDE support for first SGL only, offset == 0 */
1379                         /* Words 13-15 */
1380                         bde = (struct ulp_bde64 *)
1381                                 &wqe->words[13];
1382                         bde->addrLow = first_data_sgl->addr_lo;
1383                         bde->addrHigh = first_data_sgl->addr_hi;
1384                         bde->tus.f.bdeSize =
1385                                 le32_to_cpu(first_data_sgl->sge_len);
1386                         bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1387                         bde->tus.w = cpu_to_le32(bde->tus.w);
1388                         /* wqe_pbde is 1 in template */
1389                 } else {
1390                         memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
1391                         bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
1392                 }
1393
1394         } else {
1395                 lpfc_ncmd->seg_cnt = 0;
1396
1397                 /* For this clause to be valid, the payload_length
1398                  * and sg_cnt must both be zero.
1399                  */
1400                 if (nCmd->payload_length != 0) {
1401                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1402                                         "6063 NVME DMA Prep Err: sg_cnt %d "
1403                                         "payload_length x%x\n",
1404                                         nCmd->sg_cnt, nCmd->payload_length);
1405                         return 1;
1406                 }
1407         }
1408         return 0;
1409 }
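
/*
 * Illustrative sketch of the SGL layout produced above for a request that
 * carries data (the cmd/rsp SGEs are prepared earlier by
 * lpfc_nvme_adj_fcp_sgls(), hence the "sgl += 2" skip):
 *
 *   dma_sgl[0]   -> NVME CMND IU (SKIP type; the command is embedded)
 *   dma_sgl[1]   -> NVME RSP IU
 *   dma_sgl[2]   -> first data SGE  (also mirrored into WQE words 13-15
 *   ...                              as a PBDE when cfg_enable_pbde is set)
 *   dma_sgl[N+1] -> last data SGE   (lpfc_sli4_sge_last = 1), N = sg_cnt
 */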
1410
1411 /**
1412  * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
1413  * @pnvme_lport: Pointer to the driver's local port data
1414  * @pnvme_rport: Pointer to the rport getting the @pnvme_fcreq
1415  * @hw_queue_handle: Driver-returned handle from lpfc_nvme_create_queue
1416  * @pnvme_fcreq: IO request from the nvme fc transport to the driver
1417  *
1418  * Driver registers this routine as its io request handler.  This
1419  * routine issues an fcp WQE with data from the @pnvme_fcreq
1420  * data structure to the rport indicated in @pnvme_rport.
1421  *
1422  * Return value :
1423  *   0 - Success
1424  *   negative errno - Failure: -EINVAL (bad request data), -EBUSY
1425  *       (resource shortage or node not ready), -ENODEV (driver
1426  *       unloading), or -ENOMEM (DMA prep failure)
1427  **/
1428 static int
1429 lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1430                         struct nvme_fc_remote_port *pnvme_rport,
1431                         void *hw_queue_handle,
1432                         struct nvmefc_fcp_req *pnvme_fcreq)
1433 {
1434         int ret = 0;
1435         int expedite = 0;
1436         int idx, cpu;
1437         struct lpfc_nvme_lport *lport;
1438         struct lpfc_fc4_ctrl_stat *cstat;
1439         struct lpfc_vport *vport;
1440         struct lpfc_hba *phba;
1441         struct lpfc_nodelist *ndlp;
1442         struct lpfc_io_buf *lpfc_ncmd;
1443         struct lpfc_nvme_rport *rport;
1444         struct lpfc_nvme_qhandle *lpfc_queue_info;
1445         struct lpfc_nvme_fcpreq_priv *freqpriv;
1446         struct nvme_common_command *sqe;
1447 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1448         uint64_t start = 0;
1449 #endif
1450
1451         /* Validate pointers. LLDD fault handling with transport does
1452          * have timing races.
1453          */
1454         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1455         if (unlikely(!lport)) {
1456                 ret = -EINVAL;
1457                 goto out_fail;
1458         }
1459
1460         vport = lport->vport;
1461
1462         if (unlikely(!hw_queue_handle)) {
1463                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1464                                  "6117 Fail IO, NULL hw_queue_handle\n");
1465                 atomic_inc(&lport->xmt_fcp_err);
1466                 ret = -EBUSY;
1467                 goto out_fail;
1468         }
1469
1470         phba = vport->phba;
1471
1472         if (vport->load_flag & FC_UNLOADING) {
1473                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1474                                  "6124 Fail IO, Driver unload\n");
1475                 atomic_inc(&lport->xmt_fcp_err);
1476                 ret = -ENODEV;
1477                 goto out_fail;
1478         }
1484
1485         freqpriv = pnvme_fcreq->private;
1486         if (unlikely(!freqpriv)) {
1487                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1488                                  "6158 Fail IO, NULL request data\n");
1489                 atomic_inc(&lport->xmt_fcp_err);
1490                 ret = -EINVAL;
1491                 goto out_fail;
1492         }
1493
1494 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1495         if (phba->ktime_on)
1496                 start = ktime_get_ns();
1497 #endif
1498         rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
1499         lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;
1500
1501         /*
1502          * Catch race where our node has transitioned, but the
1503          * transport is still transitioning.
1504          */
1505         ndlp = rport->ndlp;
1506         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1507                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1508                                  "6053 Fail IO, ndlp not ready: rport %p "
1509                                   "ndlp %p, DID x%06x\n",
1510                                  rport, ndlp, pnvme_rport->port_id);
1511                 atomic_inc(&lport->xmt_fcp_err);
1512                 ret = -EBUSY;
1513                 goto out_fail;
1514         }
1515
1516         /* The remote node has to be a mapped target or it's an error. */
1517         if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
1518             (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
1519                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
1520                                  "6036 Fail IO, DID x%06x not ready for "
1521                                  "IO. State x%x, Type x%x Flg x%x\n",
1522                                  pnvme_rport->port_id,
1523                                  ndlp->nlp_state, ndlp->nlp_type,
1524                                  ndlp->upcall_flags);
1525                 atomic_inc(&lport->xmt_fcp_bad_ndlp);
1526                 ret = -EBUSY;
1527                 goto out_fail;
1528
1529         }
1530
1531         /* Currently only NVME Keep alive commands should be expedited
1532          * if the driver runs out of a resource. These should only be
1533          * issued on the admin queue, qidx 0
1534          */
1535         if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
1536                 sqe = &((struct nvme_fc_cmd_iu *)
1537                         pnvme_fcreq->cmdaddr)->sqe.common;
1538                 if (sqe->opcode == nvme_admin_keep_alive)
1539                         expedite = 1;
1540         }
1541
1542         /* The node is shared with FCP IO, make sure the IO pending count does
1543          * not exceed the programmed depth.
1544          */
1545         if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
1546                 if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
1547                     !expedite) {
1548                         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1549                                          "6174 Fail IO, ndlp qdepth exceeded: "
1550                                          "idx %d DID %x pend %d qdepth %d\n",
1551                                          lpfc_queue_info->index, ndlp->nlp_DID,
1552                                          atomic_read(&ndlp->cmd_pending),
1553                                          ndlp->cmd_qdepth);
1554                         atomic_inc(&lport->xmt_fcp_qdepth);
1555                         ret = -EBUSY;
1556                         goto out_fail;
1557                 }
1558         }
1559
1560         /* Lookup Hardware Queue index based on fcp_io_sched module parameter */
1561         if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
1562                 idx = lpfc_queue_info->index;
1563         } else {
1564                 cpu = raw_smp_processor_id();
1565                 idx = phba->sli4_hba.cpu_map[cpu].hdwq;
1566         }
1567
1568         lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
1569         if (lpfc_ncmd == NULL) {
1570                 atomic_inc(&lport->xmt_fcp_noxri);
1571                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1572                                  "6065 Fail IO, driver buffer pool is empty: "
1573                                  "idx %d DID %x\n",
1574                                  lpfc_queue_info->index, ndlp->nlp_DID);
1575                 ret = -EBUSY;
1576                 goto out_fail;
1577         }
1578 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1579         if (start) {
1580                 lpfc_ncmd->ts_cmd_start = start;
1581                 lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
1582         } else {
1583                 lpfc_ncmd->ts_cmd_start = 0;
1584         }
1585 #endif
1586
1587         /*
1588          * Store the data needed by the driver to issue, abort, and complete
1589          * an IO.
1590          * Do not let the IO hang out forever.  There is no midlayer issuing
1591          * an abort so inform the FW of the maximum IO pending time.
1592          */
1593         freqpriv->nvme_buf = lpfc_ncmd;
1594         lpfc_ncmd->nvmeCmd = pnvme_fcreq;
1595         lpfc_ncmd->ndlp = ndlp;
1596         lpfc_ncmd->qidx = lpfc_queue_info->qidx;
1597
1598         /*
1599          * Issue the IO on the WQ indicated by index in the hw_queue_handle.
1600          * This identifier was created in our hardware queue create callback
1601          * routine. The driver now is dependent on the IO queue steering from
1602          * the transport.  We are trusting the upper NVME layers know which
1603          * index to use and that they have affinitized a CPU to this hardware
1604          * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
1605          */
1606         lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
1607         cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;
1608
1609         lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
1610         ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
1611         if (ret) {
1612                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1613                                  "6175 Fail IO, Prep DMA: "
1614                                  "idx %d DID %x\n",
1615                                  lpfc_queue_info->index, ndlp->nlp_DID);
1616                 atomic_inc(&lport->xmt_fcp_err);
1617                 ret = -ENOMEM;
1618                 goto out_free_nvme_buf;
1619         }
1620
1621         lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
1622                          lpfc_ncmd->cur_iocbq.sli4_xritag,
1623                          lpfc_queue_info->index, ndlp->nlp_DID);
1624
1625         ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
1626         if (ret) {
1627                 atomic_inc(&lport->xmt_fcp_wqerr);
1628                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1629                                  "6113 Fail IO, Could not issue WQE err %x "
1630                                  "sid: x%x did: x%x oxid: x%x\n",
1631                                  ret, vport->fc_myDID, ndlp->nlp_DID,
1632                                  lpfc_ncmd->cur_iocbq.sli4_xritag);
1633                 goto out_free_nvme_buf;
1634         }
1635
1636         if (phba->cfg_xri_rebalancing)
1637                 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);
1638
1639 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1640         if (lpfc_ncmd->ts_cmd_start)
1641                 lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
1642
1643         if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
1644                 cpu = raw_smp_processor_id();
1645                 if (cpu < LPFC_CHECK_CPU_CNT) {
1646                         lpfc_ncmd->cpu = cpu;
1647                         if (idx != cpu)
1648                                 lpfc_printf_vlog(vport,
1649                                                  KERN_INFO, LOG_NVME_IOERR,
1650                                                 "6702 CPU Check cmd: "
1651                                                 "cpu %d wq %d\n",
1652                                                 lpfc_ncmd->cpu,
1653                                                 lpfc_queue_info->index);
1654                         phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++;
1655                 }
1656         }
1657 #endif
1658         return 0;
1659
1660  out_free_nvme_buf:
1661         if (lpfc_ncmd->nvmeCmd->sg_cnt) {
1662                 if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
1663                         cstat->output_requests--;
1664                 else
1665                         cstat->input_requests--;
1666         } else
1667                 cstat->control_requests--;
1668         lpfc_release_nvme_buf(phba, lpfc_ncmd);
1669  out_fail:
1670         return ret;
1671 }
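
/*
 * Submit-path summary (a rough guide to the routine above): validate the
 * lport/rport/ndlp and queue handle, optionally expedite admin-queue
 * keep-alives, enforce the per-node queue depth, pick a hardware queue
 * (by handle index or by the submitting CPU), allocate an lpfc_io_buf,
 * build the WQE via lpfc_nvme_prep_io_cmd()/lpfc_nvme_prep_io_dma(), and
 * post it with lpfc_sli4_issue_wqe().  Any failure unwinds the per-queue
 * stat counters, releases the buffer, and returns a negative errno.
 */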
1672
1673 /**
1674  * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
1675  * @phba: Pointer to HBA context object
1676  * @cmdiocb: Pointer to the command iocbq that carried the abort WQE.
1677  * @abts_cmpl: Pointer to the abort WCQE completion returned by the firmware.
1678  *
1679  * This is the completion handler for any NVME FCP IO that was aborted.
1680  *
1681  * Return value:
1682  *   None
1683  **/
1684 void
1685 lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1686                            struct lpfc_wcqe_complete *abts_cmpl)
1687 {
1688         lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1689                         "6145 ABORT_XRI_CN completing on rpi x%x "
1690                         "original iotag x%x, abort cmd iotag x%x "
1691                         "req_tag x%x, status x%x, hwstatus x%x\n",
1692                         cmdiocb->iocb.un.acxri.abortContextTag,
1693                         cmdiocb->iocb.un.acxri.abortIoTag,
1694                         cmdiocb->iotag,
1695                         bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
1696                         bf_get(lpfc_wcqe_c_status, abts_cmpl),
1697                         bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
1698         lpfc_sli_release_iocbq(phba, cmdiocb);
1699 }
1700
1701 /**
1702  * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
1703  * @pnvme_lport: Pointer to the driver's local port data
1704  * @pnvme_rport: Pointer to the rport that owns the IO being aborted
1705  * @hw_queue_handle: Driver-returned handle from lpfc_nvme_create_queue
1706  * @pnvme_fcreq: IO request from the nvme fc transport to the driver
1707  *
1708  * Driver registers this routine as its nvme request io abort handler.  This
1709  * routine issues an fcp Abort WQE with data from the @pnvme_fcreq
1710  * data structure to the rport indicated in @pnvme_rport.  This routine
1711  * is executed asynchronously - once the outstanding IO is validated as
1712  * still owned by the driver and still pending, the driver issues the
1713  * abort request and returns without waiting for the abort to complete.
1714  *
1715  * Return value:
1716  *   None
1717  **/
1718 static void
1719 lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1720                     struct nvme_fc_remote_port *pnvme_rport,
1721                     void *hw_queue_handle,
1722                     struct nvmefc_fcp_req *pnvme_fcreq)
1723 {
1724         struct lpfc_nvme_lport *lport;
1725         struct lpfc_vport *vport;
1726         struct lpfc_hba *phba;
1727         struct lpfc_io_buf *lpfc_nbuf;
1728         struct lpfc_iocbq *abts_buf;
1729         struct lpfc_iocbq *nvmereq_wqe;
1730         struct lpfc_nvme_fcpreq_priv *freqpriv;
1731         union lpfc_wqe128 *abts_wqe;
1732         unsigned long flags;
1733         int ret_val;
1734
1735         /* Validate pointers. LLDD fault handling with transport does
1736          * have timing races.
1737          */
1738         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1739         if (unlikely(!lport))
1740                 return;
1741
1742         vport = lport->vport;
1743
1744         if (unlikely(!hw_queue_handle)) {
1745                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1746                                  "6129 Fail Abort, HW Queue Handle NULL.\n");
1747                 return;
1748         }
1749
1750         phba = vport->phba;
1751         freqpriv = pnvme_fcreq->private;
1752
1753         if (unlikely(!freqpriv))
1754                 return;
1755         if (vport->load_flag & FC_UNLOADING)
1756                 return;
1757
1758         /* Announce entry to the abort request handler. */
1759         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1760                          "6002 Abort Request to rport DID x%06x "
1761                          "for nvme_fc_req %p\n",
1762                          pnvme_rport->port_id,
1763                          pnvme_fcreq);
1764
1765         /* If the hba is getting reset, this flag is set.  It is
1766          * cleared when the reset is complete and rings reestablished.
1767          */
1768         spin_lock_irqsave(&phba->hbalock, flags);
1769         /* driver queued commands are in process of being flushed */
1770         if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
1771                 spin_unlock_irqrestore(&phba->hbalock, flags);
1772                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1773                                  "6139 Driver in reset cleanup - flushing "
1774                                  "NVME Req now.  hba_flag x%x\n",
1775                                  phba->hba_flag);
1776                 return;
1777         }
1778
1779         lpfc_nbuf = freqpriv->nvme_buf;
1780         if (!lpfc_nbuf) {
1781                 spin_unlock_irqrestore(&phba->hbalock, flags);
1782                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1783                                  "6140 NVME IO req has no matching lpfc nvme "
1784                                  "io buffer.  Skipping abort req.\n");
1785                 return;
1786         } else if (!lpfc_nbuf->nvmeCmd) {
1787                 spin_unlock_irqrestore(&phba->hbalock, flags);
1788                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1789                                  "6141 lpfc NVME IO req has no nvme_fcreq "
1790                                  "io buffer.  Skipping abort req.\n");
1791                 return;
1792         }
1793         nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
1794
1795         /* Guard against IO completion being called at same time */
1796         spin_lock(&lpfc_nbuf->buf_lock);
1797
1798         /*
1799          * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
1800          * state must match the nvme_fcreq passed by the nvme
1801          * transport.  If they don't match, it is likely the driver
1802          * has already completed the NVME IO and the nvme transport
1803          * has not seen it yet.
1804          */
1805         if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
1806                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1807                                  "6143 NVME req mismatch: "
1808                                  "lpfc_nbuf %p nvmeCmd %p, "
1809                                  "pnvme_fcreq %p.  Skipping Abort xri x%x\n",
1810                                  lpfc_nbuf, lpfc_nbuf->nvmeCmd,
1811                                  pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1812                 goto out_unlock;
1813         }
1814
1815         /* Don't abort IOs no longer on the pending queue. */
1816         if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
1817                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1818                                  "6142 NVME IO req %p not queued - skipping "
1819                                  "abort req xri x%x\n",
1820                                  pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1821                 goto out_unlock;
1822         }
1823
1824         atomic_inc(&lport->xmt_fcp_abort);
1825         lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
1826                          nvmereq_wqe->sli4_xritag,
1827                          nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
1828
1829         /* Outstanding abort is in progress */
1830         if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
1831                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1832                                  "6144 Outstanding NVME I/O Abort Request "
1833                                  "still pending on nvme_fcreq %p, "
1834                                  "lpfc_ncmd %p xri x%x\n",
1835                                  pnvme_fcreq, lpfc_nbuf,
1836                                  nvmereq_wqe->sli4_xritag);
1837                 goto out_unlock;
1838         }
1839
1840         abts_buf = __lpfc_sli_get_iocbq(phba);
1841         if (!abts_buf) {
1842                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1843                                  "6136 No available abort wqes. Skipping "
1844                                  "Abts req for nvme_fcreq %p xri x%x\n",
1845                                  pnvme_fcreq, nvmereq_wqe->sli4_xritag);
1846                 goto out_unlock;
1847         }
1848
1849         /* Ready - mark outstanding as aborted by driver. */
1850         nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
1851
1852         /* Complete prepping the abort wqe and issue to the FW. */
1853         abts_wqe = &abts_buf->wqe;
1854
1855         /* WQEs are reused.  Clear stale data and set key fields to
1856          * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
1857          */
1858         memset(abts_wqe, 0, sizeof(union lpfc_wqe));
1859         bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
1860
1861         /* word 7 */
1862         bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
1863         bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
1864                nvmereq_wqe->iocb.ulpClass);
1865
1866         /* word 8 - tell the FW to abort the IO associated with this
1867          * outstanding exchange ID.
1868          */
1869         abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;
1870
1871         /* word 9 - this is the iotag for the abts_wqe completion. */
1872         bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
1873                abts_buf->iotag);
1874
1875         /* word 10 */
1876         bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
1877         bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
1878
1879         /* word 11 */
1880         bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
1881         bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
1882         bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
1883
1884         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
1885         abts_buf->iocb_flag |= LPFC_IO_NVME;
1886         abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
1887         abts_buf->vport = vport;
1888         abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
1889         ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf);
1890         spin_unlock(&lpfc_nbuf->buf_lock);
1891         spin_unlock_irqrestore(&phba->hbalock, flags);
1892         if (ret_val) {
1893                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1894                                  "6137 Failed abts issue_wqe with status x%x "
1895                                  "for nvme_fcreq %p.\n",
1896                                  ret_val, pnvme_fcreq);
1897                 lpfc_sli_release_iocbq(phba, abts_buf);
1898                 return;
1899         }
1900
1901         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
1902                          "6138 Transport Abort NVME Request Issued for "
1903                          "ox_id x%x on reqtag x%x\n",
1904                          nvmereq_wqe->sli4_xritag,
1905                          abts_buf->iotag);
1906         return;
1907
1908 out_unlock:
1909         spin_unlock(&lpfc_nbuf->buf_lock);
1910         spin_unlock_irqrestore(&phba->hbalock, flags);
1911         return;
1912 }
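
/*
 * Abort-path note: the routine above takes phba->hbalock and then the
 * per-buffer buf_lock while it validates that the nvmefc_fcp_req still
 * maps to a driver IO on the txcmplq and is not already being aborted.
 * Only then is an ABORT_XRI_CX WQE built and posted to the same hardware
 * queue as the original IO; the abort WQE completes through
 * lpfc_nvme_abort_fcreq_cmpl(), and the aborted exchange is cleaned up
 * when its XRI_ABORTED completion arrives.
 */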
1913
1914 /* Declare and initialize an instance of the FC NVME template. */
1915 static struct nvme_fc_port_template lpfc_nvme_template = {
1916         /* initiator-based functions */
1917         .localport_delete  = lpfc_nvme_localport_delete,
1918         .remoteport_delete = lpfc_nvme_remoteport_delete,
1919         .create_queue = lpfc_nvme_create_queue,
1920         .delete_queue = lpfc_nvme_delete_queue,
1921         .ls_req       = lpfc_nvme_ls_req,
1922         .fcp_io       = lpfc_nvme_fcp_io_submit,
1923         .ls_abort     = lpfc_nvme_ls_abort,
1924         .fcp_abort    = lpfc_nvme_fcp_abort,
1925
1926         .max_hw_queues = 1,
1927         .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1928         .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1929         .dma_boundary = 0xFFFFFFFF,
1930
1931         /* Sizes of additional private data for data structures.
1932          * The LS request private size is unused at this time.
1933          */
1934         .local_priv_sz = sizeof(struct lpfc_nvme_lport),
1935         .remote_priv_sz = sizeof(struct lpfc_nvme_rport),
1936         .lsrqst_priv_sz = 0,
1937         .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
1938 };
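
/*
 * A minimal sketch of how this template is consumed (assumes the standard
 * nvme-fc transport API; see lpfc_nvme_create_localport() below for the
 * actual registration call and arguments):
 *
 *      struct nvme_fc_local_port *localport;
 *      ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
 *                                       &vport->phba->pcidev->dev,
 *                                       &localport);
 *
 * The transport allocates local_priv_sz/remote_priv_sz bytes of private
 * area per localport/remoteport, which the driver treats as its
 * lpfc_nvme_lport and lpfc_nvme_rport structures.
 */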
1939
1940 /**
1941  * lpfc_get_nvme_buf - Get an nvme buffer from the io_buf_list of the HBA
1942  * @phba: The HBA for which this call is being executed.
1943  * @ndlp: The node this IO is destined for.
1944  * @idx: Hardware queue index to allocate the buffer from.
1945  * @expedite: Allow use of the reserved buffer pool if set.
1946  *
1947  * This routine removes an nvme buffer from the head of the hardware
1948  * queue io_buf_list and returns it to the caller.
1949  * Return codes: NULL - Error; Pointer to lpfc_io_buf - Success
1950  **/
1951 static struct lpfc_io_buf *
1952 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
1953                   int idx, int expedite)
1954 {
1955         struct lpfc_io_buf *lpfc_ncmd;
1956         struct lpfc_sli4_hdw_queue *qp;
1957         struct sli4_sge *sgl;
1958         struct lpfc_iocbq *pwqeq;
1959         union lpfc_wqe128 *wqe;
1960
1961         lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);
1962
1963         if (lpfc_ncmd) {
1964                 pwqeq = &(lpfc_ncmd->cur_iocbq);
1965                 wqe = &pwqeq->wqe;
1966
1967                 /* Setup key fields in buffer that may have been changed
1968                  * if other protocols used this buffer.
1969                  */
1970                 pwqeq->iocb_flag = LPFC_IO_NVME;
1971                 pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
1972                 lpfc_ncmd->start_time = jiffies;
1973                 lpfc_ncmd->flags = 0;
1974
1975                 /* Rsp SGE will be filled in when we rcv an IO
1976                  * from the NVME Layer to be sent.
1977                  * The cmd is going to be embedded so we need a SKIP SGE.
1978                  */
1979                 sgl = lpfc_ncmd->dma_sgl;
1980                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1981                 bf_set(lpfc_sli4_sge_last, sgl, 0);
1982                 sgl->word2 = cpu_to_le32(sgl->word2);
1983                 /* Fill in word 3 / sgl_len during cmd submission */
1984
1985                 /* Initialize WQE */
1986                 memset(wqe, 0, sizeof(union lpfc_wqe));
1987
1988                 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
1989                         atomic_inc(&ndlp->cmd_pending);
1990                         lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
1991                 }
1992
1993         } else {
1994                 qp = &phba->sli4_hba.hdwq[idx];
1995                 qp->empty_io_bufs++;
1996         }
1997
1998         return  lpfc_ncmd;
1999 }
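
/*
 * Buffers returned by lpfc_get_nvme_buf() are paired with
 * lpfc_release_nvme_buf() on the completion or error path.  When the node
 * is tracking queue depth, the LPFC_SBUF_BUMP_QDEPTH flag set here tells
 * the release path to drop the cmd_pending count that was bumped above.
 */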
2000
2001 /**
2002  * lpfc_release_nvme_buf - Return an nvme buffer back to the hba io buf list.
2003  * @phba: The Hba for which this call is being executed.
2004  * @lpfc_ncmd: The nvme buffer which is being released.
2005  *
2006  * This routine releases @lpfc_ncmd by returning it to the hardware queue's
2007  * io_buf_list. For SLI4, XRIs are tied to the nvme buffer and cannot be
2008  * reused for at least RA_TOV if the IO was aborted, so an XB-marked buffer
2009  * is parked on the abort list until its XRI is released.
2010  **/
2011 static void
2012 lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
2013 {
2014         struct lpfc_sli4_hdw_queue *qp;
2015         unsigned long iflag = 0;
2016
2017         if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
2018                 atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
2019
2020         lpfc_ncmd->ndlp = NULL;
2021         lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
2022
2023         qp = lpfc_ncmd->hdwq;
2024         if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
2025                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2026                                 "6310 XB release deferred for "
2027                                 "ox_id x%x on reqtag x%x\n",
2028                                 lpfc_ncmd->cur_iocbq.sli4_xritag,
2029                                 lpfc_ncmd->cur_iocbq.iotag);
2030
2031                 spin_lock_irqsave(&qp->abts_nvme_buf_list_lock, iflag);
2032                 list_add_tail(&lpfc_ncmd->list,
2033                         &qp->lpfc_abts_nvme_buf_list);
2034                 qp->abts_nvme_io_bufs++;
2035                 spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag);
2036         } else
2037                 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
2038 }
2039
2040 /**
2041  * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
2042  * @vport: the lpfc_vport instance requesting a localport.
2043  *
2044  * This routine is invoked to create an nvme localport instance to bind
2045  * to the nvme_fc_transport.  It is called once during driver load
2046  * like lpfc_create_shost after all other services are initialized.
2047  * It requires a vport, vpi, and wwns at call time.  Other localport
2048  * parameters are modified as the driver's FCID and the Fabric WWN
2049  * are established.
2050  *
2051  * Return codes
2052  *      0 - successful
2053  *      -ENOMEM - no heap memory available
2054  *      other values - from nvme registration upcall
2055  **/
2056 int
2057 lpfc_nvme_create_localport(struct lpfc_vport *vport)
2058 {
2059         int ret = 0;
2060         struct lpfc_hba  *phba = vport->phba;
2061         struct nvme_fc_port_info nfcp_info;
2062         struct nvme_fc_local_port *localport;
2063         struct lpfc_nvme_lport *lport;
2064
2065         /* Initialize this localport instance.  The vport wwn usage ensures
2066          * that NPIV is accounted for.
2067          */
2068         memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
2069         nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
2070         nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
2071         nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
2072
2073         /* We need to tell the transport layer + 1 because it takes page
2074          * alignment into account. When space for the SGL is allocated we
2075          * allocate + 3, one for cmd, one for rsp and one for this alignment
2076          */
2077         lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
2078
2079         /* Advertise how many hw queues we support based on fcp_io_sched */
2080         if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ)
2081                 lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
2082         else
2083                 lpfc_nvme_template.max_hw_queues =
2084                         phba->sli4_hba.num_present_cpu;
2085
2086         if (!IS_ENABLED(CONFIG_NVME_FC))
2087                 return ret;
2088
2089         /* localport is allocated from the stack, but the registration
2090          * call allocates heap memory as well as the private area.
2091          */
2092
2093         ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
2094                                          &vport->phba->pcidev->dev, &localport);
2095         if (!ret) {
2096                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
2097                                  "6005 Successfully registered local "
2098                                  "NVME port num %d, localP %p, private %p, "
2099                                  "sg_seg %d\n",
2100                                  localport->port_num, localport,
2101                                  localport->private,
2102                                  lpfc_nvme_template.max_sgl_segments);
2103
2104                 /* Private is our lport size declared in the template. */
2105                 lport = (struct lpfc_nvme_lport *)localport->private;
2106                 vport->localport = localport;
2107                 lport->vport = vport;
2108                 vport->nvmei_support = 1;
2109
2110                 atomic_set(&lport->xmt_fcp_noxri, 0);
2111                 atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
2112                 atomic_set(&lport->xmt_fcp_qdepth, 0);
2113                 atomic_set(&lport->xmt_fcp_err, 0);
2114                 atomic_set(&lport->xmt_fcp_wqerr, 0);
2115                 atomic_set(&lport->xmt_fcp_abort, 0);
2116                 atomic_set(&lport->xmt_ls_abort, 0);
2117                 atomic_set(&lport->xmt_ls_err, 0);
2118                 atomic_set(&lport->cmpl_fcp_xb, 0);
2119                 atomic_set(&lport->cmpl_fcp_err, 0);
2120                 atomic_set(&lport->cmpl_ls_xb, 0);
2121                 atomic_set(&lport->cmpl_ls_err, 0);
2122                 atomic_set(&lport->fc4NvmeLsRequests, 0);
2123                 atomic_set(&lport->fc4NvmeLsCmpls, 0);
2124         }
2125
2126         return ret;
2127 }
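
/*
 * Typical localport lifecycle, as a rough guide (exact ordering depends on
 * the discovery path): lpfc_nvme_create_localport() binds the vport to the
 * nvme-fc transport at bring-up, lpfc_nvme_update_localport() refreshes the
 * port_id/role once the fabric assigns a DID, lpfc_nvme_register_port()
 * binds each discovered NVME rport, and lpfc_nvme_destroy_localport()
 * unregisters everything and waits for the transport callback at unload.
 */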
2128
2129 #if (IS_ENABLED(CONFIG_NVME_FC))
2130 /* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
2131  *
2132  * The driver has to wait for the host nvme transport to callback
2133  * indicating the localport has successfully unregistered all
2134  * resources.  Since this is an uninterruptible wait, loop every ten
2135  * seconds and print a message indicating no progress.
2136  *
2137  * An uninterruptible wait is used because of the risk of transport-to-
2138  * driver state mismatch.
2139  */
2140 static void
2141 lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
2142                            struct lpfc_nvme_lport *lport,
2143                            struct completion *lport_unreg_cmp)
2144 {
2145         u32 wait_tmo;
2146         int ret, i, pending = 0;
2147         struct lpfc_sli_ring  *pring;
2148         struct lpfc_hba  *phba = vport->phba;
2149
2150         /* Host transport has to clean up and confirm requiring an indefinite
2151          * wait. Print a message if a 10 second wait expires and renew the
2152          * wait. This is unexpected.
2153          */
2154         wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
2155         while (true) {
2156                 ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
2157                 if (unlikely(!ret)) {
2158                         pending = 0;
2159                         for (i = 0; i < phba->cfg_hdw_queue; i++) {
2160                                 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
2161                                 if (!pring)
2162                                         continue;
2163                                 if (pring->txcmplq_cnt)
2164                                         pending += pring->txcmplq_cnt;
2165                         }
2166                         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
2167                                          "6176 Lport %p Localport %p wait "
2168                                          "timed out. Pending %d. Renewing.\n",
2169                                          lport, vport->localport, pending);
2170                         continue;
2171                 }
2172                 break;
2173         }
2174         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
2175                          "6177 Lport %p Localport %p Complete Success\n",
2176                          lport, vport->localport);
2177 }
2178 #endif
2179
2180 /**
2181  * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
2182  * @vport: pointer to the lpfc vport whose localport is being destroyed.
2183  *
2184  * This routine is invoked to destroy all lports bound to the phba.
2185  * The lport memory was allocated by the nvme fc transport and is
2186  * released there.  This routine ensures all rports bound to the
2187  * lport have been disconnected.
2188  *
2189  **/
2190 void
2191 lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2192 {
2193 #if (IS_ENABLED(CONFIG_NVME_FC))
2194         struct nvme_fc_local_port *localport;
2195         struct lpfc_nvme_lport *lport;
2196         int ret;
2197         DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
2198
2199         if (vport->nvmei_support == 0)
2200                 return;
2201
2202         localport = vport->localport;
2203         lport = (struct lpfc_nvme_lport *)localport->private;
2204
2205         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2206                          "6011 Destroying NVME localport %p\n",
2207                          localport);
2208
2209         /* lport's rport list is clear.  Unregister
2210          * lport and release resources.
2211          */
2212         lport->lport_unreg_cmp = &lport_unreg_cmp;
2213         ret = nvme_fc_unregister_localport(localport);
2214
2215         /* Wait for completion.  This either blocks
2216          * indefinitely or succeeds
2217          */
2218         lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
2219         vport->localport = NULL;
2220
2221         /* Regardless of the unregister upcall response, clear
2222          * nvmei_support.  All rports are unregistered and the
2223          * driver will clean up.
2224          */
2225         vport->nvmei_support = 0;
2226         if (ret == 0) {
2227                 lpfc_printf_vlog(vport,
2228                                  KERN_INFO, LOG_NVME_DISC,
2229                                  "6009 Unregistered lport Success\n");
2230         } else {
2231                 lpfc_printf_vlog(vport,
2232                                  KERN_INFO, LOG_NVME_DISC,
2233                                  "6010 Unregistered lport "
2234                                  "Failed, status x%x\n",
2235                                  ret);
2236         }
2237 #endif
2238 }
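
/*
 * Teardown note: nvme_fc_unregister_localport() is asynchronous.  The
 * routine above stashes the on-stack completion in lport->lport_unreg_cmp
 * and then blocks in lpfc_nvme_lport_unreg_wait() until the transport's
 * localport_delete callback (registered in lpfc_nvme_template) indicates
 * that all lport resources have been released.
 */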
2239
2240 void
2241 lpfc_nvme_update_localport(struct lpfc_vport *vport)
2242 {
2243 #if (IS_ENABLED(CONFIG_NVME_FC))
2244         struct nvme_fc_local_port *localport;
2245         struct lpfc_nvme_lport *lport;
2246
2247         localport = vport->localport;
2248         if (!localport) {
2249                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2250                                  "6710 Update NVME fail. No localport\n");
2251                 return;
2252         }
2253         lport = (struct lpfc_nvme_lport *)localport->private;
2254         if (!lport) {
2255                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
2256                                  "6171 Update NVME fail. localP %p, No lport\n",
2257                                  localport);
2258                 return;
2259         }
2260         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2261                          "6012 Update NVME lport %p did x%x\n",
2262                          localport, vport->fc_myDID);
2263
2264         localport->port_id = vport->fc_myDID;
2265         if (localport->port_id == 0)
2266                 localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
2267         else
2268                 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
2269
2270         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2271                          "6030 bound lport %p to DID x%06x\n",
2272                          lport, localport->port_id);
2273 #endif
2274 }
2275
2276 int
2277 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2278 {
2279 #if (IS_ENABLED(CONFIG_NVME_FC))
2280         int ret = 0;
2281         struct nvme_fc_local_port *localport;
2282         struct lpfc_nvme_lport *lport;
2283         struct lpfc_nvme_rport *rport;
2284         struct lpfc_nvme_rport *oldrport;
2285         struct nvme_fc_remote_port *remote_port;
2286         struct nvme_fc_port_info rpinfo;
2287         struct lpfc_nodelist *prev_ndlp = NULL;
2288
2289         lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2290                          "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
2291                          ndlp->nlp_DID, ndlp->nlp_type);
2292
2293         localport = vport->localport;
2294         if (!localport)
2295                 return 0;
2296
2297         lport = (struct lpfc_nvme_lport *)localport->private;
2298
2299         /* NVME rports are not preserved across devloss.
2300          * Just register this instance.  Note, rpinfo->dev_loss_tmo
2301          * is left 0 to indicate accept transport defaults.  The
2302          * driver communicates port role capabilities consistent
2303          * with the PRLI response data.
2304          */
2305         memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
2306         rpinfo.port_id = ndlp->nlp_DID;
2307         if (ndlp->nlp_type & NLP_NVME_TARGET)
2308                 rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
2309         if (ndlp->nlp_type & NLP_NVME_INITIATOR)
2310                 rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
2311
2312         if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
2313                 rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
2314
2315         rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2316         rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2317
2318         spin_lock_irq(&vport->phba->hbalock);
2319         oldrport = lpfc_ndlp_get_nrport(ndlp);
2320         spin_unlock_irq(&vport->phba->hbalock);
2321         if (!oldrport)
2322                 lpfc_nlp_get(ndlp);
2323
2324         ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
2325         if (!ret) {
2326                 /* If the ndlp already has an nrport, this is just
2327                  * a resume of the existing rport.  Else this is a
2328                  * new rport.
2329                  */
2330                 /* Guard against an unregister/reregister
2331                  * race that leaves the WAIT flag set.
2332                  */
2333                 spin_lock_irq(&vport->phba->hbalock);
2334                 ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
2335                 spin_unlock_irq(&vport->phba->hbalock);
2336                 rport = remote_port->private;
2337                 if (oldrport) {
2338                         /* New remoteport record does not guarantee valid
2339                          * host private memory area.
2340                          */
2341                         prev_ndlp = oldrport->ndlp;
2342                         if (oldrport == remote_port->private) {
2343                                 /* Same remoteport - ndlp should match.
2344                                  * Just reuse.
2345                                  */
2346                                 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
2347                                                  LOG_NVME_DISC,
2348                                                  "6014 Rebinding lport to "
2349                                                  "remoteport %p wwpn 0x%llx, "
2350                                                  "Data: x%x x%x %p %p x%x x%06x\n",
2351                                                  remote_port,
2352                                                  remote_port->port_name,
2353                                                  remote_port->port_id,
2354                                                  remote_port->port_role,
2355                                                  prev_ndlp,
2356                                                  ndlp,
2357                                                  ndlp->nlp_type,
2358                                                  ndlp->nlp_DID);
2359                                 return 0;
2360                         }
2361
2362                         /* Sever the ndlp<->rport association
2363                          * before dropping the ndlp ref from
2364                          * register.
2365                          */
2366                         spin_lock_irq(&vport->phba->hbalock);
2367                         ndlp->nrport = NULL;
2368                         ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
2369                         spin_unlock_irq(&vport->phba->hbalock);
2370                         rport->ndlp = NULL;
2371                         rport->remoteport = NULL;
2372
2373                         /* Reference only removed if previous NDLP is no longer
2374                          * active. It might be just a swap and removing the
2375                          * reference would cause a premature cleanup.
2376                          */
2377                         if (prev_ndlp && prev_ndlp != ndlp) {
2378                                 if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
2379                                     (!prev_ndlp->nrport))
2380                                         lpfc_nlp_put(prev_ndlp);
2381                         }
2382                 }
2383
2384                 /* Clean bind the rport to the ndlp. */
2385                 rport->remoteport = remote_port;
2386                 rport->lport = lport;
2387                 rport->ndlp = ndlp;
2388                 spin_lock_irq(&vport->phba->hbalock);
2389                 ndlp->nrport = rport;
2390                 spin_unlock_irq(&vport->phba->hbalock);
2391                 lpfc_printf_vlog(vport, KERN_INFO,
2392                                  LOG_NVME_DISC | LOG_NODE,
2393                                  "6022 Binding new rport to "
2394                                  "lport %p Remoteport %p rport %p WWNN 0x%llx, "
2395                                  "Rport WWPN 0x%llx DID "
2396                                  "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
2397                                  lport, remote_port, rport,
2398                                  rpinfo.node_name, rpinfo.port_name,
2399                                  rpinfo.port_id, rpinfo.port_role,
2400                                  ndlp, prev_ndlp);
2401         } else {
2402                 lpfc_printf_vlog(vport, KERN_ERR,
2403                                  LOG_NVME_DISC | LOG_NODE,
2404                                  "6031 RemotePort Registration failed "
2405                                  "err: %d, DID x%06x\n",
2406                                  ret, ndlp->nlp_DID);
2407         }
2408
2409         return ret;
2410 #else
2411         return 0;
2412 #endif
2413 }
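
/*
 * Registration note: when nvme_fc_register_remoteport() hands back the
 * same private area as an existing rport, the call above is treated as a
 * resume and the prior ndlp binding is reused.  Otherwise the old
 * ndlp<->rport association is severed before the new one is made, and the
 * old ndlp reference is dropped only if that node is no longer active, to
 * avoid a premature cleanup on a simple swap.
 */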
2414
2415 /**
2416  * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
2417  *
2418  * If the ndlp represents an NVME Target that we are logged into, ping
2419  * the NVME FC Transport layer to initiate a device rescan on this
2420  * remote NPort.
2421  */
2422 void
2423 lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2424 {
2425 #if (IS_ENABLED(CONFIG_NVME_FC))
2426         struct lpfc_nvme_rport *rport;
2427         struct nvme_fc_remote_port *remoteport;
2428
2429         rport = ndlp->nrport;
2430
2431         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2432                          "6170 Rescan NPort DID x%06x type x%x "
2433                          "state x%x rport %p\n",
2434                          ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state, rport);
2435         if (!rport)
2436                 goto input_err;
2437         remoteport = rport->remoteport;
2438         if (!remoteport)
2439                 goto input_err;
2440
2441         /* Only rescan if we are an NVME target in the MAPPED state */
2442         if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
2443             ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
2444                 nvme_fc_rescan_remoteport(remoteport);
2445
2446                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2447                                  "6172 NVME rescanned DID x%06x "
2448                                  "port_state x%x\n",
2449                                  ndlp->nlp_DID, remoteport->port_state);
2450         }
2451         return;
2452 input_err:
2453         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2454                          "6169 State error: lport %p, rport%p FCID x%06x\n",
2455                          vport->localport, ndlp->rport, ndlp->nlp_DID);
2456 #endif
2457 }
2458
2459 /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
2460  *
2461  * There is no notion of Devloss or rport recovery from the current
2462  * nvme_transport perspective.  Loss of an rport just means IO cannot
2463  * be sent and recovery is completely up to the initiator.
2464  * For now, the driver just unbinds the DID and port_role so that
2465  * no further IO can be issued.  Changes are planned for later.
2466  *
2467  * Notes - the ndlp reference count is not decremented here since
2468  * there is no nvme_transport api for devloss.  Node ref count
2469  * is only adjusted in driver unload.
2470  */
2471 void
2472 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2473 {
2474 #if (IS_ENABLED(CONFIG_NVME_FC))
2475         int ret;
2476         struct nvme_fc_local_port *localport;
2477         struct lpfc_nvme_lport *lport;
2478         struct lpfc_nvme_rport *rport;
2479         struct nvme_fc_remote_port *remoteport = NULL;
2480
2481         localport = vport->localport;
2482
2483         /* This is fundamental error.  The localport is always
2484          * available until driver unload.  Just exit.
2485          */
2486         if (!localport)
2487                 return;
2488
2489         lport = (struct lpfc_nvme_lport *)localport->private;
2490         if (!lport)
2491                 goto input_err;
2492
2493         spin_lock_irq(&vport->phba->hbalock);
2494         rport = lpfc_ndlp_get_nrport(ndlp);
2495         if (rport)
2496                 remoteport = rport->remoteport;
2497         spin_unlock_irq(&vport->phba->hbalock);
2498         if (!remoteport)
2499                 goto input_err;
2500
2501         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2502                          "6033 Unreg nvme remoteport %p, portname x%llx, "
2503                          "port_id x%06x, portstate x%x port type x%x\n",
2504                          remoteport, remoteport->port_name,
2505                          remoteport->port_id, remoteport->port_state,
2506                          ndlp->nlp_type);
2507
2508         /* Sanity check ndlp type.  Only call for NVME ports. Don't
2509          * clear any rport state until the transport calls back.
2510          */
2511
2512         if (ndlp->nlp_type & NLP_NVME_TARGET) {
2513                 /* No concern about the role change on the nvme remoteport.
2514                  * The transport will update it.
2515                  */
2516                 ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
2517
2518                 /* Don't let the host nvme transport keep sending keep-alives
2519                  * on this remoteport. Vport is unloading, no recovery. The
2520                  * return values is ignored.  The upcall is a courtesy to the
2521                  * transport.
2522                  */
2523                 if (vport->load_flag & FC_UNLOADING)
2524                         (void)nvme_fc_set_remoteport_devloss(remoteport, 0);
2525
2526                 ret = nvme_fc_unregister_remoteport(remoteport);
2527                 if (ret != 0) {
2528                         lpfc_nlp_put(ndlp);
2529                         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2530                                          "6167 NVME unregister failed %d "
2531                                          "port_state x%x\n",
2532                                          ret, remoteport->port_state);
2533                 }
2534         }
2535         return;
2536
2537  input_err:
2538 #endif
2539         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2540                          "6168 State error: lport %p, rport%p FCID x%06x\n",
2541                          vport->localport, ndlp->rport, ndlp->nlp_DID);
2542 }
2543
2544 /**
2545  * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
2546  * @phba: pointer to lpfc hba data structure.
2547  * @axri: pointer to the nvme xri abort wcqe structure.
2548  * @idx: index of the hardware queue that owned the aborted IO.
2549  *
2550  * This routine is invoked by the worker thread to process a SLI4 fast-path
2551  * NVME aborted xri.  Aborted NVME IO commands are completed to the transport here.
2552  **/
2553 void
2554 lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
2555                            struct sli4_wcqe_xri_aborted *axri, int idx)
2556 {
2557         uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
2558         struct lpfc_io_buf *lpfc_ncmd, *next_lpfc_ncmd;
2559         struct nvmefc_fcp_req *nvme_cmd = NULL;
2560         struct lpfc_nodelist *ndlp;
2561         struct lpfc_sli4_hdw_queue *qp;
2562         unsigned long iflag = 0;
2563
2564         if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
2565                 return;
2566         qp = &phba->sli4_hba.hdwq[idx];
2567         spin_lock_irqsave(&phba->hbalock, iflag);
2568         spin_lock(&qp->abts_nvme_buf_list_lock);
2569         list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
2570                                  &qp->lpfc_abts_nvme_buf_list, list) {
2571                 if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
2572                         list_del_init(&lpfc_ncmd->list);
2573                         qp->abts_nvme_io_bufs--;
2574                         lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
2575                         lpfc_ncmd->status = IOSTAT_SUCCESS;
2576                         spin_unlock(&qp->abts_nvme_buf_list_lock);
2577
2578                         spin_unlock_irqrestore(&phba->hbalock, iflag);
2579                         ndlp = lpfc_ncmd->ndlp;
2580                         if (ndlp)
2581                                 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
2582
2583                         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2584                                         "6311 nvme_cmd %p xri x%x tag x%x "
2585                                         "abort complete and xri released\n",
2586                                         lpfc_ncmd->nvmeCmd, xri,
2587                                         lpfc_ncmd->cur_iocbq.iotag);
2588
2589                         /* An aborted NVME command must not complete before
2590                          * the abort exchange itself fully completes.  Once it
2591                          * has, the buffer is returned via the put list.
2592                          */
2593                         if (lpfc_ncmd->nvmeCmd) {
2594                                 nvme_cmd = lpfc_ncmd->nvmeCmd;
2595                                 nvme_cmd->done(nvme_cmd);
2596                                 lpfc_ncmd->nvmeCmd = NULL;
2597                         }
2598                         lpfc_release_nvme_buf(phba, lpfc_ncmd);
2599                         return;
2600                 }
2601         }
2602         spin_unlock(&qp->abts_nvme_buf_list_lock);
2603         spin_unlock_irqrestore(&phba->hbalock, iflag);
2604
2605         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2606                         "6312 XRI Aborted xri x%x not found\n", xri);
2607
2608 }
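
/*
 * Editor's sketch (hypothetical, not the driver's actual WCQE handler):
 * shows how a SLI4 abort-XRI completion for an NVME IO might be routed to
 * lpfc_sli4_nvme_xri_aborted() together with the owning hardware-queue
 * index.  The function name and the hwq_idx parameter are illustrative.
 */
static void
lpfc_example_handle_nvme_abort_xri(struct lpfc_hba *phba,
                                   struct sli4_wcqe_xri_aborted *axri,
                                   int hwq_idx)
{
        /* Complete the aborted IO back to the nvme transport and release
         * the lpfc_io_buf once the abort exchange has finished.
         */
        lpfc_sli4_nvme_xri_aborted(phba, axri, hwq_idx);
}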
2609
2610 /**
2611  * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
2612  * @phba: Pointer to HBA context object.
2613  *
2614  * This function waits for all outstanding NVME wqes to be removed from the
2615  * txcmplqs of the nvme rings.  It does not issue abort wqes for the IO
2616  * commands still on a txcmplq; they are simply returned with
2617  * IOERR_SLI_DOWN.  This function is invoked by EEH when the device's PCI
2618  * slot has been permanently disabled.
2619  **/
2620 void
2621 lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
2622 {
2623         struct lpfc_sli_ring  *pring;
2624         u32 i, wait_cnt = 0;
2625
2626         if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
2627                 return;
2628
2629         /* Cycle through all NVME rings and make sure all outstanding
2630          * WQEs have been removed from the txcmplqs.
2631          */
2632         for (i = 0; i < phba->cfg_hdw_queue; i++) {
2633                 if (!phba->sli4_hba.hdwq[i].nvme_wq)
2634                         continue;
2635                 pring = phba->sli4_hba.hdwq[i].nvme_wq->pring;
2636
2637                 if (!pring)
2638                         continue;
2639
2640                 /* Retrieve everything on the txcmplq */
2641                 while (!list_empty(&pring->txcmplq)) {
2642                         msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
2643                         wait_cnt++;
2644
2645                         /* The sleep is 10 ms.  Every ten seconds,
2646                          * log a message; something is wrong.
2647                          */
2648                         if ((wait_cnt % 1000) == 0) {
2649                                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2650                                                 "6178 NVME IO not empty, "
2651                                                 "cnt %d\n", wait_cnt);
2652                         }
2653                 }
2654         }
2655 }
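
/*
 * Editor's sketch (hypothetical error-path caller, invented name): shows
 * where lpfc_nvme_wait_for_io_drain() fits when a PCI slot is permanently
 * disabled.  The real EEH handler lives elsewhere in the driver; this only
 * illustrates the ordering described in the comment above.
 */
static void
lpfc_example_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
        /* ... the real handler blocks upper layers and flushes the rings
         * here, failing outstanding IOs with IOERR_SLI_DOWN ...
         */

        /* Then block until every outstanding NVME WQE has been removed
         * from the txcmplqs before tearing the device down.
         */
        lpfc_nvme_wait_for_io_drain(phba);
}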