Linux-libre 5.3.12-gnu
librecmc/linux-libre.git / drivers/infiniband/hw/qib/qib_rc.c
/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/io.h>

#include "qib.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
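
/*
 * For instance, OP(SEND_FIRST) expands to IB_OPCODE_RC_SEND_FIRST and
 * OP(RDMA_READ_REQUEST) to IB_OPCODE_RC_RDMA_READ_REQUEST, which keeps
 * the opcode switches below readable.
 */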

static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
                       u32 psn, u32 pmtu)
{
        u32 len;

        len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
        return rvt_restart_sge(ss, wqe, len);
}
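
/*
 * Worked example (illustrative only): if a send must restart at
 * psn == wqe->psn + 3 with a 4096-byte path MTU, the first three
 * packets were already received, so the SGE state is advanced past
 * 3 * 4096 = 12288 bytes before resending.
 */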

/**
 * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are on the responder's side of the QP context.
 * Note the QP s_lock must be held.
 */
static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
                           struct ib_other_headers *ohdr, u32 pmtu)
{
        struct rvt_ack_entry *e;
        u32 hwords;
        u32 len;
        u32 bth0;
        u32 bth2;

        /* Don't send an ACK if we aren't supposed to. */
        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
                goto bail;

        /* header size in 32-bit words LRH+BTH = (8+12)/4. */
        hwords = 5;

        switch (qp->s_ack_state) {
        case OP(RDMA_READ_RESPONSE_LAST):
        case OP(RDMA_READ_RESPONSE_ONLY):
                e = &qp->s_ack_queue[qp->s_tail_ack_queue];
                if (e->rdma_sge.mr) {
                        rvt_put_mr(e->rdma_sge.mr);
                        e->rdma_sge.mr = NULL;
                }
                /* FALLTHROUGH */
        case OP(ATOMIC_ACKNOWLEDGE):
                /*
                 * We can increment the tail pointer now that the last
                 * response has been sent instead of only being
                 * constructed.
                 */
                if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
                        qp->s_tail_ack_queue = 0;
                /* FALLTHROUGH */
        case OP(SEND_ONLY):
        case OP(ACKNOWLEDGE):
                /* Check for no next entry in the queue. */
                if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
                        if (qp->s_flags & RVT_S_ACK_PENDING)
                                goto normal;
                        goto bail;
                }

                e = &qp->s_ack_queue[qp->s_tail_ack_queue];
                if (e->opcode == OP(RDMA_READ_REQUEST)) {
                        /*
                         * If an RDMA read response is being resent and
                         * we haven't seen the duplicate request yet,
                         * then stop sending the remaining responses the
                         * responder has seen until the requester resends it.
                         */
                        len = e->rdma_sge.sge_length;
                        if (len && !e->rdma_sge.mr) {
                                qp->s_tail_ack_queue = qp->r_head_ack_queue;
                                goto bail;
                        }
                        /* Copy SGE state in case we need to resend */
                        qp->s_rdma_mr = e->rdma_sge.mr;
                        if (qp->s_rdma_mr)
                                rvt_get_mr(qp->s_rdma_mr);
                        qp->s_ack_rdma_sge.sge = e->rdma_sge;
                        qp->s_ack_rdma_sge.num_sge = 1;
                        qp->s_cur_sge = &qp->s_ack_rdma_sge;
                        if (len > pmtu) {
                                len = pmtu;
                                qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
                        } else {
                                qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
                                e->sent = 1;
                        }
                        ohdr->u.aeth = rvt_compute_aeth(qp);
                        hwords++;
                        qp->s_ack_rdma_psn = e->psn;
                        bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
                } else {
                        /* COMPARE_SWAP or FETCH_ADD */
                        qp->s_cur_sge = NULL;
                        len = 0;
                        qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
                        ohdr->u.at.aeth = rvt_compute_aeth(qp);
                        ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
                        hwords += sizeof(ohdr->u.at) / sizeof(u32);
                        bth2 = e->psn & QIB_PSN_MASK;
                        e->sent = 1;
                }
                bth0 = qp->s_ack_state << 24;
                break;

        case OP(RDMA_READ_RESPONSE_FIRST):
                qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
                /* FALLTHROUGH */
        case OP(RDMA_READ_RESPONSE_MIDDLE):
                qp->s_cur_sge = &qp->s_ack_rdma_sge;
                qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
                if (qp->s_rdma_mr)
                        rvt_get_mr(qp->s_rdma_mr);
                len = qp->s_ack_rdma_sge.sge.sge_length;
                if (len > pmtu)
                        len = pmtu;
                else {
                        ohdr->u.aeth = rvt_compute_aeth(qp);
                        hwords++;
                        qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
                        e = &qp->s_ack_queue[qp->s_tail_ack_queue];
                        e->sent = 1;
                }
                bth0 = qp->s_ack_state << 24;
                bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
                break;

        default:
normal:
                /*
                 * Send a regular ACK.
                 * Set the s_ack_state so we wait until after sending
                 * the ACK before setting s_ack_state to ACKNOWLEDGE
                 * (see above).
                 */
                qp->s_ack_state = OP(SEND_ONLY);
                qp->s_flags &= ~RVT_S_ACK_PENDING;
                qp->s_cur_sge = NULL;
                if (qp->s_nak_state)
                        ohdr->u.aeth =
                                cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
                                            (qp->s_nak_state <<
                                             IB_AETH_CREDIT_SHIFT));
                else
                        ohdr->u.aeth = rvt_compute_aeth(qp);
                hwords++;
                len = 0;
                bth0 = OP(ACKNOWLEDGE) << 24;
                bth2 = qp->s_ack_psn & QIB_PSN_MASK;
        }
        qp->s_rdma_ack_cnt++;
        qp->s_hdrwords = hwords;
        qp->s_cur_size = len;
        qib_make_ruc_header(qp, ohdr, bth0, bth2);
        return 1;

bail:
        qp->s_ack_state = OP(ACKNOWLEDGE);
        qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING);
        return 0;
}
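
/*
 * Note on the responder state above (illustrative): s_ack_queue is a
 * small ring of QIB_MAX_RDMA_ATOMIC + 1 entries, with r_head_ack_queue
 * as the producer index and s_tail_ack_queue as the consumer, wrapped
 * exactly as in qib_make_rc_ack():
 *
 *	if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
 *		qp->s_tail_ack_queue = 0;
 *
 * head == tail means the ring is empty, which is why the function
 * falls back to a bare ACK (or bails) in that case.
 */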

/**
 * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 * @flags: the send flags (unused by this function)
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
{
        struct qib_qp_priv *priv = qp->priv;
        struct qib_ibdev *dev = to_idev(qp->ibqp.device);
        struct ib_other_headers *ohdr;
        struct rvt_sge_state *ss;
        struct rvt_swqe *wqe;
        u32 hwords;
        u32 len;
        u32 bth0;
        u32 bth2;
        u32 pmtu = qp->pmtu;
        char newreq;
        int ret = 0;
        int delta;

        ohdr = &priv->s_hdr->u.oth;
        if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
                ohdr = &priv->s_hdr->u.l.oth;

        /* Sending responses takes priority over sending requests. */
        if ((qp->s_flags & RVT_S_RESP_PENDING) &&
            qib_make_rc_ack(dev, qp, ohdr, pmtu))
                goto done;

        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
                if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
                        goto bail;
                /* We are in the error state, flush the work request. */
                if (qp->s_last == READ_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
                if (atomic_read(&priv->s_dma_busy)) {
                        qp->s_flags |= RVT_S_WAIT_DMA;
                        goto bail;
                }
                wqe = rvt_get_swqe_ptr(qp, qp->s_last);
                rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
                        IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
                /* will get called again */
                goto done;
        }

        if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
                goto bail;

        if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
                if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
                        qp->s_flags |= RVT_S_WAIT_PSN;
                        goto bail;
                }
                qp->s_sending_psn = qp->s_psn;
                qp->s_sending_hpsn = qp->s_psn - 1;
        }

        /* header size in 32-bit words LRH+BTH = (8+12)/4. */
        hwords = 5;
        bth0 = 0;

        /* Send a request. */
        wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
        switch (qp->s_state) {
        default:
                if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
                        goto bail;
                /*
                 * Resend an old request or start a new one.
                 *
                 * We keep track of the current SWQE so that
                 * we don't reset the "furthest progress" state
                 * if we need to back up.
                 */
                newreq = 0;
                if (qp->s_cur == qp->s_tail) {
                        /* Check if send work queue is empty. */
                        if (qp->s_tail == READ_ONCE(qp->s_head))
                                goto bail;
                        /*
                         * If a fence is requested, wait for previous
                         * RDMA read and atomic operations to finish.
                         */
                        if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
                            qp->s_num_rd_atomic) {
                                qp->s_flags |= RVT_S_WAIT_FENCE;
                                goto bail;
                        }
                        newreq = 1;
                        qp->s_psn = wqe->psn;
                }
                /*
                 * Note that we have to be careful not to modify the
                 * original work request since we may need to resend
                 * it.
                 */
                len = wqe->length;
                ss = &qp->s_sge;
                bth2 = qp->s_psn & QIB_PSN_MASK;
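                /*
                 * bth[2] carries the 24-bit PSN in its low bits; the
                 * high bit (IB_BTH_REQ_ACK) is the AckReq bit, OR'd in
                 * below on packets that should elicit an ACK.
                 */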
                switch (wqe->wr.opcode) {
                case IB_WR_SEND:
                case IB_WR_SEND_WITH_IMM:
                        /* If no credit, return. */
                        if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
                            rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
                                qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
                                goto bail;
                        }
                        if (len > pmtu) {
                                qp->s_state = OP(SEND_FIRST);
                                len = pmtu;
                                break;
                        }
                        if (wqe->wr.opcode == IB_WR_SEND)
                                qp->s_state = OP(SEND_ONLY);
                        else {
                                qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
                                /* Immediate data comes after the BTH */
                                ohdr->u.imm_data = wqe->wr.ex.imm_data;
                                hwords += 1;
                        }
                        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                bth0 |= IB_BTH_SOLICITED;
                        bth2 |= IB_BTH_REQ_ACK;
                        if (++qp->s_cur == qp->s_size)
                                qp->s_cur = 0;
                        break;

                case IB_WR_RDMA_WRITE:
                        if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
                                qp->s_lsn++;
                        goto no_flow_control;
                case IB_WR_RDMA_WRITE_WITH_IMM:
                        /* If no credit, return. */
                        if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
                            rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
                                qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
                                goto bail;
                        }
no_flow_control:
                        ohdr->u.rc.reth.vaddr =
                                cpu_to_be64(wqe->rdma_wr.remote_addr);
                        ohdr->u.rc.reth.rkey =
                                cpu_to_be32(wqe->rdma_wr.rkey);
                        ohdr->u.rc.reth.length = cpu_to_be32(len);
                        hwords += sizeof(struct ib_reth) / sizeof(u32);
                        if (len > pmtu) {
                                qp->s_state = OP(RDMA_WRITE_FIRST);
                                len = pmtu;
                                break;
                        }
                        if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE)
                                qp->s_state = OP(RDMA_WRITE_ONLY);
                        else {
                                qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
                                /* Immediate data comes after RETH */
                                ohdr->u.rc.imm_data =
                                        wqe->rdma_wr.wr.ex.imm_data;
                                hwords += 1;
                                if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED)
                                        bth0 |= IB_BTH_SOLICITED;
                        }
                        bth2 |= IB_BTH_REQ_ACK;
                        if (++qp->s_cur == qp->s_size)
                                qp->s_cur = 0;
                        break;

                case IB_WR_RDMA_READ:
                        /*
                         * Don't allow more operations to be started
                         * than the QP limits allow.
                         */
                        if (newreq) {
                                if (qp->s_num_rd_atomic >=
                                    qp->s_max_rd_atomic) {
                                        qp->s_flags |= RVT_S_WAIT_RDMAR;
                                        goto bail;
                                }
                                qp->s_num_rd_atomic++;
                                if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
                                        qp->s_lsn++;
                        }

                        ohdr->u.rc.reth.vaddr =
                                cpu_to_be64(wqe->rdma_wr.remote_addr);
                        ohdr->u.rc.reth.rkey =
                                cpu_to_be32(wqe->rdma_wr.rkey);
                        ohdr->u.rc.reth.length = cpu_to_be32(len);
                        qp->s_state = OP(RDMA_READ_REQUEST);
                        hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
                        ss = NULL;
                        len = 0;
                        bth2 |= IB_BTH_REQ_ACK;
                        if (++qp->s_cur == qp->s_size)
                                qp->s_cur = 0;
                        break;

                case IB_WR_ATOMIC_CMP_AND_SWP:
                case IB_WR_ATOMIC_FETCH_AND_ADD:
                        /*
                         * Don't allow more operations to be started
                         * than the QP limits allow.
                         */
                        if (newreq) {
                                if (qp->s_num_rd_atomic >=
                                    qp->s_max_rd_atomic) {
                                        qp->s_flags |= RVT_S_WAIT_RDMAR;
                                        goto bail;
                                }
                                qp->s_num_rd_atomic++;
                                if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
                                        qp->s_lsn++;
                        }
                        if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
                                qp->s_state = OP(COMPARE_SWAP);
                                put_ib_ateth_swap(wqe->atomic_wr.swap,
                                                  &ohdr->u.atomic_eth);
                                put_ib_ateth_compare(wqe->atomic_wr.compare_add,
                                                     &ohdr->u.atomic_eth);
                        } else {
                                qp->s_state = OP(FETCH_ADD);
                                put_ib_ateth_swap(wqe->atomic_wr.compare_add,
                                                  &ohdr->u.atomic_eth);
                                put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
                        }
                        put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
                                           &ohdr->u.atomic_eth);
                        ohdr->u.atomic_eth.rkey = cpu_to_be32(
                                wqe->atomic_wr.rkey);
                        hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
                        ss = NULL;
                        len = 0;
                        bth2 |= IB_BTH_REQ_ACK;
                        if (++qp->s_cur == qp->s_size)
                                qp->s_cur = 0;
                        break;

                default:
                        goto bail;
                }
                qp->s_sge.sge = wqe->sg_list[0];
                qp->s_sge.sg_list = wqe->sg_list + 1;
                qp->s_sge.num_sge = wqe->wr.num_sge;
                qp->s_sge.total_len = wqe->length;
                qp->s_len = wqe->length;
                if (newreq) {
                        qp->s_tail++;
                        if (qp->s_tail >= qp->s_size)
                                qp->s_tail = 0;
                }
                if (wqe->wr.opcode == IB_WR_RDMA_READ)
                        qp->s_psn = wqe->lpsn + 1;
                else
                        qp->s_psn++;
                break;

        case OP(RDMA_READ_RESPONSE_FIRST):
                /*
                 * qp->s_state is normally set to the opcode of the
                 * last packet constructed for new requests and therefore
                 * is never set to RDMA read response.
                 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
                 * thread to indicate a SEND needs to be restarted from an
                 * earlier PSN without interfering with the sending thread.
                 * See qib_restart_rc().
                 */
                qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
                /* FALLTHROUGH */
        case OP(SEND_FIRST):
                qp->s_state = OP(SEND_MIDDLE);
                /* FALLTHROUGH */
        case OP(SEND_MIDDLE):
                bth2 = qp->s_psn++ & QIB_PSN_MASK;
                ss = &qp->s_sge;
                len = qp->s_len;
                if (len > pmtu) {
                        len = pmtu;
                        break;
                }
                if (wqe->wr.opcode == IB_WR_SEND)
                        qp->s_state = OP(SEND_LAST);
                else {
                        qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
                        /* Immediate data comes after the BTH */
                        ohdr->u.imm_data = wqe->wr.ex.imm_data;
                        hwords += 1;
                }
                if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                        bth0 |= IB_BTH_SOLICITED;
                bth2 |= IB_BTH_REQ_ACK;
                qp->s_cur++;
                if (qp->s_cur >= qp->s_size)
                        qp->s_cur = 0;
                break;

        case OP(RDMA_READ_RESPONSE_LAST):
                /*
                 * qp->s_state is normally set to the opcode of the
                 * last packet constructed for new requests and therefore
                 * is never set to RDMA read response.
                 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
                 * thread to indicate an RDMA write needs to be restarted
                 * from an earlier PSN without interfering with the sending
                 * thread.
                 * See qib_restart_rc().
                 */
                qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_FIRST):
                qp->s_state = OP(RDMA_WRITE_MIDDLE);
                /* FALLTHROUGH */
        case OP(RDMA_WRITE_MIDDLE):
                bth2 = qp->s_psn++ & QIB_PSN_MASK;
                ss = &qp->s_sge;
                len = qp->s_len;
                if (len > pmtu) {
                        len = pmtu;
                        break;
                }
                if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
                        qp->s_state = OP(RDMA_WRITE_LAST);
                else {
                        qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
                        /* Immediate data comes after the BTH */
                        ohdr->u.imm_data = wqe->wr.ex.imm_data;
                        hwords += 1;
                        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                                bth0 |= IB_BTH_SOLICITED;
                }
                bth2 |= IB_BTH_REQ_ACK;
                qp->s_cur++;
                if (qp->s_cur >= qp->s_size)
                        qp->s_cur = 0;
                break;

        case OP(RDMA_READ_RESPONSE_MIDDLE):
                /*
                 * qp->s_state is normally set to the opcode of the
                 * last packet constructed for new requests and therefore
                 * is never set to RDMA read response.
                 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
                 * thread to indicate an RDMA read needs to be restarted
                 * from an earlier PSN without interfering with the sending
                 * thread.
                 * See qib_restart_rc().
                 */
                len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
                ohdr->u.rc.reth.vaddr =
                        cpu_to_be64(wqe->rdma_wr.remote_addr + len);
                ohdr->u.rc.reth.rkey =
                        cpu_to_be32(wqe->rdma_wr.rkey);
                ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
                qp->s_state = OP(RDMA_READ_REQUEST);
                hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
                bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
                qp->s_psn = wqe->lpsn + 1;
                ss = NULL;
                len = 0;
                qp->s_cur++;
                if (qp->s_cur == qp->s_size)
                        qp->s_cur = 0;
                break;
        }
        qp->s_sending_hpsn = bth2;
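        /*
         * Sign-extend the 24-bit PSN difference into a full int:
         * shifting left by 8 puts the delta in the top 24 bits and
         * the arithmetic shift right brings it back with the sign
         * preserved, so e.g. bth2 = 2 and wqe->psn = 0xfffffe
         * yields delta = 4 rather than a huge positive number.
         */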
        delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
        if (delta && delta % QIB_PSN_CREDIT == 0)
                bth2 |= IB_BTH_REQ_ACK;
        if (qp->s_flags & RVT_S_SEND_ONE) {
                qp->s_flags &= ~RVT_S_SEND_ONE;
                qp->s_flags |= RVT_S_WAIT_ACK;
                bth2 |= IB_BTH_REQ_ACK;
        }
        qp->s_len -= len;
        qp->s_hdrwords = hwords;
        qp->s_cur_sge = ss;
        qp->s_cur_size = len;
        qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
done:
        return 1;
bail:
        qp->s_flags &= ~RVT_S_BUSY;
        return ret;
}

/**
 * qib_send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from qib_rc_rcv() and qib_kreceive().
 * Note that RDMA reads and atomics are handled in the
 * send side QP state and tasklet.
 */
void qib_send_rc_ack(struct rvt_qp *qp)
{
        struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        u64 pbc;
        u16 lrh0;
        u32 bth0;
        u32 hwords;
        u32 pbufn;
        u32 __iomem *piobuf;
        struct ib_header hdr;
        struct ib_other_headers *ohdr;
        u32 control;
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);

        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
                goto unlock;

        /* Don't send ACK or NAK if an RDMA read or atomic is pending. */
        if ((qp->s_flags & RVT_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
                goto queue_ack;

        /* Construct the header with s_lock held so APM doesn't change it. */
        ohdr = &hdr.u.oth;
        lrh0 = QIB_LRH_BTH;
        /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
        hwords = 6;
        if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
                     IB_AH_GRH)) {
                hwords += qib_make_grh(ibp, &hdr.u.l.grh,
                                       rdma_ah_read_grh(&qp->remote_ah_attr),
                                       hwords, 0);
                ohdr = &hdr.u.l.oth;
                lrh0 = QIB_LRH_GRH;
        }
        /* read pkey_index w/o lock (it's atomic) */
        bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
        if (qp->s_mig_state == IB_MIG_MIGRATED)
                bth0 |= IB_BTH_MIG_REQ;
        if (qp->r_nak_state)
                ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
                                            (qp->r_nak_state <<
                                             IB_AETH_CREDIT_SHIFT));
        else
                ohdr->u.aeth = rvt_compute_aeth(qp);
        lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
                rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
        hdr.lrh[0] = cpu_to_be16(lrh0);
        hdr.lrh[1] = cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
        hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
        hdr.lrh[3] = cpu_to_be16(ppd->lid |
                                 rdma_ah_get_path_bits(&qp->remote_ah_attr));
        ohdr->bth[0] = cpu_to_be32(bth0);
        ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
        ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);

        spin_unlock_irqrestore(&qp->s_lock, flags);

        /* Don't try to send ACKs if the link isn't ACTIVE */
        if (!(ppd->lflags & QIBL_LINKACTIVE))
                goto done;

        control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
                                       qp->s_srate, lrh0 >> 12);
        /* length is + 1 for the control dword */
        pbc = ((u64) control << 32) | (hwords + 1);

        piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
        if (!piobuf) {
                /*
                 * We are out of PIO buffers at the moment.
                 * Pass responsibility for sending the ACK to the
                 * send tasklet so that when a PIO buffer becomes
                 * available, the ACK is sent ahead of other outgoing
                 * packets.
                 */
                spin_lock_irqsave(&qp->s_lock, flags);
                goto queue_ack;
        }

        /*
         * Write the pbc.
         * We have to flush after the PBC for correctness on some CPUs,
         * or the WC buffer can be written out of order.
         */
        writeq(pbc, piobuf);

        if (dd->flags & QIB_PIO_FLUSH_WC) {
                u32 *hdrp = (u32 *) &hdr;

                qib_flush_wc();
                qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
                qib_flush_wc();
                __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
        } else
                qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);

        if (dd->flags & QIB_USE_SPCL_TRIG) {
                u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

                qib_flush_wc();
                __raw_writel(0xaebecede, piobuf + spcl_off);
        }

        qib_flush_wc();
        qib_sendbuf_done(dd, pbufn);

        this_cpu_inc(ibp->pmastats->n_unicast_xmit);
        goto done;

queue_ack:
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                this_cpu_inc(*ibp->rvp.rc_qacks);
                qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
                qp->s_nak_state = qp->r_nak_state;
                qp->s_ack_psn = qp->r_ack_psn;

                /* Schedule the send tasklet. */
                qib_schedule_send(qp);
        }
unlock:
        spin_unlock_irqrestore(&qp->s_lock, flags);
done:
        return;
}

/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from do_rc_ack() and qib_restart_rc() to reset the
 * QP send state so that the next packet sent is the given PSN.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
        u32 n = qp->s_acked;
        struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
        u32 opcode;

        qp->s_cur = n;

        /*
         * If we are starting the request from the beginning,
         * let the normal send code handle initialization.
         */
        if (qib_cmp24(psn, wqe->psn) <= 0) {
                qp->s_state = OP(SEND_LAST);
                goto done;
        }

        /* Find the work request opcode corresponding to the given PSN. */
        opcode = wqe->wr.opcode;
        for (;;) {
                int diff;

                if (++n == qp->s_size)
                        n = 0;
                if (n == qp->s_tail)
                        break;
                wqe = rvt_get_swqe_ptr(qp, n);
                diff = qib_cmp24(psn, wqe->psn);
                if (diff < 0)
                        break;
                qp->s_cur = n;
                /*
                 * If we are starting the request from the beginning,
                 * let the normal send code handle initialization.
                 */
                if (diff == 0) {
                        qp->s_state = OP(SEND_LAST);
                        goto done;
                }
                opcode = wqe->wr.opcode;
        }

        /*
         * Set the state to restart in the middle of a request.
         * Don't change the s_sge, s_cur_sge, or s_cur_size.
         * See qib_make_rc_req().
         */
        switch (opcode) {
        case IB_WR_SEND:
        case IB_WR_SEND_WITH_IMM:
                qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
                break;

        case IB_WR_RDMA_WRITE:
        case IB_WR_RDMA_WRITE_WITH_IMM:
                qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
                break;

        case IB_WR_RDMA_READ:
                qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
                break;

        default:
                /*
                 * This case shouldn't happen since there is
                 * only one PSN per request.
                 */
                qp->s_state = OP(SEND_LAST);
        }
done:
        qp->s_psn = psn;
        /*
         * Set RVT_S_WAIT_PSN as qib_rc_send_complete() may start the timer
         * asynchronously before the send tasklet can get scheduled.
         * Doing it in qib_make_rc_req() is too late.
         */
        if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
            (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
                qp->s_flags |= RVT_S_WAIT_PSN;
}
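
/*
 * PSNs are 24 bits, so the comparisons above are circular.  A minimal
 * sketch of the helper (the driver's real qib_cmp24() lives in qib.h;
 * this is illustrative only):
 *
 *	static inline int qib_cmp24(u32 a, u32 b)
 *	{
 *		return (((int) a) - ((int) b)) << 8;
 *	}
 *
 * Shifting the difference into the top 24 bits makes wraparound fall
 * out of two's-complement arithmetic: the result is negative if a
 * precedes b, zero if equal, and positive if a follows b, mod 2^24.
 */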

/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
        struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
        struct qib_ibport *ibp;

        if (qp->s_retry == 0) {
                if (qp->s_mig_state == IB_MIG_ARMED) {
                        qib_migrate_qp(qp);
                        qp->s_retry = qp->s_retry_cnt;
                } else if (qp->s_last == qp->s_acked) {
                        rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
                        rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                        return;
                } else /* XXX need to handle delayed completion */
                        return;
        } else
                qp->s_retry--;

        ibp = to_iport(qp->ibqp.device, qp->port_num);
        if (wqe->wr.opcode == IB_WR_RDMA_READ)
                ibp->rvp.n_rc_resends++;
        else
                ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

        qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
                         RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
                         RVT_S_WAIT_ACK);
        if (wait)
                qp->s_flags |= RVT_S_SEND_ONE;
        reset_psn(qp, psn);
}

/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
        struct rvt_swqe *wqe;
        u32 n = qp->s_last;

        /* Find the work request corresponding to the given PSN. */
        for (;;) {
                wqe = rvt_get_swqe_ptr(qp, n);
                if (qib_cmp24(psn, wqe->lpsn) <= 0) {
                        if (wqe->wr.opcode == IB_WR_RDMA_READ)
                                qp->s_sending_psn = wqe->lpsn + 1;
                        else
                                qp->s_sending_psn = psn + 1;
                        break;
                }
                if (++n == qp->s_size)
                        n = 0;
                if (n == qp->s_tail)
                        break;
        }
}
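
/*
 * Example: an RDMA read request is a single packet on the wire, but
 * it reserves the whole PSN range wqe->psn..wqe->lpsn for the read
 * response packets, so the next PSN this requester sends is
 * wqe->lpsn + 1 rather than psn + 1.
 */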

/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
        struct ib_other_headers *ohdr;
        struct rvt_swqe *wqe;
        u32 opcode;
        u32 psn;

        if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
                return;

        /* Find out where the BTH is */
        if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
                ohdr = &hdr->u.oth;
        else
                ohdr = &hdr->u.l.oth;

        opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
        if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
            opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
                WARN_ON(!qp->s_rdma_ack_cnt);
                qp->s_rdma_ack_cnt--;
                return;
        }

        psn = be32_to_cpu(ohdr->bth[2]);
        reset_sending_psn(qp, psn);

        /*
         * Start timer after a packet requesting an ACK has been sent and
         * there are still requests that haven't been acked.
         */
        if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
            !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
            (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
                rvt_add_retry_timer(qp);

        while (qp->s_last != qp->s_acked) {
                wqe = rvt_get_swqe_ptr(qp, qp->s_last);
                if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
                    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
                        break;
                rvt_qp_complete_swqe(qp,
                                     wqe,
                                     ib_qib_wc_opcode[wqe->wr.opcode],
                                     IB_WC_SUCCESS);
        }
        /*
         * If we were waiting for sends to complete before resending,
         * and they are now complete, restart sending.
         */
        if (qp->s_flags & RVT_S_WAIT_PSN &&
            qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
                qp->s_flags &= ~RVT_S_WAIT_PSN;
                qp->s_sending_psn = qp->s_psn;
                qp->s_sending_hpsn = qp->s_psn - 1;
                qib_schedule_send(qp);
        }
}

static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
        qp->s_last_psn = psn;
}

/*
 * Generate a SWQE completion.
 * This is similar to rvt_send_complete() but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
                                         struct rvt_swqe *wqe,
                                         struct qib_ibport *ibp)
{
        /*
         * Don't decrement refcount and don't generate a
         * completion if the SWQE is being resent until the send
         * is finished.
         */
        if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
            qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0)
                rvt_qp_complete_swqe(qp,
                                     wqe,
                                     ib_qib_wc_opcode[wqe->wr.opcode],
                                     IB_WC_SUCCESS);
        else
                this_cpu_inc(*ibp->rvp.rc_delayed_comp);

        qp->s_retry = qp->s_retry_cnt;
        update_last_psn(qp, wqe->lpsn);

        /*
         * If we are completing a request which is in the process of
         * being resent, we can stop resending it since we know the
         * responder has already seen it.
         */
        if (qp->s_acked == qp->s_cur) {
                if (++qp->s_cur >= qp->s_size)
                        qp->s_cur = 0;
                qp->s_acked = qp->s_cur;
                wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
                if (qp->s_acked != qp->s_tail) {
                        qp->s_state = OP(SEND_LAST);
                        qp->s_psn = wqe->psn;
                }
        } else {
                if (++qp->s_acked >= qp->s_size)
                        qp->s_acked = 0;
                if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
                        qp->s_draining = 0;
                wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
        }
        return wqe;
}

/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @aeth: the ACK extended transport header value
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 * @val: the atomic result returned in an ATOMIC_ACKNOWLEDGE
 * @rcd: the receive context
 *
 * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
                     u64 val, struct qib_ctxtdata *rcd)
{
        struct qib_ibport *ibp;
        enum ib_wc_status status;
        struct rvt_swqe *wqe;
        int ret = 0;
        u32 ack_psn;
        int diff;

        /*
         * Note that NAKs implicitly ACK outstanding SEND and RDMA write
         * requests and implicitly NAK RDMA read and atomic requests issued
         * before the NAK'ed request.  The MSN won't include the NAK'ed
         * request but will include the ACK'ed request(s).
         */
        ack_psn = psn;
        if (aeth >> IB_AETH_NAK_SHIFT)
                ack_psn--;
        wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
        ibp = to_iport(qp->ibqp.device, qp->port_num);

        /*
         * The MSN might be for a later WQE than the PSN indicates so
         * only complete WQEs that the PSN finishes.
         */
        while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
                /*
                 * RDMA_READ_RESPONSE_ONLY is a special case since
                 * we want to generate completion events for everything
                 * before the RDMA read, copy the data, then generate
                 * the completion for the read.
                 */
                if (wqe->wr.opcode == IB_WR_RDMA_READ &&
                    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
                    diff == 0) {
                        ret = 1;
                        goto bail;
                }
                /*
                 * If this request is an RDMA read or atomic, and the ACK is
                 * for a later operation, this ACK NAKs the RDMA read or
                 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
                 * can ACK an RDMA read and likewise for atomic ops.  Note
                 * that the NAK case can only happen if relaxed ordering is
                 * used and requests are sent after an RDMA read or atomic
                 * is sent but before the response is received.
                 */
                if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
                     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
                    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
                      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
                     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
                        /* Retry this request. */
                        if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
                                qp->r_flags |= RVT_R_RDMAR_SEQ;
                                qib_restart_rc(qp, qp->s_last_psn + 1, 0);
                                if (list_empty(&qp->rspwait)) {
                                        qp->r_flags |= RVT_R_RSP_SEND;
                                        rvt_get_qp(qp);
                                        list_add_tail(&qp->rspwait,
                                                      &rcd->qp_wait_list);
                                }
                        }
                        /*
                         * No need to process the ACK/NAK since we are
                         * restarting an earlier request.
                         */
                        goto bail;
                }
                if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
                    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
                        u64 *vaddr = wqe->sg_list[0].vaddr;
                        *vaddr = val;
                }
                if (qp->s_num_rd_atomic &&
                    (wqe->wr.opcode == IB_WR_RDMA_READ ||
                     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
                     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
                        qp->s_num_rd_atomic--;
                        /* Restart sending task if fence is complete */
                        if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
                            !qp->s_num_rd_atomic) {
                                qp->s_flags &= ~(RVT_S_WAIT_FENCE |
                                                 RVT_S_WAIT_ACK);
                                qib_schedule_send(qp);
                        } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
                                qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
                                                 RVT_S_WAIT_ACK);
                                qib_schedule_send(qp);
                        }
                }
                wqe = do_rc_completion(qp, wqe, ibp);
                if (qp->s_acked == qp->s_tail)
                        break;
        }

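        /*
         * AETH layout (32 bits), as reflected by the masks and shifts
         * used here: bits 31:29 select ACK (0), RNR NAK (1) or NAK (3);
         * bits 28:24 carry the credit count or the NAK code
         * (IB_AETH_CREDIT_SHIFT/IB_AETH_CREDIT_MASK); and bits 23:0
         * carry the MSN (IB_MSN_MASK).
         */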
        switch (aeth >> IB_AETH_NAK_SHIFT) {
        case 0:         /* ACK */
                this_cpu_inc(*ibp->rvp.rc_acks);
                if (qp->s_acked != qp->s_tail) {
                        /*
                         * We are expecting more ACKs so
                         * reset the retransmit timer.
                         */
                        rvt_mod_retry_timer(qp);
                        /*
                         * We can stop resending the earlier packets and
                         * continue with the next packet the receiver wants.
                         */
                        if (qib_cmp24(qp->s_psn, psn) <= 0)
                                reset_psn(qp, psn + 1);
                } else {
                        /* No more acks - kill all timers */
                        rvt_stop_rc_timers(qp);
                        if (qib_cmp24(qp->s_psn, psn) <= 0) {
                                qp->s_state = OP(SEND_LAST);
                                qp->s_psn = psn + 1;
                        }
                }
                if (qp->s_flags & RVT_S_WAIT_ACK) {
                        qp->s_flags &= ~RVT_S_WAIT_ACK;
                        qib_schedule_send(qp);
                }
                rvt_get_credit(qp, aeth);
                qp->s_rnr_retry = qp->s_rnr_retry_cnt;
                qp->s_retry = qp->s_retry_cnt;
                update_last_psn(qp, psn);
                return 1;

        case 1:         /* RNR NAK */
                ibp->rvp.n_rnr_naks++;
                if (qp->s_acked == qp->s_tail)
                        goto bail;
                if (qp->s_flags & RVT_S_WAIT_RNR)
                        goto bail;
                if (qp->s_rnr_retry == 0) {
                        status = IB_WC_RNR_RETRY_EXC_ERR;
                        goto class_b;
                }
                if (qp->s_rnr_retry_cnt < 7)
                        qp->s_rnr_retry--;

                /* The last valid PSN is the previous PSN. */
                update_last_psn(qp, psn - 1);

                ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

                reset_psn(qp, psn);

                qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
                rvt_stop_rc_timers(qp);
                rvt_add_rnr_timer(qp, aeth);
                return 0;

        case 3:         /* NAK */
                if (qp->s_acked == qp->s_tail)
                        goto bail;
                /* The last valid PSN is the previous PSN. */
                update_last_psn(qp, psn - 1);
                switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
                        IB_AETH_CREDIT_MASK) {
                case 0: /* PSN sequence error */
                        ibp->rvp.n_seq_naks++;
                        /*
                         * Back up to the responder's expected PSN.
                         * Note that we might get a NAK in the middle of an
                         * RDMA READ response which terminates the RDMA
                         * READ.
                         */
                        qib_restart_rc(qp, psn, 0);
                        qib_schedule_send(qp);
                        break;

                case 1: /* Invalid Request */
                        status = IB_WC_REM_INV_REQ_ERR;
                        ibp->rvp.n_other_naks++;
                        goto class_b;

                case 2: /* Remote Access Error */
                        status = IB_WC_REM_ACCESS_ERR;
                        ibp->rvp.n_other_naks++;
                        goto class_b;

                case 3: /* Remote Operation Error */
                        status = IB_WC_REM_OP_ERR;
                        ibp->rvp.n_other_naks++;
class_b:
                        if (qp->s_last == qp->s_acked) {
                                rvt_send_complete(qp, wqe, status);
                                rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                        }
                        break;

                default:
                        /* Ignore other reserved NAK error codes */
                        goto reserved;
                }
                qp->s_retry = qp->s_retry_cnt;
                qp->s_rnr_retry = qp->s_rnr_retry_cnt;
                goto bail;

        default:                /* 2: reserved */
reserved:
                /* Ignore reserved NAK codes. */
                goto bail;
        }

bail:
        rvt_stop_rc_timers(qp);
        return ret;
}

/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
                         struct qib_ctxtdata *rcd)
{
        struct rvt_swqe *wqe;

        /* Remove QP from retry timer */
        rvt_stop_rc_timers(qp);

        wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

        while (qib_cmp24(psn, wqe->lpsn) > 0) {
                if (wqe->wr.opcode == IB_WR_RDMA_READ ||
                    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
                    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
                        break;
                wqe = do_rc_completion(qp, wqe, ibp);
        }

        ibp->rvp.n_rdma_seq++;
        qp->r_flags |= RVT_R_RDMAR_SEQ;
        qib_restart_rc(qp, qp->s_last_psn + 1, 0);
        if (list_empty(&qp->rspwait)) {
                qp->r_flags |= RVT_R_RSP_SEND;
                rvt_get_qp(qp);
                list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
        }
}

/**
 * qib_rc_rcv_resp - process an incoming RC response packet
 * @ibp: the port this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 * @rcd: the context pointer
 *
 * This is called from qib_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void qib_rc_rcv_resp(struct qib_ibport *ibp,
                            struct ib_other_headers *ohdr,
                            void *data, u32 tlen,
                            struct rvt_qp *qp,
                            u32 opcode,
                            u32 psn, u32 hdrsize, u32 pmtu,
                            struct qib_ctxtdata *rcd)
1280 {
1281         struct rvt_swqe *wqe;
1282         struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1283         enum ib_wc_status status;
1284         unsigned long flags;
1285         int diff;
1286         u32 pad;
1287         u32 aeth;
1288         u64 val;
1289
1290         if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
                /*
                 * If the ACK'd PSN is on the SDMA busy list, try to make
                 * progress to reclaim SDMA credits.
                 */
1295                 if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
1296                     (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
1297
                        /*
                         * If the send tasklet is not running, attempt to
                         * make progress on the SDMA queue.
                         */
1302                         if (!(qp->s_flags & RVT_S_BUSY)) {
                                /* Run the SDMA engine under sdma_lock. */
                                spin_lock_irqsave(&ppd->sdma_lock, flags);
                                qib_sdma_make_progress(ppd);
                                spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1309                         }
1310                 }
1311         }
1312
1313         spin_lock_irqsave(&qp->s_lock, flags);
1314         if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
1315                 goto ack_done;
1316
1317         /* Ignore invalid responses. */
1318         if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
1319                 goto ack_done;
1320
1321         /* Ignore duplicate responses. */
1322         diff = qib_cmp24(psn, qp->s_last_psn);
1323         if (unlikely(diff <= 0)) {
1324                 /* Update credits for "ghost" ACKs */
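                /*
                 * An AETH whose NAK field is zero is a plain ACK; even on
                 * a duplicate PSN its credit field may open the send window.
                 */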
1325                 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1326                         aeth = be32_to_cpu(ohdr->u.aeth);
1327                         if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
1328                                 rvt_get_credit(qp, aeth);
1329                 }
1330                 goto ack_done;
1331         }
1332
        /*
         * If we are waiting for a reply to a restarted RDMA read or
         * atomic op, skip everything other than the PSN we expect.
         */
1337         if (qp->r_flags & RVT_R_RDMAR_SEQ) {
1338                 if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
1339                         goto ack_done;
1340                 qp->r_flags &= ~RVT_R_RDMAR_SEQ;
1341         }
1342
1343         if (unlikely(qp->s_acked == qp->s_tail))
1344                 goto ack_done;
1345         wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1346         status = IB_WC_SUCCESS;
1347
1348         switch (opcode) {
1349         case OP(ACKNOWLEDGE):
1350         case OP(ATOMIC_ACKNOWLEDGE):
1351         case OP(RDMA_READ_RESPONSE_FIRST):
1352                 aeth = be32_to_cpu(ohdr->u.aeth);
1353                 if (opcode == OP(ATOMIC_ACKNOWLEDGE))
1354                         val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
1355                 else
1356                         val = 0;
1357                 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
1358                     opcode != OP(RDMA_READ_RESPONSE_FIRST))
1359                         goto ack_done;
1360                 hdrsize += 4;
1361                 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1362                 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1363                         goto ack_op_err;
1364                 /*
1365                  * If this is a response to a resent RDMA read, we
1366                  * have to be careful to copy the data to the right
1367                  * location.
1368                  */
1369                 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1370                                                   wqe, psn, pmtu);
1371                 goto read_middle;
1372
1373         case OP(RDMA_READ_RESPONSE_MIDDLE):
1374                 /* no AETH, no ACK */
1375                 if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1376                         goto ack_seq_err;
1377                 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1378                         goto ack_op_err;
1379 read_middle:
1380                 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1381                         goto ack_len_err;
1382                 if (unlikely(pmtu >= qp->s_rdma_read_len))
1383                         goto ack_len_err;
1384
1385                 /*
1386                  * We got a response so update the timeout.
1387                  * 4.096 usec. * (1 << qp->timeout)
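                 * e.g. qp->timeout == 14 gives 4.096 usec * 2^14 ~= 67 msec.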
1388                  */
1389                 rvt_mod_retry_timer(qp);
1390                 if (qp->s_flags & RVT_S_WAIT_ACK) {
1391                         qp->s_flags &= ~RVT_S_WAIT_ACK;
1392                         qib_schedule_send(qp);
1393                 }
1394
1395                 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
1396                         qp->s_retry = qp->s_retry_cnt;
1397
1398                 /*
1399                  * Update the RDMA receive state but do the copy w/o
1400                  * holding the locks and blocking interrupts.
1401                  */
1402                 qp->s_rdma_read_len -= pmtu;
1403                 update_last_psn(qp, psn);
1404                 spin_unlock_irqrestore(&qp->s_lock, flags);
1405                 rvt_copy_sge(qp, &qp->s_rdma_read_sge,
1406                              data, pmtu, false, false);
1407                 goto bail;
1408
1409         case OP(RDMA_READ_RESPONSE_ONLY):
1410                 aeth = be32_to_cpu(ohdr->u.aeth);
1411                 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
1412                         goto ack_done;
1413                 /* Get the number of bytes the message was padded by. */
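                /* PadCnt sits in bits 21:20 of the first BTH dword. */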
1414                 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1415                 /*
1416                  * Check that the data size is >= 0 && <= pmtu.
1417                  * Remember to account for the AETH header (4) and
1418                  * ICRC (4).
1419                  */
1420                 if (unlikely(tlen < (hdrsize + pad + 8)))
1421                         goto ack_len_err;
1422                 /*
1423                  * If this is a response to a resent RDMA read, we
1424                  * have to be careful to copy the data to the right
1425                  * location.
1426                  */
1427                 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1428                 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1429                                                   wqe, psn, pmtu);
1430                 goto read_last;
1431
1432         case OP(RDMA_READ_RESPONSE_LAST):
                /* The last response packet also ACKs the RDMA read request. */
1434                 if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1435                         goto ack_seq_err;
1436                 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1437                         goto ack_op_err;
1438                 /* Get the number of bytes the message was padded by. */
1439                 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1440                 /*
1441                  * Check that the data size is >= 1 && <= pmtu.
1442                  * Remember to account for the AETH header (4) and
1443                  * ICRC (4).
1444                  */
1445                 if (unlikely(tlen <= (hdrsize + pad + 8)))
1446                         goto ack_len_err;
1447 read_last:
1448                 tlen -= hdrsize + pad + 8;
1449                 if (unlikely(tlen != qp->s_rdma_read_len))
1450                         goto ack_len_err;
1451                 aeth = be32_to_cpu(ohdr->u.aeth);
1452                 rvt_copy_sge(qp, &qp->s_rdma_read_sge,
1453                              data, tlen, false, false);
1454                 WARN_ON(qp->s_rdma_read_sge.num_sge);
1455                 (void) do_rc_ack(qp, aeth, psn,
1456                                  OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
1457                 goto ack_done;
1458         }
1459
1460 ack_op_err:
1461         status = IB_WC_LOC_QP_OP_ERR;
1462         goto ack_err;
1463
1464 ack_seq_err:
1465         rdma_seq_err(qp, ibp, psn, rcd);
1466         goto ack_done;
1467
1468 ack_len_err:
1469         status = IB_WC_LOC_LEN_ERR;
1470 ack_err:
1471         if (qp->s_last == qp->s_acked) {
1472                 rvt_send_complete(qp, wqe, status);
1473                 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1474         }
1475 ack_done:
1476         spin_unlock_irqrestore(&qp->s_lock, flags);
1477 bail:
1478         return;
1479 }
1480
1481 /**
1482  * qib_rc_rcv_error - process an incoming duplicate or error RC packet
1483  * @ohdr: the other headers for this packet
1484  * @data: the packet data
1485  * @qp: the QP for this packet
1486  * @opcode: the opcode for this packet
1487  * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @rcd: the context pointer
 *
1490  * This is called from qib_rc_rcv() to process an unexpected
1491  * incoming RC packet for the given QP.
1492  * Called at interrupt level.
1493  * Return 1 if no more processing is needed; otherwise return 0 to
1494  * schedule a response to be sent.
1495  */
1496 static int qib_rc_rcv_error(struct ib_other_headers *ohdr,
1497                             void *data,
1498                             struct rvt_qp *qp,
1499                             u32 opcode,
1500                             u32 psn,
1501                             int diff,
1502                             struct qib_ctxtdata *rcd)
1503 {
1504         struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1505         struct rvt_ack_entry *e;
1506         unsigned long flags;
1507         u8 i, prev;
1508         int old_req;
1509
1510         if (diff > 0) {
1511                 /*
1512                  * Packet sequence error.
1513                  * A NAK will ACK earlier sends and RDMA writes.
1514                  * Don't queue the NAK if we already sent one.
1515                  */
1516                 if (!qp->r_nak_state) {
1517                         ibp->rvp.n_rc_seqnak++;
1518                         qp->r_nak_state = IB_NAK_PSN_ERROR;
1519                         /* Use the expected PSN. */
1520                         qp->r_ack_psn = qp->r_psn;
1521                         /*
1522                          * Wait to send the sequence NAK until all packets
1523                          * in the receive queue have been processed.
1524                          * Otherwise, we end up propagating congestion.
1525                          */
1526                         if (list_empty(&qp->rspwait)) {
1527                                 qp->r_flags |= RVT_R_RSP_NAK;
1528                                 rvt_get_qp(qp);
1529                                 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1530                         }
1531                 }
1532                 goto done;
1533         }
1534
1535         /*
1536          * Handle a duplicate request.  Don't re-execute SEND, RDMA
1537          * write or atomic op.  Don't NAK errors, just silently drop
1538          * the duplicate request.  Note that r_sge, r_len, and
1539          * r_rcv_len may be in use so don't modify them.
1540          *
1541          * We are supposed to ACK the earliest duplicate PSN but we
1542          * can coalesce an outstanding duplicate ACK.  We have to
1543          * send the earliest so that RDMA reads can be restarted at
1544          * the requester's expected PSN.
1545          *
1546          * First, find where this duplicate PSN falls within the
1547          * ACKs previously sent.
1548          * old_req is true if there is an older response that is scheduled
1549          * to be sent before sending this one.
1550          */
1551         e = NULL;
1552         old_req = 1;
1553         ibp->rvp.n_rc_dupreq++;
1554
1555         spin_lock_irqsave(&qp->s_lock, flags);
1556
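        /*
         * Walk the ack queue backwards from the newest entry toward the
         * tail, looking for the slot whose PSN range covers this packet.
         */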
1557         for (i = qp->r_head_ack_queue; ; i = prev) {
1558                 if (i == qp->s_tail_ack_queue)
1559                         old_req = 0;
1560                 if (i)
1561                         prev = i - 1;
1562                 else
1563                         prev = QIB_MAX_RDMA_ATOMIC;
1564                 if (prev == qp->r_head_ack_queue) {
1565                         e = NULL;
1566                         break;
1567                 }
1568                 e = &qp->s_ack_queue[prev];
1569                 if (!e->opcode) {
1570                         e = NULL;
1571                         break;
1572                 }
1573                 if (qib_cmp24(psn, e->psn) >= 0) {
1574                         if (prev == qp->s_tail_ack_queue &&
1575                             qib_cmp24(psn, e->lpsn) <= 0)
1576                                 old_req = 0;
1577                         break;
1578                 }
1579         }
1580         switch (opcode) {
1581         case OP(RDMA_READ_REQUEST): {
1582                 struct ib_reth *reth;
1583                 u32 offset;
1584                 u32 len;
1585
1586                 /*
1587                  * If we didn't find the RDMA read request in the ack queue,
1588                  * we can ignore this request.
1589                  */
1590                 if (!e || e->opcode != OP(RDMA_READ_REQUEST))
1591                         goto unlock_done;
1592                 /* RETH comes after BTH */
1593                 reth = &ohdr->u.rc.reth;
1594                 /*
1595                  * Address range must be a subset of the original
1596                  * request and start on pmtu boundaries.
1597                  * We reuse the old ack_queue slot since the requester
1598                  * should not back up and request an earlier PSN for the
1599                  * same request.
1600                  */
1601                 offset = ((psn - e->psn) & QIB_PSN_MASK) *
1602                         qp->pmtu;
1603                 len = be32_to_cpu(reth->length);
1604                 if (unlikely(offset + len != e->rdma_sge.sge_length))
1605                         goto unlock_done;
1606                 if (e->rdma_sge.mr) {
1607                         rvt_put_mr(e->rdma_sge.mr);
1608                         e->rdma_sge.mr = NULL;
1609                 }
1610                 if (len != 0) {
1611                         u32 rkey = be32_to_cpu(reth->rkey);
1612                         u64 vaddr = be64_to_cpu(reth->vaddr);
1613                         int ok;
1614
1615                         ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
1616                                          IB_ACCESS_REMOTE_READ);
1617                         if (unlikely(!ok))
1618                                 goto unlock_done;
1619                 } else {
1620                         e->rdma_sge.vaddr = NULL;
1621                         e->rdma_sge.length = 0;
1622                         e->rdma_sge.sge_length = 0;
1623                 }
1624                 e->psn = psn;
1625                 if (old_req)
1626                         goto unlock_done;
1627                 qp->s_tail_ack_queue = prev;
1628                 break;
1629         }
1630
1631         case OP(COMPARE_SWAP):
1632         case OP(FETCH_ADD): {
1633                 /*
1634                  * If we didn't find the atomic request in the ack queue
1635                  * or the send tasklet is already backed up to send an
1636                  * earlier entry, we can ignore this request.
1637                  */
1638                 if (!e || e->opcode != (u8) opcode || old_req)
1639                         goto unlock_done;
1640                 qp->s_tail_ack_queue = prev;
1641                 break;
1642         }
1643
1644         default:
1645                 /*
1646                  * Ignore this operation if it doesn't request an ACK
1647                  * or an earlier RDMA read or atomic is going to be resent.
1648                  */
1649                 if (!(psn & IB_BTH_REQ_ACK) || old_req)
1650                         goto unlock_done;
1651                 /*
1652                  * Resend the most recent ACK if this request is
1653                  * after all the previous RDMA reads and atomics.
1654                  */
1655                 if (i == qp->r_head_ack_queue) {
1656                         spin_unlock_irqrestore(&qp->s_lock, flags);
1657                         qp->r_nak_state = 0;
1658                         qp->r_ack_psn = qp->r_psn - 1;
1659                         goto send_ack;
1660                 }
1661                 /*
1662                  * Try to send a simple ACK to work around a Mellanox bug
                 * which doesn't accept an RDMA read response or atomic
1664                  * response as an ACK for earlier SENDs or RDMA writes.
1665                  */
1666                 if (!(qp->s_flags & RVT_S_RESP_PENDING)) {
1667                         spin_unlock_irqrestore(&qp->s_lock, flags);
1668                         qp->r_nak_state = 0;
1669                         qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
1670                         goto send_ack;
1671                 }
1672                 /*
1673                  * Resend the RDMA read or atomic op which
1674                  * ACKs this duplicate request.
1675                  */
1676                 qp->s_tail_ack_queue = i;
1677                 break;
1678         }
1679         qp->s_ack_state = OP(ACKNOWLEDGE);
1680         qp->s_flags |= RVT_S_RESP_PENDING;
1681         qp->r_nak_state = 0;
1682         qib_schedule_send(qp);
1683
1684 unlock_done:
1685         spin_unlock_irqrestore(&qp->s_lock, flags);
1686 done:
1687         return 1;
1688
1689 send_ack:
1690         return 0;
1691 }
1692
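/*
 * s_ack_queue is a ring of QIB_MAX_RDMA_ATOMIC + 1 entries; one slot is
 * kept free so that head == tail means "empty" rather than "full" (see
 * the "use > not >=" wrap tests in qib_rc_rcv() below).
 */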
1693 static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
1694 {
1695         unsigned next;
1696
1697         next = n + 1;
1698         if (next > QIB_MAX_RDMA_ATOMIC)
1699                 next = 0;
1700         qp->s_tail_ack_queue = next;
1701         qp->s_ack_state = OP(ACKNOWLEDGE);
1702 }
1703
1704 /**
1705  * qib_rc_rcv - process an incoming RC packet
1706  * @rcd: the context pointer
1707  * @hdr: the header of this packet
1708  * @has_grh: true if the header has a GRH
1709  * @data: the packet data
1710  * @tlen: the packet length
1711  * @qp: the QP for this packet
1712  *
1713  * This is called from qib_qp_rcv() to process an incoming RC packet
1714  * for the given QP.
1715  * Called at interrupt level.
1716  */
1717 void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
1718                 int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
1719 {
1720         struct qib_ibport *ibp = &rcd->ppd->ibport_data;
1721         struct ib_other_headers *ohdr;
1722         u32 opcode;
1723         u32 hdrsize;
1724         u32 psn;
1725         u32 pad;
1726         struct ib_wc wc;
1727         u32 pmtu = qp->pmtu;
1728         int diff;
1729         struct ib_reth *reth;
1730         unsigned long flags;
1731         int ret;
1732
1733         /* Check for GRH */
1734         if (!has_grh) {
1735                 ohdr = &hdr->u.oth;
1736                 hdrsize = 8 + 12;       /* LRH + BTH */
1737         } else {
1738                 ohdr = &hdr->u.l.oth;
1739                 hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
1740         }
1741
1742         opcode = be32_to_cpu(ohdr->bth[0]);
1743         if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
1744                 return;
1745
1746         psn = be32_to_cpu(ohdr->bth[2]);
1747         opcode >>= 24;
1748
1749         /*
1750          * Process responses (ACKs) before anything else.  Note that the
1751          * packet sequence number will be for something in the send work
1752          * queue rather than the expected receive packet sequence number.
1753          * In other words, this QP is the requester.
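         * The RC response opcodes are contiguous in the IBTA numbering
         * (RDMA_READ_RESPONSE_FIRST 0x0d through ATOMIC_ACKNOWLEDGE 0x12),
         * which is why a simple range test suffices.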
1754          */
1755         if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1756             opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1757                 qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
1758                                 hdrsize, pmtu, rcd);
1759                 return;
1760         }
1761
1762         /* Compute 24 bits worth of difference. */
1763         diff = qib_cmp24(psn, qp->r_psn);
1764         if (unlikely(diff)) {
1765                 if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
1766                         return;
1767                 goto send_ack;
1768         }
1769
1770         /* Check for opcode sequence errors. */
1771         switch (qp->r_state) {
1772         case OP(SEND_FIRST):
1773         case OP(SEND_MIDDLE):
1774                 if (opcode == OP(SEND_MIDDLE) ||
1775                     opcode == OP(SEND_LAST) ||
1776                     opcode == OP(SEND_LAST_WITH_IMMEDIATE))
1777                         break;
1778                 goto nack_inv;
1779
1780         case OP(RDMA_WRITE_FIRST):
1781         case OP(RDMA_WRITE_MIDDLE):
1782                 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
1783                     opcode == OP(RDMA_WRITE_LAST) ||
1784                     opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1785                         break;
1786                 goto nack_inv;
1787
1788         default:
1789                 if (opcode == OP(SEND_MIDDLE) ||
1790                     opcode == OP(SEND_LAST) ||
1791                     opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
1792                     opcode == OP(RDMA_WRITE_MIDDLE) ||
1793                     opcode == OP(RDMA_WRITE_LAST) ||
1794                     opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1795                         goto nack_inv;
1796                 /*
1797                  * Note that it is up to the requester to not send a new
1798                  * RDMA read or atomic operation before receiving an ACK
1799                  * for the previous operation.
1800                  */
1801                 break;
1802         }
1803
1804         if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
1805                 rvt_comm_est(qp);
1806
1807         /* OK, process the packet. */
1808         switch (opcode) {
1809         case OP(SEND_FIRST):
1810                 ret = rvt_get_rwqe(qp, false);
1811                 if (ret < 0)
1812                         goto nack_op_err;
1813                 if (!ret)
1814                         goto rnr_nak;
1815                 qp->r_rcv_len = 0;
1816                 /* FALLTHROUGH */
1817         case OP(SEND_MIDDLE):
1818         case OP(RDMA_WRITE_MIDDLE):
1819 send_middle:
                /* Check for invalid length: must match the PMTU and fit the posted RWQE. */
1821                 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1822                         goto nack_inv;
1823                 qp->r_rcv_len += pmtu;
1824                 if (unlikely(qp->r_rcv_len > qp->r_len))
1825                         goto nack_inv;
1826                 rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
1827                 break;
1828
1829         case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
1830                 /* consume RWQE */
1831                 ret = rvt_get_rwqe(qp, true);
1832                 if (ret < 0)
1833                         goto nack_op_err;
1834                 if (!ret)
1835                         goto rnr_nak;
1836                 goto send_last_imm;
1837
1838         case OP(SEND_ONLY):
1839         case OP(SEND_ONLY_WITH_IMMEDIATE):
1840                 ret = rvt_get_rwqe(qp, false);
1841                 if (ret < 0)
1842                         goto nack_op_err;
1843                 if (!ret)
1844                         goto rnr_nak;
1845                 qp->r_rcv_len = 0;
1846                 if (opcode == OP(SEND_ONLY))
1847                         goto no_immediate_data;
1848                 /* fall through -- for SEND_ONLY_WITH_IMMEDIATE */
1849         case OP(SEND_LAST_WITH_IMMEDIATE):
1850 send_last_imm:
1851                 wc.ex.imm_data = ohdr->u.imm_data;
1852                 hdrsize += 4;
1853                 wc.wc_flags = IB_WC_WITH_IMM;
1854                 goto send_last;
1855         case OP(SEND_LAST):
1856         case OP(RDMA_WRITE_LAST):
1857 no_immediate_data:
1858                 wc.wc_flags = 0;
1859                 wc.ex.imm_data = 0;
1860 send_last:
1861                 /* Get the number of bytes the message was padded by. */
1862                 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1863                 /* Check for invalid length. */
1864                 /* XXX LAST len should be >= 1 */
1865                 if (unlikely(tlen < (hdrsize + pad + 4)))
1866                         goto nack_inv;
1867                 /* Don't count the CRC. */
1868                 tlen -= (hdrsize + pad + 4);
1869                 wc.byte_len = tlen + qp->r_rcv_len;
1870                 if (unlikely(wc.byte_len > qp->r_len))
1871                         goto nack_inv;
1872                 rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
1873                 rvt_put_ss(&qp->r_sge);
1874                 qp->r_msn++;
1875                 if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
1876                         break;
1877                 wc.wr_id = qp->r_wr_id;
1878                 wc.status = IB_WC_SUCCESS;
1879                 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
1880                     opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
1881                         wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
1882                 else
1883                         wc.opcode = IB_WC_RECV;
1884                 wc.qp = &qp->ibqp;
1885                 wc.src_qp = qp->remote_qpn;
1886                 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
1887                 wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
1888                 /* zero fields that are N/A */
1889                 wc.vendor_err = 0;
1890                 wc.pkey_index = 0;
1891                 wc.dlid_path_bits = 0;
1892                 wc.port_num = 0;
1893                 /* Signal completion event if the solicited bit is set. */
1894                 rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
1895                 break;
1896
1897         case OP(RDMA_WRITE_FIRST):
1898         case OP(RDMA_WRITE_ONLY):
1899         case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
1900                 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
1901                         goto nack_inv;
1902                 /* consume RWQE */
1903                 reth = &ohdr->u.rc.reth;
1904                 hdrsize += sizeof(*reth);
1905                 qp->r_len = be32_to_cpu(reth->length);
1906                 qp->r_rcv_len = 0;
1907                 qp->r_sge.sg_list = NULL;
1908                 if (qp->r_len != 0) {
1909                         u32 rkey = be32_to_cpu(reth->rkey);
1910                         u64 vaddr = be64_to_cpu(reth->vaddr);
1911                         int ok;
1912
1913                         /* Check rkey & NAK */
1914                         ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
1915                                          rkey, IB_ACCESS_REMOTE_WRITE);
1916                         if (unlikely(!ok))
1917                                 goto nack_acc;
1918                         qp->r_sge.num_sge = 1;
1919                 } else {
1920                         qp->r_sge.num_sge = 0;
1921                         qp->r_sge.sge.mr = NULL;
1922                         qp->r_sge.sge.vaddr = NULL;
1923                         qp->r_sge.sge.length = 0;
1924                         qp->r_sge.sge.sge_length = 0;
1925                 }
1926                 if (opcode == OP(RDMA_WRITE_FIRST))
1927                         goto send_middle;
1928                 else if (opcode == OP(RDMA_WRITE_ONLY))
1929                         goto no_immediate_data;
1930                 ret = rvt_get_rwqe(qp, true);
1931                 if (ret < 0)
1932                         goto nack_op_err;
1933                 if (!ret) {
1934                         rvt_put_ss(&qp->r_sge);
1935                         goto rnr_nak;
1936                 }
1937                 wc.ex.imm_data = ohdr->u.rc.imm_data;
1938                 hdrsize += 4;
1939                 wc.wc_flags = IB_WC_WITH_IMM;
1940                 goto send_last;
1941
1942         case OP(RDMA_READ_REQUEST): {
1943                 struct rvt_ack_entry *e;
1944                 u32 len;
1945                 u8 next;
1946
1947                 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
1948                         goto nack_inv;
1949                 next = qp->r_head_ack_queue + 1;
1950                 /* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
1951                 if (next > QIB_MAX_RDMA_ATOMIC)
1952                         next = 0;
1953                 spin_lock_irqsave(&qp->s_lock, flags);
1954                 if (unlikely(next == qp->s_tail_ack_queue)) {
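                        /*
                         * The ack queue is full: the requester has more reads
                         * and atomics outstanding than we advertised.  Recycle
                         * the tail slot if its response was already sent;
                         * otherwise NAK the invalid request.
                         */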
1955                         if (!qp->s_ack_queue[next].sent)
1956                                 goto nack_inv_unlck;
1957                         qib_update_ack_queue(qp, next);
1958                 }
1959                 e = &qp->s_ack_queue[qp->r_head_ack_queue];
1960                 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
1961                         rvt_put_mr(e->rdma_sge.mr);
1962                         e->rdma_sge.mr = NULL;
1963                 }
1964                 reth = &ohdr->u.rc.reth;
1965                 len = be32_to_cpu(reth->length);
1966                 if (len) {
1967                         u32 rkey = be32_to_cpu(reth->rkey);
1968                         u64 vaddr = be64_to_cpu(reth->vaddr);
1969                         int ok;
1970
1971                         /* Check rkey & NAK */
1972                         ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
1973                                          rkey, IB_ACCESS_REMOTE_READ);
1974                         if (unlikely(!ok))
1975                                 goto nack_acc_unlck;
1976                         /*
1977                          * Update the next expected PSN.  We add 1 later
1978                          * below, so only add the remainder here.
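                         * e.g. a read of 3 * pmtu bytes needs three response
                         * packets: rvt_div_mtu(qp, len - 1) adds 2 here and
                         * the final qp->r_psn++ supplies the third.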
1979                          */
1980                         qp->r_psn += rvt_div_mtu(qp, len - 1);
1981                 } else {
1982                         e->rdma_sge.mr = NULL;
1983                         e->rdma_sge.vaddr = NULL;
1984                         e->rdma_sge.length = 0;
1985                         e->rdma_sge.sge_length = 0;
1986                 }
1987                 e->opcode = opcode;
1988                 e->sent = 0;
1989                 e->psn = psn;
1990                 e->lpsn = qp->r_psn;
1991                 /*
1992                  * We need to increment the MSN here instead of when we
1993                  * finish sending the result since a duplicate request would
1994                  * increment it more than once.
1995                  */
1996                 qp->r_msn++;
1997                 qp->r_psn++;
1998                 qp->r_state = opcode;
1999                 qp->r_nak_state = 0;
2000                 qp->r_head_ack_queue = next;
2001
2002                 /* Schedule the send tasklet. */
2003                 qp->s_flags |= RVT_S_RESP_PENDING;
2004                 qib_schedule_send(qp);
2005
2006                 goto sunlock;
2007         }
2008
2009         case OP(COMPARE_SWAP):
2010         case OP(FETCH_ADD): {
2011                 struct ib_atomic_eth *ateth;
2012                 struct rvt_ack_entry *e;
2013                 u64 vaddr;
2014                 atomic64_t *maddr;
2015                 u64 sdata;
2016                 u32 rkey;
2017                 u8 next;
2018
2019                 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
2020                         goto nack_inv;
2021                 next = qp->r_head_ack_queue + 1;
2022                 if (next > QIB_MAX_RDMA_ATOMIC)
2023                         next = 0;
2024                 spin_lock_irqsave(&qp->s_lock, flags);
2025                 if (unlikely(next == qp->s_tail_ack_queue)) {
2026                         if (!qp->s_ack_queue[next].sent)
2027                                 goto nack_inv_unlck;
2028                         qib_update_ack_queue(qp, next);
2029                 }
2030                 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2031                 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
2032                         rvt_put_mr(e->rdma_sge.mr);
2033                         e->rdma_sge.mr = NULL;
2034                 }
2035                 ateth = &ohdr->u.atomic_eth;
2036                 vaddr = get_ib_ateth_vaddr(ateth);
2037                 if (unlikely(vaddr & (sizeof(u64) - 1)))
2038                         goto nack_inv_unlck;
2039                 rkey = be32_to_cpu(ateth->rkey);
2040                 /* Check rkey & NAK */
2041                 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
2042                                           vaddr, rkey,
2043                                           IB_ACCESS_REMOTE_ATOMIC)))
2044                         goto nack_acc_unlck;
2045                 /* Perform atomic OP and save result. */
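                /*
                 * atomic64_add_return() yields the post-add value, so
                 * subtracting sdata recovers the original contents;
                 * cmpxchg() returns the prior value directly.  Either way
                 * e->atomic_data holds the pre-op value that the
                 * ATOMIC_ACKNOWLEDGE must return to the requester.
                 */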
2046                 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
2047                 sdata = get_ib_ateth_swap(ateth);
2048                 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
2049                         (u64) atomic64_add_return(sdata, maddr) - sdata :
2050                         (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
2051                                       get_ib_ateth_compare(ateth),
2052                                       sdata);
2053                 rvt_put_mr(qp->r_sge.sge.mr);
2054                 qp->r_sge.num_sge = 0;
2055                 e->opcode = opcode;
2056                 e->sent = 0;
2057                 e->psn = psn;
2058                 e->lpsn = psn;
2059                 qp->r_msn++;
2060                 qp->r_psn++;
2061                 qp->r_state = opcode;
2062                 qp->r_nak_state = 0;
2063                 qp->r_head_ack_queue = next;
2064
2065                 /* Schedule the send tasklet. */
2066                 qp->s_flags |= RVT_S_RESP_PENDING;
2067                 qib_schedule_send(qp);
2068
2069                 goto sunlock;
2070         }
2071
2072         default:
2073                 /* NAK unknown opcodes. */
2074                 goto nack_inv;
2075         }
2076         qp->r_psn++;
2077         qp->r_state = opcode;
2078         qp->r_ack_psn = psn;
2079         qp->r_nak_state = 0;
        /* Send an ACK if the requester asked for one (BTH AckReq bit). */
        if (psn & IB_BTH_REQ_ACK)
2082                 goto send_ack;
2083         return;
2084
2085 rnr_nak:
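        /* AETH syndrome: the RNR NAK code ORed with the 5-bit min RNR timer. */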
2086         qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
2087         qp->r_ack_psn = qp->r_psn;
2088         /* Queue RNR NAK for later */
2089         if (list_empty(&qp->rspwait)) {
2090                 qp->r_flags |= RVT_R_RSP_NAK;
2091                 rvt_get_qp(qp);
2092                 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2093         }
2094         return;
2095
2096 nack_op_err:
2097         rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2098         qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2099         qp->r_ack_psn = qp->r_psn;
2100         /* Queue NAK for later */
2101         if (list_empty(&qp->rspwait)) {
2102                 qp->r_flags |= RVT_R_RSP_NAK;
2103                 rvt_get_qp(qp);
2104                 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2105         }
2106         return;
2107
2108 nack_inv_unlck:
2109         spin_unlock_irqrestore(&qp->s_lock, flags);
2110 nack_inv:
2111         rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2112         qp->r_nak_state = IB_NAK_INVALID_REQUEST;
2113         qp->r_ack_psn = qp->r_psn;
2114         /* Queue NAK for later */
2115         if (list_empty(&qp->rspwait)) {
2116                 qp->r_flags |= RVT_R_RSP_NAK;
2117                 rvt_get_qp(qp);
2118                 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2119         }
2120         return;
2121
2122 nack_acc_unlck:
2123         spin_unlock_irqrestore(&qp->s_lock, flags);
2124 nack_acc:
2125         rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
2126         qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2127         qp->r_ack_psn = qp->r_psn;
2128 send_ack:
2129         qib_send_rc_ack(qp);
2130         return;
2131
2132 sunlock:
2133         spin_unlock_irqrestore(&qp->s_lock, flags);
2134 }