/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
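/*
 * For example, OP(SEND_FIRST) expands to IB_OPCODE_RC_SEND_FIRST via
 * token pasting, which keeps the opcode switch statements below readable.
 */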
static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
		       u32 psn, u32 pmtu)
{
	u32 len;

	len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
	return rvt_restart_sge(ss, wqe, len);
}
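/*
 * Worked example: resuming a request three packets past its first PSN
 * with a 2048-byte pmtu skips 3 * 2048 = 6144 bytes of already-sent
 * payload; the 24-bit mask keeps the PSN subtraction correct across
 * wrap-around.
 */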
/**
 * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @dev: the device for this QP
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note that we are on the responder side of the QP context.
 * Note the QP s_lock must be held.
 */
static int qib_make_rc_ack(struct qib_ibdev *dev, struct rvt_qp *qp,
			   struct ib_other_headers *ohdr, u32 pmtu)
{
	struct rvt_ack_entry *e;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;

	/* Don't send an ACK if we aren't supposed to. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto bail;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	switch (qp->s_ack_state) {
	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		rvt_put_mr(e->rdma_sge.mr);
		e->rdma_sge.mr = NULL;
	case OP(ATOMIC_ACKNOWLEDGE):
		/*
		 * We can increment the tail pointer now that the last
		 * response has been sent instead of only being
		 * constructed.
		 */
		if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
			qp->s_tail_ack_queue = 0;
		/* Check for no next entry in the queue. */
		if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
			if (qp->s_flags & RVT_S_ACK_PENDING)
				goto normal;
			goto bail;
		}

		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST)) {
			/*
			 * If an RDMA read response is being resent and
			 * we haven't seen the duplicate request yet,
			 * then stop sending the remaining responses the
			 * responder has seen until the requester resends it.
			 */
			len = e->rdma_sge.sge_length;
			if (len && !e->rdma_sge.mr) {
				qp->s_tail_ack_queue = qp->r_head_ack_queue;
				goto bail;
			}
			/* Copy SGE state in case we need to resend */
			qp->s_rdma_mr = e->rdma_sge.mr;
			if (qp->s_rdma_mr)
				rvt_get_mr(qp->s_rdma_mr);
			qp->s_ack_rdma_sge.sge = e->rdma_sge;
			qp->s_ack_rdma_sge.num_sge = 1;
			qp->s_cur_sge = &qp->s_ack_rdma_sge;
			if (len > pmtu) {
				len = pmtu;
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
			} else {
				qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
			ohdr->u.aeth = rvt_compute_aeth(qp);
			qp->s_ack_rdma_psn = e->psn;
			bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;

			/* COMPARE_SWAP or FETCH_ADD */
			qp->s_cur_sge = NULL;
			qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
			ohdr->u.at.aeth = rvt_compute_aeth(qp);
			ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
			hwords += sizeof(ohdr->u.at) / sizeof(u32);
			bth2 = e->psn & QIB_PSN_MASK;
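		/*
		 * The opcode occupies the most significant byte of BTH
		 * dword 0, which is why the ack state (itself an opcode
		 * value) is shifted left by 24 below.
		 */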
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		qp->s_cur_sge = &qp->s_ack_rdma_sge;
		qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
		if (qp->s_rdma_mr)
			rvt_get_mr(qp->s_rdma_mr);
		len = qp->s_ack_rdma_sge.sge.sge_length;
		if (len > pmtu)
			len = pmtu;
		else {
			ohdr->u.aeth = rvt_compute_aeth(qp);
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
			e = &qp->s_ack_queue[qp->s_tail_ack_queue];
		}
		bth0 = qp->s_ack_state << 24;
		bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
	default:
normal:
		/*
		 * Send a regular ACK.
		 * Set the s_ack_state so we wait until after sending
		 * the ACK before setting s_ack_state to ACKNOWLEDGE
		 * (see above).
		 */
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_flags &= ~RVT_S_ACK_PENDING;
		qp->s_cur_sge = NULL;
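		/*
		 * AETH layout: the syndrome (ACK/NAK/credit code) sits in
		 * the high byte above IB_AETH_CREDIT_SHIFT and the 24-bit
		 * MSN in the low bits, hence the mask-and-shift below.
		 */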
		if (qp->s_nak_state)
			ohdr->u.aeth =
				cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					    (qp->s_nak_state <<
					     IB_AETH_CREDIT_SHIFT));
		else
			ohdr->u.aeth = rvt_compute_aeth(qp);

		bth0 = OP(ACKNOWLEDGE) << 24;
		bth2 = qp->s_ack_psn & QIB_PSN_MASK;
	qp->s_rdma_ack_cnt++;
	qp->s_hdrwords = hwords;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0, bth2);
	return 1;

bail:
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags &= ~(RVT_S_RESP_PENDING | RVT_S_ACK_PENDING);
	return 0;
}
/**
 * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_other_headers *ohdr;
	struct rvt_sge_state *ss;
	struct rvt_swqe *wqe;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	u32 pmtu = qp->pmtu;
	char newreq;
	int delta;

	ohdr = &priv->s_hdr->u.oth;
	if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
		ohdr = &priv->s_hdr->u.l.oth;
	/* Sending responses has higher priority than sending requests. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) &&
	    qib_make_rc_ack(dev, qp, ohdr, pmtu))
		goto done;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
			IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
		/* will get called again */
		goto done;
	}
	if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
		goto bail;

	if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
		if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
			qp->s_flags |= RVT_S_WAIT_PSN;
			goto bail;
		}
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
	}
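	/*
	 * Convention used above: making s_sending_hpsn one less than
	 * s_sending_psn marks the inclusive "packets in flight" window
	 * as empty.
	 */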
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */

	/* Send a request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == READ_ONCE(qp->s_head))
				goto bail;
			/*
			 * If a fence is requested, wait for previous
			 * RDMA read and atomic operations to finish.
			 */
			if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
			    qp->s_num_rd_atomic) {
				qp->s_flags |= RVT_S_WAIT_FENCE;
				goto bail;
			}
			qp->s_psn = wqe->psn;
		}

		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		bth2 = qp->s_psn & QIB_PSN_MASK;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_RDMA_WRITE:
			if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
				qp->s_lsn++;
			goto no_flow_control;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
			    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
				qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
				goto bail;
			}
no_flow_control:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / sizeof(u32);
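			/*
			 * The RETH is 16 bytes (8-byte vaddr, 4-byte rkey,
			 * 4-byte length), i.e. four 32-bit header words.
			 */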
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->rdma_wr.wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after RETH */
				ohdr->u.rc.imm_data =
					wqe->rdma_wr.wr.ex.imm_data;
				if (wqe->rdma_wr.wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_RDMA_READ:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			/*
			 * Don't allow more operations to be started
			 * than the QP limits allow.
			 */
			if (newreq) {
				if (qp->s_num_rd_atomic >=
				    qp->s_max_rd_atomic) {
					qp->s_flags |= RVT_S_WAIT_RDMAR;
					goto bail;
				}
				qp->s_num_rd_atomic++;
				if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
					qp->s_lsn++;
			}
			if (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
				qp->s_state = OP(COMPARE_SWAP);
				put_ib_ateth_swap(wqe->atomic_wr.swap,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(wqe->atomic_wr.compare_add,
						     &ohdr->u.atomic_eth);
			} else {
				qp->s_state = OP(FETCH_ADD);
				put_ib_ateth_swap(wqe->atomic_wr.compare_add,
						  &ohdr->u.atomic_eth);
				put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
			}
			put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
					   &ohdr->u.atomic_eth);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->atomic_wr.rkey);
			hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
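			/*
			 * The AtomicETH is 28 bytes (8-byte vaddr, 4-byte
			 * rkey, 8-byte swap/add data, 8-byte compare data),
			 * i.e. seven 32-bit header words.
			 */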
			bth2 |= IB_BTH_REQ_ACK;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		qp->s_len = wqe->length;
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_READ)
			qp->s_psn = wqe->lpsn + 1;
		else
			qp->s_psn++;
		break;
	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
		 * thread to indicate a SEND needs to be restarted from an
		 * earlier PSN without interfering with the sending thread.
		 * See qib_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
	case OP(SEND_MIDDLE):
		bth2 = qp->s_psn++ & QIB_PSN_MASK;
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		bth2 |= IB_BTH_REQ_ACK;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
		 * thread to indicate an RDMA write needs to be restarted
		 * from an earlier PSN without interfering with the sending
		 * thread.
		 * See qib_restart_rc().
		 */
		qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = qp->s_psn++ & QIB_PSN_MASK;
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		bth2 |= IB_BTH_REQ_ACK;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * qp->s_state is normally set to the opcode of the
		 * last packet constructed for new requests and therefore
		 * is never set to RDMA read response.
		 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
		 * thread to indicate an RDMA read needs to be restarted
		 * from an earlier PSN without interfering with the sending
		 * thread.
		 * See qib_restart_rc().
		 */
		len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->rdma_wr.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->rdma_wr.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
		bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
		qp->s_psn = wqe->lpsn + 1;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	}
	qp->s_sending_hpsn = bth2;
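	/*
	 * The delta computed below is sign-extended from 24 bits (shift
	 * up by 8, arithmetic shift back down) so PSN wrap-around is
	 * handled; an ACK is then requested once every QIB_PSN_CREDIT
	 * packets to keep completions and credits flowing.
	 */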
	delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
	if (delta && delta % QIB_PSN_CREDIT == 0)
		bth2 |= IB_BTH_REQ_ACK;
	if (qp->s_flags & RVT_S_SEND_ONE) {
		qp->s_flags &= ~RVT_S_SEND_ONE;
		qp->s_flags |= RVT_S_WAIT_ACK;
		bth2 |= IB_BTH_REQ_ACK;
	}
	qp->s_hdrwords = hwords;
	qp->s_cur_size = len;
	qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
done:
	return 1;
bail:
	qp->s_flags &= ~RVT_S_BUSY;
	return 0;
}
/**
 * qib_send_rc_ack - construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from qib_rc_rcv() and qib_kreceive().
 * Note that RDMA reads and atomics are handled in the
 * send-side QP state and tasklet.
 */
void qib_send_rc_ack(struct rvt_qp *qp)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	u64 pbc;
	u16 lrh0;
	u32 bth0;
	u32 hwords;
	u32 pbufn;
	u32 __iomem *piobuf;
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	u32 control;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto unlock;

	/* Don't send ACK or NAK if an RDMA read or atomic is pending. */
	if ((qp->s_flags & RVT_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
		goto queue_ack;

	/* Construct the header with s_lock held so APM doesn't change it. */

	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
		     IB_AH_GRH)) {
		hwords += qib_make_grh(ibp, &hdr.u.l.grh,
				       rdma_ah_read_grh(&qp->remote_ah_attr),
	/* read pkey_index w/o lock (it's atomic) */
	bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	if (qp->r_nak_state)
		ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
					   (qp->r_nak_state <<
					    IB_AETH_CREDIT_SHIFT));
	else
		ohdr->u.aeth = rvt_compute_aeth(qp);
	lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
		rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ppd->lid |
				 rdma_ah_get_path_bits(&qp->remote_ah_attr));
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Don't try to send ACKs if the link isn't ACTIVE */
	if (!(ppd->lflags & QIBL_LINKACTIVE))
		goto done;

	control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
				       qp->s_srate, lrh0 >> 12);
	/* length is + 1 for the control dword */
	pbc = ((u64) control << 32) | (hwords + 1);
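	/*
	 * PBC layout as used here: the low 32 bits carry the packet
	 * length in dwords (including the control dword itself), the
	 * high 32 bits carry the control flags returned by
	 * f_setpbc_control().
	 */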
	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (!piobuf) {
		/*
		 * We are out of PIO buffers at the moment.
		 * Pass responsibility for sending the ACK to the
		 * send tasklet so that when a PIO buffer becomes
		 * available, the ACK is sent ahead of other outgoing
		 * packets.
		 */
		spin_lock_irqsave(&qp->s_lock, flags);
		goto queue_ack;
	}

	/*
	 * We have to flush after the PBC for correctness
	 * on some CPUs, or the WC buffer can be written out of order.
	 */
	if (dd->flags & QIB_PIO_FLUSH_WC) {
		u32 *hdrp = (u32 *) &hdr;

		qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
		__raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
	} else
		qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);

	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
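		/*
		 * Assumption based on the QIB_USE_SPCL_TRIG flag: writing
		 * this magic value at the trigger offset near the end of
		 * the PIO buffer tells the chip to launch the packet.
		 */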
		__raw_writel(0xaebecede, piobuf + spcl_off);
	}

	qib_sendbuf_done(dd, pbufn);

	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
	goto done;

queue_ack:
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		this_cpu_inc(*ibp->rvp.rc_qacks);
		qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
		qp->s_nak_state = qp->r_nak_state;
		qp->s_ack_psn = qp->r_ack_psn;

		/* Schedule the send tasklet. */
		qib_schedule_send(qp);
	}
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from qib_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct rvt_qp *qp, u32 psn)
{
	u32 n = qp->s_cur;
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
	u32 opcode;

	/*
	 * If we are starting the request from the beginning,
	 * let the normal send code handle initialization.
	 */
	if (qib_cmp24(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		goto done;
	}
	/* Find the work request opcode corresponding to the given PSN. */
	opcode = wqe->wr.opcode;
	for (;;) {
		int diff;

		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail)
			break;
		wqe = rvt_get_swqe_ptr(qp, n);
		diff = qib_cmp24(psn, wqe->psn);
		if (diff < 0)
			break;
		qp->s_cur = n;
		/*
		 * If we are starting the request from the beginning,
		 * let the normal send code handle initialization.
		 */
		if (diff == 0) {
			qp->s_state = OP(SEND_LAST);
			goto done;
		}
		opcode = wqe->wr.opcode;
	}
	/*
	 * Set the state to restart in the middle of a request.
	 * Don't change the s_sge, s_cur_sge, or s_cur_size.
	 * See qib_make_rc_req().
	 */
	switch (opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;
	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;
	default:
		/*
		 * This case shouldn't happen since there is only one
		 * other valid requester opcode.
		 */
		qp->s_state = OP(SEND_LAST);
	}
done:
	qp->s_psn = psn;
	/*
	 * Set RVT_S_WAIT_PSN as qib_rc_send_complete() may start the timer
	 * asynchronously before the send tasklet can get scheduled.
	 * Doing it in qib_make_rc_req() is too late.
	 */
	if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
	    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
		qp->s_flags |= RVT_S_WAIT_PSN;
}
/*
 * Back up requester to resend the last un-ACKed request.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 */
void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
{
	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	struct qib_ibport *ibp;

	if (qp->s_retry == 0) {
		if (qp->s_mig_state == IB_MIG_ARMED) {
			qib_migrate_qp(qp);
			qp->s_retry = qp->s_retry_cnt;
		} else if (qp->s_last == qp->s_acked) {
			rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
			rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			return;
		} else /* XXX need to handle delayed completion */
			return;
	} else
		qp->s_retry--;
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		ibp->rvp.n_rc_resends++;
	else
		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

	qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
			 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
			 RVT_S_WAIT_ACK);
	if (wait)
		qp->s_flags |= RVT_S_SEND_ONE;
	reset_psn(qp, psn);
}
/*
 * Set qp->s_sending_psn to the next PSN after the given one.
 * This would be psn+1 except when RDMA reads are present.
 */
static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
{
	struct rvt_swqe *wqe;
	u32 n = qp->s_last;

	/* Find the work request corresponding to the given PSN. */
	for (;;) {
		wqe = rvt_get_swqe_ptr(qp, n);
		if (qib_cmp24(psn, wqe->lpsn) <= 0) {
			if (wqe->wr.opcode == IB_WR_RDMA_READ)
				qp->s_sending_psn = wqe->lpsn + 1;
			else
				qp->s_sending_psn = psn + 1;
			break;
		}
		if (++n == qp->s_size)
			n = 0;
/*
 * This should be called with the QP s_lock held and interrupts disabled.
 */
void qib_rc_send_complete(struct rvt_qp *qp, struct ib_header *hdr)
{
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
		return;

	/* Find out where the BTH is */
	if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else
		ohdr = &hdr->u.l.oth;

	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		WARN_ON(!qp->s_rdma_ack_cnt);
		qp->s_rdma_ack_cnt--;
		return;
	}
	psn = be32_to_cpu(ohdr->bth[2]);
	reset_sending_psn(qp, psn);

	/*
	 * Start timer after a packet requesting an ACK has been sent and
	 * there are still requests that haven't been acked.
	 */
	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
	    !(qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
	    (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		rvt_add_retry_timer(qp);
	while (qp->s_last != qp->s_acked) {
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
		    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
			break;
		rvt_qp_complete_swqe(qp,
				     wqe,
				     ib_qib_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	}
	/*
	 * If we were waiting for sends to complete before resending,
	 * and they are now complete, restart sending.
	 */
	if (qp->s_flags & RVT_S_WAIT_PSN &&
	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
		qp->s_flags &= ~RVT_S_WAIT_PSN;
		qp->s_sending_psn = qp->s_psn;
		qp->s_sending_hpsn = qp->s_psn - 1;
		qib_schedule_send(qp);
	}
}
static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
{
	qp->s_last_psn = psn;
}
/*
 * Generate a SWQE completion.
 * This is similar to rvt_send_complete() but has to check to be sure
 * that the SGEs are not being referenced if the SWQE is being resent.
 */
static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
					 struct rvt_swqe *wqe,
					 struct qib_ibport *ibp)
{
	/*
	 * Don't decrement refcount and don't generate a
	 * completion if the SWQE is being resent until the send
	 * is finished.
	 */
	if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
	    qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0)
		rvt_qp_complete_swqe(qp,
				     wqe,
				     ib_qib_wc_opcode[wqe->wr.opcode],
				     IB_WC_SUCCESS);
	else
		this_cpu_inc(*ibp->rvp.rc_delayed_comp);
	qp->s_retry = qp->s_retry_cnt;
	update_last_psn(qp, wqe->lpsn);
	/*
	 * If we are completing a request which is in the process of
	 * being resent, we can stop resending it since we know the
	 * responder has already seen it.
	 */
	if (qp->s_acked == qp->s_cur) {
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		qp->s_acked = qp->s_cur;
		wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
		if (qp->s_acked != qp->s_tail) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
	} else {
		if (++qp->s_acked >= qp->s_size)
			qp->s_acked = 0;
		if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
			qp->s_cur = qp->s_acked;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	}
	return wqe;
}
/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
		     u64 val, struct qib_ctxtdata *rcd)
{
	struct qib_ibport *ibp;
	enum ib_wc_status status;
	struct rvt_swqe *wqe;
	int ret = 0;
	u32 ack_psn;
	int diff;
	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include the ACK'ed request(s).
	 */
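	/*
	 * Bits 31:29 of the AETH identify the response: 0 is an ACK,
	 * 1 an RNR NAK, 3 a NAK, and 2 is reserved; the switch further
	 * below dispatches on exactly this field.
	 */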
	ack_psn = psn;
	if (aeth >> IB_AETH_NAK_SHIFT)
		ack_psn--;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
		/*
		 * RDMA_READ_RESPONSE_ONLY is a special case since
		 * we want to generate completion events for everything
		 * before the RDMA read, copy the data, then generate
		 * the completion for the read.
		 */
		if (wqe->wr.opcode == IB_WR_RDMA_READ &&
		    opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
		/*
		 * If this request is an RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK an RDMA read, and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
			/* Retry this request. */
			if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
				qp->r_flags |= RVT_R_RDMAR_SEQ;
				qib_restart_rc(qp, qp->s_last_psn + 1, 0);
				if (list_empty(&qp->rspwait)) {
					qp->r_flags |= RVT_R_RSP_SEND;
					rvt_get_qp(qp);
					list_add_tail(&qp->rspwait,
						      &rcd->qp_wait_list);
				}
			}
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail;
		}
		if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
			u64 *vaddr = wqe->sg_list[0].vaddr;
			*vaddr = val;
		}
		if (qp->s_num_rd_atomic &&
		    (wqe->wr.opcode == IB_WR_RDMA_READ ||
		     wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
			qp->s_num_rd_atomic--;
			/* Restart sending task if fence is complete */
			if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
			    !qp->s_num_rd_atomic) {
				qp->s_flags &= ~(RVT_S_WAIT_FENCE |
						 RVT_S_WAIT_ACK);
				qib_schedule_send(qp);
			} else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
				qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
						 RVT_S_WAIT_ACK);
				qib_schedule_send(qp);
			}
		}
		wqe = do_rc_completion(qp, wqe, ibp);
		if (qp->s_acked == qp->s_tail)
			break;
	}
	switch (aeth >> IB_AETH_NAK_SHIFT) {
	case 0:         /* ACK */
		this_cpu_inc(*ibp->rvp.rc_acks);
		if (qp->s_acked != qp->s_tail) {
			/*
			 * We are expecting more ACKs so
			 * reset the retransmit timer.
			 */
			rvt_mod_retry_timer(qp);
			/*
			 * We can stop resending the earlier packets and
			 * continue with the next packet the receiver wants.
			 */
			if (qib_cmp24(qp->s_psn, psn) <= 0)
				reset_psn(qp, psn + 1);
		} else {
			/* No more acks - kill all timers */
			rvt_stop_rc_timers(qp);
			if (qib_cmp24(qp->s_psn, psn) <= 0) {
				qp->s_state = OP(SEND_LAST);
				qp->s_psn = psn + 1;
			}
		}
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			qib_schedule_send(qp);
		}
		rvt_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		update_last_psn(qp, psn);
		return 1;
	case 1:         /* RNR NAK */
		ibp->rvp.n_rnr_naks++;
		if (qp->s_acked == qp->s_tail)
			goto bail;
		if (qp->s_flags & RVT_S_WAIT_RNR)
			goto bail;
		if (qp->s_rnr_retry == 0) {
			status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
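		/*
		 * An RNR retry count of 7 is special: per the IB spec it
		 * means "retry forever", so the counter is only decremented
		 * for values below 7.
		 */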
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;

		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);

		ibp->rvp.n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;

		reset_psn(qp, psn);

		qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
		rvt_stop_rc_timers(qp);
		rvt_add_rnr_timer(qp, aeth);
		return 0;
	case 3:         /* NAK */
		if (qp->s_acked == qp->s_tail)
			goto bail;
		/* The last valid PSN is the previous PSN. */
		update_last_psn(qp, psn - 1);
		switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
			IB_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			ibp->rvp.n_seq_naks++;
			/*
			 * Back up to the responder's expected PSN.
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			qib_restart_rc(qp, psn, 0);
			qib_schedule_send(qp);
			break;
		case 1: /* Invalid Request */
			status = IB_WC_REM_INV_REQ_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 2: /* Remote Access Error */
			status = IB_WC_REM_ACCESS_ERR;
			ibp->rvp.n_other_naks++;
			goto class_b;

		case 3: /* Remote Operation Error */
			status = IB_WC_REM_OP_ERR;
			ibp->rvp.n_other_naks++;
class_b:
			if (qp->s_last == qp->s_acked) {
				rvt_send_complete(qp, wqe, status);
				rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
			}
			break;

		default:
			/* Ignore other reserved NAK error codes */
		}
		qp->s_retry = qp->s_retry_cnt;
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
	default:                /* 2: reserved */
		/* Ignore reserved NAK codes. */
		goto bail;
	}

bail:
	rvt_stop_rc_timers(qp);
	return ret;
}
/*
 * We have seen an out of sequence RDMA read middle or last packet.
 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
 */
static void rdma_seq_err(struct rvt_qp *qp, struct qib_ibport *ibp, u32 psn,
			 struct qib_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;

	/* Remove QP from retry timer */
	rvt_stop_rc_timers(qp);

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	while (qib_cmp24(psn, wqe->lpsn) > 0) {
		if (wqe->wr.opcode == IB_WR_RDMA_READ ||
		    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			break;
		wqe = do_rc_completion(qp, wqe, ibp);
	}
	ibp->rvp.n_rdma_seq++;
	qp->r_flags |= RVT_R_RDMAR_SEQ;
	qib_restart_rc(qp, qp->s_last_psn + 1, 0);
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_SEND;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
}
/**
 * qib_rc_rcv_resp - process an incoming RC response packet
 * @ibp: the port this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 * @rcd: the context pointer
 *
 * This is called from qib_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static void qib_rc_rcv_resp(struct qib_ibport *ibp,
			    struct ib_other_headers *ohdr,
			    void *data, u32 tlen,
			    struct rvt_qp *qp,
			    u32 opcode,
			    u32 psn, u32 hdrsize, u32 pmtu,
			    struct qib_ctxtdata *rcd)
{
	struct rvt_swqe *wqe;
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	enum ib_wc_status status;
	unsigned long flags;
	int diff;
	u32 pad;
	u32 aeth;
	u64 val;
	if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
		/*
		 * If the ACK'd PSN is on the SDMA busy list, try to make
		 * progress to reclaim SDMA credits.
		 */
		if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
		    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {

			/*
			 * If the send tasklet is not running, attempt to
			 * progress SDMA.
			 */
			if (!(qp->s_flags & RVT_S_BUSY)) {
				/* Acquire SDMA Lock */
				spin_lock_irqsave(&ppd->sdma_lock, flags);
				/* Invoke sdma make progress */
				qib_sdma_make_progress(ppd);
				/* Release SDMA Lock */
				spin_unlock_irqrestore(&ppd->sdma_lock,
						       flags);
			}
		}
	}
	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
		goto ack_done;

	/* Ignore invalid responses. */
	if (qib_cmp24(psn, READ_ONCE(qp->s_next_psn)) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = qib_cmp24(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			aeth = be32_to_cpu(ohdr->u.aeth);
			if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
				rvt_get_credit(qp, aeth);
		}
		goto ack_done;
	}
	/*
	 * Skip everything other than the PSN we expect, if we are waiting
	 * for a reply to a restarted RDMA read or atomic op.
	 */
	if (qp->r_flags & RVT_R_RDMAR_SEQ) {
		if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
			goto ack_done;
		qp->r_flags &= ~RVT_R_RDMAR_SEQ;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_done;
	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
	status = IB_WC_SUCCESS;
	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
		else
			val = 0;
		if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_middle;
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
read_middle:
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_len_err;
		if (unlikely(pmtu >= qp->s_rdma_read_len))
			goto ack_len_err;

		/*
		 * We got a response so update the timeout.
		 * 4.096 usec. * (1 << qp->timeout)
		 */
		rvt_mod_retry_timer(qp);
		if (qp->s_flags & RVT_S_WAIT_ACK) {
			qp->s_flags &= ~RVT_S_WAIT_ACK;
			qib_schedule_send(qp);
		}

		if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
			qp->s_retry = qp->s_retry_cnt;

		/*
		 * Update the RDMA receive state but do the copy w/o
		 * holding the locks and blocking interrupts.
		 */
		qp->s_rdma_read_len -= pmtu;
		update_last_psn(qp, psn);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		rvt_copy_sge(qp, &qp->s_rdma_read_sge,
			     data, pmtu, false, false);
		goto bail;
	case OP(RDMA_READ_RESPONSE_ONLY):
		aeth = be32_to_cpu(ohdr->u.aeth);
		if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
			goto ack_done;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
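		/*
		 * PadCnt is bits 21:20 of the first BTH dword; payloads
		 * are padded to a multiple of four bytes, so zero to three
		 * pad bytes must be excluded from the data length.
		 */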
		/*
		 * Check that the data size is >= 0 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen < (hdrsize + pad + 8)))
			goto ack_len_err;
		/*
		 * If this is a response to a resent RDMA read, we
		 * have to be careful to copy the data to the right
		 * location.
		 */
		wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
		qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
						  wqe, psn, pmtu);
		goto read_last;
	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
			goto ack_seq_err;
		if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
			goto ack_op_err;
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 8)))
			goto ack_len_err;
read_last:
		tlen -= hdrsize + pad + 8;
		if (unlikely(tlen != qp->s_rdma_read_len))
			goto ack_len_err;
		aeth = be32_to_cpu(ohdr->u.aeth);
		rvt_copy_sge(qp, &qp->s_rdma_read_sge,
			     data, tlen, false, false);
		WARN_ON(qp->s_rdma_read_sge.num_sge);
		(void) do_rc_ack(qp, aeth, psn,
				 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
		goto ack_done;
	}
ack_op_err:
	status = IB_WC_LOC_QP_OP_ERR;
	goto ack_err;
ack_seq_err:
	rdma_seq_err(qp, ibp, psn, rcd);
	goto ack_done;
ack_len_err:
	status = IB_WC_LOC_LEN_ERR;
ack_err:
	if (qp->s_last == qp->s_acked) {
		rvt_send_complete(qp, wqe, status);
		rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	}
ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
/**
 * qib_rc_rcv_error - process an incoming duplicate or error RC packet
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @rcd: the context pointer
 *
 * This is called from qib_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent.
 */
static int qib_rc_rcv_error(struct ib_other_headers *ohdr, void *data,
			    struct rvt_qp *qp, u32 opcode, u32 psn,
			    int diff, struct qib_ctxtdata *rcd)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ack_entry *e;
	unsigned long flags;
	u8 i, prev;
	int old_req;
	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if we already sent one.
		 */
		if (!qp->r_nak_state) {
			ibp->rvp.n_rc_seqnak++;
			qp->r_nak_state = IB_NAK_PSN_ERROR;
			/* Use the expected PSN. */
			qp->r_ack_psn = qp->r_psn;
			/*
			 * Wait to send the sequence NAK until all packets
			 * in the receive queue have been processed.
			 * Otherwise, we end up propagating congestion.
			 */
			if (list_empty(&qp->rspwait)) {
				qp->r_flags |= RVT_R_RSP_NAK;
				rvt_get_qp(qp);
				list_add_tail(&qp->rspwait,
					      &rcd->qp_wait_list);
			}
		}
		goto done;
	}
	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 *
	 * First, find where this duplicate PSN falls within the
	 * ACKs previously sent.
	 * old_req is true if there is an older response that is scheduled
	 * to be sent before sending this one.
	 */
	e = NULL;
	old_req = 1;
	ibp->rvp.n_rc_dupreq++;

	spin_lock_irqsave(&qp->s_lock, flags);
	for (i = qp->r_head_ack_queue; ; i = prev) {
		if (i == qp->s_tail_ack_queue)
			old_req = 0;
		if (i)
			prev = i - 1;
		else
			prev = QIB_MAX_RDMA_ATOMIC;
		if (prev == qp->r_head_ack_queue) {
			e = NULL;
			break;
		}
		e = &qp->s_ack_queue[prev];
		if (qib_cmp24(psn, e->psn) >= 0) {
			if (prev == qp->s_tail_ack_queue &&
			    qib_cmp24(psn, e->lpsn) <= 0)
				old_req = 0;
			break;
		}
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST): {
		struct ib_reth *reth;

		/*
		 * If we didn't find the RDMA read request in the ack queue,
		 * we can ignore this request.
		 */
		if (!e || e->opcode != OP(RDMA_READ_REQUEST))
			goto unlock_done;
		/* RETH comes after BTH */
		reth = &ohdr->u.rc.reth;
		/*
		 * Address range must be a subset of the original
		 * request and start on pmtu boundaries.
		 * We reuse the old ack_queue slot since the requester
		 * should not back up and request an earlier PSN for the
		 * same request.
		 */
		offset = ((psn - e->psn) & QIB_PSN_MASK) *
			qp->pmtu;
		len = be32_to_cpu(reth->length);
		if (unlikely(offset + len != e->rdma_sge.sge_length))
			goto unlock_done;
		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		if (len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
					 IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto unlock_done;
		} else {
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		qp->s_tail_ack_queue = prev;
		break;
	}
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		/*
		 * If we didn't find the atomic request in the ack queue
		 * or the send tasklet is already backed up to send an
		 * earlier entry, we can ignore this request.
		 */
		if (!e || e->opcode != (u8) opcode || old_req)
			goto unlock_done;
		qp->s_tail_ack_queue = prev;
		break;
	}
	default:
		/*
		 * Ignore this operation if it doesn't request an ACK
		 * or an earlier RDMA read or atomic is going to be resent.
		 */
		if (!(psn & IB_BTH_REQ_ACK) || old_req)
			goto unlock_done;
		/*
		 * Resend the most recent ACK if this request is
		 * after all the previous RDMA reads and atomics.
		 */
		if (i == qp->r_head_ack_queue) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->r_psn - 1;
			goto send_ack;
		}
		/*
		 * Try to send a simple ACK to work around a Mellanox bug
		 * which doesn't accept an RDMA read response or atomic
		 * response as an ACK for earlier SENDs or RDMA writes.
		 */
		if (!(qp->s_flags & RVT_S_RESP_PENDING)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qp->r_nak_state = 0;
			qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
			goto send_ack;
		}
		/*
		 * Resend the RDMA read or atomic op which
		 * ACKs this duplicate request.
		 */
		qp->s_tail_ack_queue = i;
		break;
	}
	qp->s_ack_state = OP(ACKNOWLEDGE);
	qp->s_flags |= RVT_S_RESP_PENDING;
	qp->r_nak_state = 0;
	qib_schedule_send(qp);
unlock_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);

static inline void qib_update_ack_queue(struct rvt_qp *qp, unsigned n)
{
	unsigned next = n + 1;

	if (next > QIB_MAX_RDMA_ATOMIC)
		next = 0;
	qp->s_tail_ack_queue = next;
	qp->s_ack_state = OP(ACKNOWLEDGE);
}
/**
 * qib_rc_rcv - process an incoming RC packet
 * @rcd: the context pointer
 * @hdr: the header of this packet
 * @has_grh: true if the header has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 *
 * This is called from qib_qp_rcv() to process an incoming RC packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct qib_ibport *ibp = &rcd->ppd->ibport_data;
	struct ib_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	int diff;
	struct ib_reth *reth;
	unsigned long flags;
	int ret;
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;       /* LRH + BTH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
	}

	opcode = be32_to_cpu(ohdr->bth[0]);
	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
		return;

	psn = be32_to_cpu(ohdr->bth[2]);
	opcode >>= 24;
	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
				hdrsize, pmtu, rcd);
		return;
	}
	/* Compute 24 bits worth of difference. */
	diff = qib_cmp24(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
			return;
		goto send_ack;
	}
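	/*
	 * qib_cmp24() yields the signed 24-bit difference of two PSNs,
	 * e.g. comparing 0x000001 against 0xffffff gives +2, correctly
	 * treating the first PSN as "later" across the wrap.
	 */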
	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		/*
		 * Note that it is up to the requester to not send a new
		 * RDMA read or atomic operation before receiving an ACK
		 * for the previous operation.
		 */
		break;
	}
	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		ret = rvt_get_rwqe(qp, false);
		qp->r_rcv_len = 0;
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
		break;
	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		ret = rvt_get_rwqe(qp, true);
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto nack_op_err;
		if (!ret)
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		/* fall through -- for SEND_ONLY_WITH_IMMEDIATE */
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST):
	case OP(RDMA_WRITE_LAST):
no_immediate_data:
		wc.wc_flags = 0;
		wc.ex.imm_data = 0;
send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
		rvt_put_ss(&qp->r_sge);
		qp->r_msn++;
		if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		else
			wc.opcode = IB_WC_RECV;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
		wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
		break;
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto nack_inv;
		reth = &ohdr->u.rc.reth;
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
					 rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto nack_acc;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto no_immediate_data;
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto nack_op_err;
		if (!ret) {
			rvt_put_ss(&qp->r_sge);
			goto rnr_nak;
		}
		wc.ex.imm_data = ohdr->u.rc.imm_data;
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(RDMA_READ_REQUEST): {
		struct rvt_ack_entry *e;
		u32 len;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		/* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
		if (next > QIB_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			qib_update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		reth = &ohdr->u.rc.reth;
		len = be32_to_cpu(reth->length);
		if (len) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
					 rkey, IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto nack_acc_unlck;
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			qp->r_psn += rvt_div_mtu(qp, len - 1);
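			/*
			 * A read response occupies one PSN per MTU of data:
			 * e.g. len = 8192 with a 2048-byte MTU spans four
			 * packets, so r_psn advances by
			 * rvt_div_mtu(qp, 8191) = 3 here plus 1 below.
			 */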
		} else {
			e->rdma_sge.mr = NULL;
			e->rdma_sge.vaddr = NULL;
			e->rdma_sge.length = 0;
			e->rdma_sge.sge_length = 0;
		}
		e->opcode = opcode;
		e->sent = 0;
		e->psn = psn;
		e->lpsn = qp->r_psn;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		qp->r_msn++;
		qp->r_psn++;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		qib_schedule_send(qp);
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		struct rvt_ack_entry *e;
		u64 vaddr;
		atomic64_t *maddr;
		u64 sdata;
		u32 rkey;
		u8 next;

		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_inv;
		next = qp->r_head_ack_queue + 1;
		if (next > QIB_MAX_RDMA_ATOMIC)
			next = 0;
		spin_lock_irqsave(&qp->s_lock, flags);
		if (unlikely(next == qp->s_tail_ack_queue)) {
			if (!qp->s_ack_queue[next].sent)
				goto nack_inv_unlck;
			qib_update_ack_queue(qp, next);
		}
		e = &qp->s_ack_queue[qp->r_head_ack_queue];
		if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
		ateth = &ohdr->u.atomic_eth;
		vaddr = get_ib_ateth_vaddr(ateth);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv_unlck;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  vaddr, rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc_unlck;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = get_ib_ateth_swap(ateth);
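		/*
		 * Both atomics must return the value at the target address
		 * *before* the operation: atomic64_add_return() yields the
		 * new value, so sdata is subtracted back out for FETCH_ADD,
		 * while cmpxchg() already returns the prior value for
		 * COMPARE_SWAP.
		 */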
		e->atomic_data = (opcode == OP(FETCH_ADD)) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      get_ib_ateth_compare(ateth),
				      sdata);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		qp->r_state = opcode;
		qp->r_nak_state = 0;
		qp->r_head_ack_queue = next;

		/* Schedule the send tasklet. */
		qp->s_flags |= RVT_S_RESP_PENDING;
		qib_schedule_send(qp);
	default:
		/* NAK unknown opcodes. */
		goto nack_inv;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	qp->r_ack_psn = psn;
	qp->r_nak_state = 0;
	/* Send an ACK if requested or required. */
	if (psn & (1 << 31))
		goto send_ack;
	return;
rnr_nak:
	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
	qp->r_ack_psn = qp->r_psn;
	/* Queue RNR NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	return;
nack_op_err:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	return;
nack_inv_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_inv:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
	qp->r_nak_state = IB_NAK_INVALID_REQUEST;
	qp->r_ack_psn = qp->r_psn;
	/* Queue NAK for later */
	if (list_empty(&qp->rspwait)) {
		qp->r_flags |= RVT_R_RSP_NAK;
		rvt_get_qp(qp);
		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
	}
	return;
nack_acc_unlck:
	spin_unlock_irqrestore(&qp->s_lock, flags);
nack_acc:
	rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
	qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
	qp->r_ack_psn = qp->r_psn;
send_ack:
	qib_send_rc_ack(qp);
	return;

sunlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
}