/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/workqueue.h>

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_ll2_if.h>
#include "qed.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_rdma.h"

#define QED_LL2_RX_REGISTERED(ll2)	((ll2)->rx_queue.b_cb_registered)
#define QED_LL2_TX_REGISTERED(ll2)	((ll2)->tx_queue.b_cb_registered)

#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)

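/* Book-keeping for the buffer-based LL2 interface exposed through the
 * qed_dev: the list of pre-allocated Rx buffers, their count and size,
 * the connection handle and the protocol driver's callbacks.
 */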
struct qed_cb_ll2_info {
	int rx_cnt;
	u32 rx_size;
	u8 handle;

	/* Lock protecting LL2 buffer lists in sleepless context */
	spinlock_t lock;
	struct list_head list;

	const struct qed_ll2_cb_ops *cbs;
	void *cb_cookie;
};

struct qed_ll2_buffer {
	struct list_head list;
	void *data;
	dma_addr_t phys_addr;
};

static void qed_ll2b_complete_tx_packet(void *cxt,
					u8 connection_handle,
					void *cookie,
					dma_addr_t first_frag_addr,
					bool b_last_fragment,
					bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_dev *cdev = p_hwfn->cdev;
	struct sk_buff *skb = cookie;

	/* All we need to do is release the mapping */
	dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);

	if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
		cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
				      b_last_fragment);

	dev_kfree_skb_any(skb);
}

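/* Allocate and DMA-map a single Rx buffer for the cdev-level LL2
 * connection. May be called from the Rx completion path, hence GFP_ATOMIC.
 */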
static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
				u8 **data, dma_addr_t *phys_addr)
{
	*data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
	if (!(*data)) {
		DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
		return -ENOMEM;
	}

	*phys_addr = dma_map_single(&cdev->pdev->dev,
				    ((*data) + NET_SKB_PAD),
				    cdev->ll2->rx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
		DP_INFO(cdev, "Failed to map LL2 buffer data\n");
		kfree((*data));
		return -ENOMEM;
	}

	return 0;
}

static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
				  struct qed_ll2_buffer *buffer)
{
	spin_lock_bh(&cdev->ll2->lock);

	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
	kfree(buffer->data);
	list_del(&buffer->list);

	cdev->ll2->rx_cnt--;
	if (!cdev->ll2->rx_cnt)
		DP_INFO(cdev, "All LL2 entries were removed\n");

	spin_unlock_bh(&cdev->ll2->lock);

	return 0;
}

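/* Release every remaining Rx buffer of the cdev-level LL2 connection */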
static void qed_ll2_kill_buffers(struct qed_dev *cdev)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;

	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

static void qed_ll2b_complete_rx_packet(void *cxt,
					struct qed_ll2_comp_rx_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_buffer *buffer = data->cookie;
	struct qed_dev *cdev = p_hwfn->cdev;
	dma_addr_t new_phys_addr;
	struct sk_buff *skb;
	bool reuse = false;
	int rc = -EINVAL;
	u8 *new_data;

	DP_VERBOSE(cdev,
		   (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
		   "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
		   (u64)data->rx_buf_addr,
		   data->u.placement_offset,
		   data->length.packet_length,
		   data->parse_flags,
		   data->vlan, data->opaque_data_0, data->opaque_data_1);

	if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
		print_hex_dump(KERN_INFO, "",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       buffer->data, data->length.packet_length, false);
	}

	/* Determine if data is valid */
	if (data->length.packet_length < ETH_HLEN)
		reuse = true;

	/* Allocate a replacement for buffer; Reuse upon failure */
	if (!reuse)
		rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
					  &new_phys_addr);

	/* If need to reuse or there's no replacement buffer, repost this */
	if (rc)
		goto out_post;
	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
			 cdev->ll2->rx_size, DMA_FROM_DEVICE);

	skb = build_skb(buffer->data, 0);
	if (!skb) {
		DP_INFO(cdev, "Failed to build SKB\n");
		kfree(buffer->data);
		goto out_post1;
	}

	data->u.placement_offset += NET_SKB_PAD;
	skb_reserve(skb, data->u.placement_offset);
	skb_put(skb, data->length.packet_length);
	skb_checksum_none_assert(skb);

	/* Get partial ethernet information instead of eth_type_trans(),
	 * since we don't have an associated net_device.
	 */
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	/* Pass SKB onward */
	if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
		if (data->vlan)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       data->vlan);
		cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
				      data->opaque_data_0,
				      data->opaque_data_1);
	} else {
		DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
				    QED_MSG_LL2 | QED_MSG_STORAGE),
			   "Dropping the packet\n");
		kfree(buffer->data);
	}

out_post1:
	/* Update Buffer information and update FW producer */
	buffer->data = new_data;
	buffer->phys_addr = new_phys_addr;

out_post:
	rc = qed_ll2_post_rx_buffer(p_hwfn, cdev->ll2->handle,
				    buffer->phys_addr, 0, buffer, 1);
	if (rc)
		qed_ll2_dealloc_buffer(cdev, buffer);
}

static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						    u8 connection_handle,
						    bool b_lock,
						    bool b_only_active)
{
	struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;

	if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
		return NULL;

	if (!p_hwfn->p_ll2_info)
		return NULL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	if (b_only_active) {
		if (b_lock)
			mutex_lock(&p_ll2_conn->mutex);
		if (p_ll2_conn->b_active)
			p_ret = p_ll2_conn;
		if (b_lock)
			mutex_unlock(&p_ll2_conn->mutex);
	} else {
		p_ret = p_ll2_conn;
	}

	return p_ret;
}

static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
						  u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
						       u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
}

static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
							   *p_hwfn,
							   u8 connection_handle)
{
	return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
}

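/* Empty the Tx queue of a halted connection, returning OOO buffers to the
 * free pool and completing all other pending packets through the
 * registered tx_release_cb callback.
 */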
static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	bool b_last_packet = false, b_last_frag = false;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_queue *p_tx;
	unsigned long flags = 0;
	dma_addr_t tx_frag;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_tx = &p_ll2_conn->tx_queue;

	spin_lock_irqsave(&p_tx->lock, flags);
	while (!list_empty(&p_tx->active_descq)) {
		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_del(&p_pkt->list_entry);
		b_last_packet = list_empty(&p_tx->active_descq);
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
		spin_unlock_irqrestore(&p_tx->lock, flags);
		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			p_tx->cur_completing_packet = *p_pkt;
			p_tx->cur_completing_bd_idx = 1;
			b_last_frag =
				p_tx->cur_completing_bd_idx == p_pkt->bd_used;
			tx_frag = p_pkt->bds_set[0].tx_frag;
			p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      p_pkt->cookie,
						      tx_frag,
						      b_last_frag,
						      b_last_packet);
		}
		spin_lock_irqsave(&p_tx->lock, flags);
	}
	spin_unlock_irqrestore(&p_tx->lock, flags);
}

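/* Tx SB callback: walk the BDs the firmware reported as consumed and
 * complete the corresponding packets via tx_comp_cb. The queue lock is
 * dropped around each callback invocation.
 */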
static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
	struct qed_ll2_tx_packet *p_pkt;
	bool b_last_frag = false;
	unsigned long flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->b_completing_packet) {
		rc = -EBUSY;
		goto out;
	}

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			goto out;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			goto out;

		p_tx->b_completing_packet = true;
		p_tx->cur_completing_packet = *p_pkt;
		num_bds_in_packet = p_pkt->bd_used;
		list_del(&p_pkt->list_entry);

		if (num_bds < num_bds_in_packet) {
			DP_NOTICE(p_hwfn,
				  "Rest of BDs does not cover whole packet\n");
			goto out;
		}

		num_bds -= num_bds_in_packet;
		p_tx->bds_idx += num_bds_in_packet;
		while (num_bds_in_packet--)
			qed_chain_consume(&p_tx->txq_chain);

		p_tx->cur_completing_bd_idx = 1;
		b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		spin_unlock_irqrestore(&p_tx->lock, flags);

		p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
					   p_ll2_conn->my_id,
					   p_pkt->cookie,
					   p_pkt->bds_set[0].tx_frag,
					   b_last_frag, !num_bds);

		spin_lock_irqsave(&p_tx->lock, flags);
	}

	p_tx->b_completing_packet = false;
	rc = 0;
out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
	data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
	data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
	data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
	data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);

	data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
}

static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
				  union core_rx_cqe_union *p_cqe,
				  struct qed_ll2_comp_rx_data *data)
{
	data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
	data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags);
	data->length.packet_length =
	    le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
	data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
	data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
	data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
	data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
}

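/* Handle a slowpath Rx CQE; only RX_QUEUE_FLUSH ramrod completions are
 * expected here, and they are forwarded to the optional slowpath_cb.
 */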
static int
qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
			struct qed_ll2_info *p_ll2_conn,
			union core_rx_cqe_union *p_cqe,
			unsigned long *p_lock_flags)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_slow_path_cqe *sp_cqe;

	sp_cqe = &p_cqe->rx_cqe_sp;
	if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
		DP_NOTICE(p_hwfn,
			  "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
			  sp_cqe->ramrod_cmd_id);
		return -EINVAL;
	}

	if (!p_ll2_conn->cbs.slowpath_cb) {
		DP_NOTICE(p_hwfn,
			  "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
		return -EINVAL;
	}

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);

	p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
				    p_ll2_conn->my_id,
				    le32_to_cpu(sp_cqe->opaque_data.data[0]),
				    le32_to_cpu(sp_cqe->opaque_data.data[1]));

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}

static int
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_conn,
			      union core_rx_cqe_union *p_cqe,
			      unsigned long *p_lock_flags, bool b_last_cqe)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_comp_rx_data data;

	if (!list_empty(&p_rx->active_descq))
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
	if (!p_pkt) {
		DP_NOTICE(p_hwfn,
			  "[%d] LL2 Rx completion but active_descq is empty\n",
			  p_ll2_conn->input.conn_type);

		return -EIO;
	}
	list_del(&p_pkt->list_entry);

	if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
		qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
	else
		qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
	if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
		DP_NOTICE(p_hwfn,
			  "Mismatch between active_descq and the LL2 Rx chain\n");

	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

	data.connection_handle = p_ll2_conn->my_id;
	data.cookie = p_pkt->cookie;
	data.rx_buf_addr = p_pkt->rx_buf_addr;
	data.b_last_packet = b_last_cqe;

	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
	p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);

	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

	return 0;
}

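/* Rx SB callback: consume CQEs up to the firmware consumer index and
 * dispatch them according to their type (slowpath, GSI or regular).
 */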
static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	unsigned long flags = 0;
	int rc = 0;

	spin_lock_irqsave(&p_rx->lock, flags);
	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

	while (cq_new_idx != cq_old_idx) {
		bool b_last_cqe = (cq_new_idx == cq_old_idx);

		cqe =
		    (union core_rx_cqe_union *)
		    qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);

		DP_VERBOSE(p_hwfn,
			   QED_MSG_LL2,
			   "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);

		switch (cqe->rx_cqe_sp.type) {
		case CORE_RX_CQE_TYPE_SLOW_PATH:
			rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
						     cqe, &flags);
			break;
		case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
		case CORE_RX_CQE_TYPE_REGULAR:
			rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
							   cqe, &flags,
							   b_last_cqe);
			break;
		default:
			rc = -EIO;
		}
	}

	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
{
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags = 0;

	p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	while (!list_empty(&p_rx->active_descq)) {
		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);
		if (!p_pkt)
			break;
		list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
		spin_unlock_irqrestore(&p_rx->lock, flags);

		if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
			struct qed_ooo_buffer *p_buffer;

			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
		} else {
			dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
			void *cookie = p_pkt->cookie;
			bool b_last;

			b_last = list_empty(&p_rx->active_descq);
			p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
						      p_ll2_conn->my_id,
						      cookie,
						      rx_buf_addr, b_last);
		}
		spin_lock_irqsave(&p_rx->lock, flags);
	}
	spin_unlock_irqrestore(&p_rx->lock, flags);
}

static int
qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
				struct core_rx_slow_path_cqe *p_cqe)
{
	struct ooo_opaque *iscsi_ooo;
	u32 cid;

	if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
		return 0;

	iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
	if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
		return 0;

	/* Need to make a flush */
	cid = le32_to_cpu(iscsi_ooo->cid);
	qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);

	return 0;
}

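/* Loopback Rx handler for the OOO connection: parse each CQE's opaque
 * data and update the out-of-order isle database accordingly.
 */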
static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	u16 packet_length = 0, parse_flags = 0, vlan = 0;
	struct qed_ll2_rx_packet *p_pkt = NULL;
	u32 num_ooo_add_to_peninsula = 0, cid;
	union core_rx_cqe_union *cqe = NULL;
	u16 cq_new_idx = 0, cq_old_idx = 0;
	struct qed_ooo_buffer *p_buffer;
	struct ooo_opaque *iscsi_ooo;
	u8 placement_offset = 0;
	u8 cqe_type;

	cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
	cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
	if (cq_new_idx == cq_old_idx)
		return 0;

	while (cq_new_idx != cq_old_idx) {
		struct core_rx_fast_path_cqe *p_cqe_fp;

		cqe = qed_chain_consume(&p_rx->rcq_chain);
		cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
		cqe_type = cqe->rx_cqe_sp.type;

		if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
			if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
							    &cqe->rx_cqe_sp))
				continue;

		if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
			DP_NOTICE(p_hwfn,
				  "Got a non-regular LB LL2 completion [type 0x%02x]\n",
				  cqe_type);
			return -EINVAL;
		}
		p_cqe_fp = &cqe->rx_cqe_fp;

		placement_offset = p_cqe_fp->placement_offset;
		parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
		packet_length = le16_to_cpu(p_cqe_fp->packet_length);
		vlan = le16_to_cpu(p_cqe_fp->vlan);
		iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
		qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
					   iscsi_ooo);
		cid = le32_to_cpu(iscsi_ooo->cid);

		/* Process delete isle first */
		if (iscsi_ooo->drop_size)
			qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
					     iscsi_ooo->drop_isle,
					     iscsi_ooo->drop_size);

		if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
			continue;

		/* Now process create/add/join isles */
		if (list_empty(&p_rx->active_descq)) {
			DP_NOTICE(p_hwfn,
				  "LL2 OOO RX chain has no submitted buffers\n");
			return -EIO;
		}

		p_pkt = list_first_entry(&p_rx->active_descq,
					 struct qed_ll2_rx_packet, list_entry);

		if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
		    (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
			if (!p_pkt) {
				DP_NOTICE(p_hwfn,
					  "LL2 OOO RX packet is not valid\n");
				return -EIO;
			}
			list_del(&p_pkt->list_entry);
			p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
			p_buffer->packet_length = packet_length;
			p_buffer->parse_flags = parse_flags;
			p_buffer->vlan = vlan;
			p_buffer->placement_offset = placement_offset;
			qed_chain_consume(&p_rx->rxq_chain);
			list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

			switch (iscsi_ooo->ooo_opcode) {
			case TCP_EVENT_ADD_NEW_ISLE:
				qed_ooo_add_new_isle(p_hwfn,
						     p_hwfn->p_ooo_info,
						     cid,
						     iscsi_ooo->ooo_isle,
						     p_buffer);
				break;
			case TCP_EVENT_ADD_ISLE_RIGHT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_RIGHT_BUF);
				break;
			case TCP_EVENT_ADD_ISLE_LEFT:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				break;
			case TCP_EVENT_JOIN:
				qed_ooo_add_new_buffer(p_hwfn,
						       p_hwfn->p_ooo_info,
						       cid,
						       iscsi_ooo->ooo_isle +
						       1,
						       p_buffer,
						       QED_OOO_LEFT_BUF);
				qed_ooo_join_isles(p_hwfn,
						   p_hwfn->p_ooo_info,
						   cid, iscsi_ooo->ooo_isle);
				break;
			case TCP_EVENT_ADD_PEN:
				num_ooo_add_to_peninsula++;
				qed_ooo_put_ready_buffer(p_hwfn,
							 p_hwfn->p_ooo_info,
							 p_buffer, true);
				break;
			}
		} else {
			DP_NOTICE(p_hwfn,
				  "Unexpected event (%d) TX OOO completion\n",
				  iscsi_ooo->ooo_opcode);
		}
	}

	return 0;
}

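/* Transmit every buffer that the OOO logic marked as ready, using a
 * single-BD packet per buffer.
 */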
static void
qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_tx_pkt_info tx_pkt;
	struct qed_ooo_buffer *p_buffer;
	u16 l4_hdr_offset_w;
	dma_addr_t first_frag;
	u8 bd_flags;
	int rc;

	/* Submit Tx buffers here */
	while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
						    p_hwfn->p_ooo_info))) {
		bd_flags = 0;
		l4_hdr_offset_w = 0;

		first_frag = p_buffer->rx_buffer_phys_addr +
			     p_buffer->placement_offset;
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
		SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);

		memset(&tx_pkt, 0, sizeof(tx_pkt));
		tx_pkt.num_of_bds = 1;
		tx_pkt.vlan = p_buffer->vlan;
		tx_pkt.bd_flags = bd_flags;
		tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
		switch (p_ll2_conn->tx_dest) {
		case CORE_TX_DEST_NW:
			tx_pkt.tx_dest = QED_LL2_TX_DEST_NW;
			break;
		case CORE_TX_DEST_LB:
			tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
			break;
		case CORE_TX_DEST_DROP:
		default:
			tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
			break;
		}
		tx_pkt.first_frag = first_frag;
		tx_pkt.first_frag_len = p_buffer->packet_length;
		tx_pkt.cookie = p_buffer;

		rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
					       &tx_pkt, true);
		if (rc) {
			qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
						 p_buffer, false);
			break;
		}
	}
}

static void
qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
			  struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;
	int rc;

	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		rc = qed_ll2_post_rx_buffer(p_hwfn,
					    p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr,
					    0, p_buffer, true);
		if (rc) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			break;
		}
	}
}

static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	int rc;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
	if (rc)
		return rc;

	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
{
	struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	struct qed_ooo_buffer *p_buffer;
	bool b_dont_submit_rx = false;
	u16 new_idx = 0, num_bds = 0;
	int rc;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	new_idx = le16_to_cpu(*p_tx->p_fw_cons);
	num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);

	if (!num_bds)
		return 0;

	while (num_bds) {
		if (list_empty(&p_tx->active_descq))
			return -EINVAL;

		p_pkt = list_first_entry(&p_tx->active_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			return -EINVAL;

		if (p_pkt->bd_used != 1) {
			DP_NOTICE(p_hwfn,
				  "Unexpectedly many BDs(%d) in TX OOO completion\n",
				  p_pkt->bd_used);
			return -EINVAL;
		}

		list_del(&p_pkt->list_entry);

		num_bds--;
		p_tx->bds_idx++;
		qed_chain_consume(&p_tx->txq_chain);

		p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);

		if (b_dont_submit_rx) {
			qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
						p_buffer);
			continue;
		}

		rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
					    p_buffer->rx_buffer_phys_addr, 0,
					    p_buffer, true);
		if (rc != 0) {
			qed_ooo_put_free_buffer(p_hwfn,
						p_hwfn->p_ooo_info, p_buffer);
			b_dont_submit_rx = true;
		}
	}

	qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);

	return 0;
}

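/* Tear down the dedicated LL2 OOO connection used by storage/iWARP */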
static void qed_ll2_stop_ooo(struct qed_hwfn *p_hwfn)
{
	u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;

	DP_VERBOSE(p_hwfn, (QED_MSG_STORAGE | QED_MSG_LL2),
		   "Stopping LL2 OOO queue [%02x]\n", *handle);

	qed_ll2_terminate_connection(p_hwfn, *handle);
	qed_ll2_release_connection(p_hwfn, *handle);
	*handle = QED_LL2_UNUSED_HANDLE;
}

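/* Post the CORE_RAMROD_RX_QUEUE_START ramrod that brings the Rx queue up
 * in firmware, programming the BD/CQE rings, MTU, classification and
 * error-handling policy.
 */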
static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn,
				     u8 action_on_error)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
	struct core_rx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 cqe_pbl_size;
	int rc = 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_rx->rx_sb_index;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
	cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
		       qed_chain_get_pbl_phys(&p_rx->rcq_chain));

	p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
	p_ramrod->inner_vlan_stripping_en =
		p_ll2_conn->input.rx_vlan_removal_en;

	if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
	    p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE)
		p_ramrod->report_outer_vlan = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;
	p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;

	if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
	    p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
	    conn_type != QED_LL2_TYPE_IWARP) {
		p_ramrod->mf_si_bcast_accept_all = 1;
		p_ramrod->mf_si_mcast_accept_all = 1;
	} else {
		p_ramrod->mf_si_bcast_accept_all = 0;
		p_ramrod->mf_si_mcast_accept_all = 0;
	}

	p_ramrod->action_on_error.error_type = action_on_error;
	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct core_tx_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u16 pq_id = 0, pbl_size;
	int rc = -EINVAL;

	if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
		return 0;

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
		p_ll2_conn->tx_stats_en = 0;
	else
		p_ll2_conn->tx_stats_en = 1;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_tx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
	p_ramrod->sb_index = p_tx->tx_sb_index;
	p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
	p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
	p_ramrod->stats_id = p_ll2_conn->tx_stats_id;

	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
		       qed_chain_get_pbl_phys(&p_tx->txq_chain));
	pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
	p_ramrod->pbl_size = cpu_to_le16(pbl_size);

	switch (p_ll2_conn->input.tx_tc) {
	case PURE_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
		break;
	case PKT_LB_TC:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
		break;
	default:
		pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
		break;
	}

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	switch (conn_type) {
	case QED_LL2_TYPE_FCOE:
		p_ramrod->conn_type = PROTOCOLID_FCOE;
		break;
	case QED_LL2_TYPE_ISCSI:
		p_ramrod->conn_type = PROTOCOLID_ISCSI;
		break;
	case QED_LL2_TYPE_ROCE:
		p_ramrod->conn_type = PROTOCOLID_ROCE;
		break;
	case QED_LL2_TYPE_IWARP:
		p_ramrod->conn_type = PROTOCOLID_IWARP;
		break;
	case QED_LL2_TYPE_OOO:
		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
			p_ramrod->conn_type = PROTOCOLID_ISCSI;
		else
			p_ramrod->conn_type = PROTOCOLID_IWARP;
		break;
	default:
		p_ramrod->conn_type = PROTOCOLID_ETH;
		DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
	}

	p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		return rc;

	rc = qed_db_recovery_add(p_hwfn->cdev, p_tx->doorbell_addr,
				 &p_tx->db_msg, DB_REC_WIDTH_32B,
				 DB_REC_KERNEL);
	return rc;
}

static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct core_rx_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.core_rx_queue_stop;

	p_ramrod->complete_event_flg = 1;
	p_ramrod->queue_id = p_ll2_conn->queue_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
				    struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_ll2_conn->cid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 CORE_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_CORE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
			      struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_rx_packet *p_descq;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.rx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_NEXT_PTR,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_bd),
			     &p_ll2_info->rx_queue.rxq_chain, NULL);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
		goto out;
	}

	capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
	p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
			  GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
		goto out;
	}
	p_ll2_info->rx_queue.descq_array = p_descq;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.rx_num_desc,
			     sizeof(struct core_rx_fast_path_cqe),
			     &p_ll2_info->rx_queue.rcq_chain, NULL);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
		goto out;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);

out:
	return rc;
}

static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
					 struct qed_ll2_info *p_ll2_info)
{
	struct qed_ll2_tx_packet *p_descq;
	u32 desc_size;
	u32 capacity;
	int rc = 0;

	if (!p_ll2_info->input.tx_num_desc)
		goto out;

	rc = qed_chain_alloc(p_hwfn->cdev,
			     QED_CHAIN_USE_TO_CONSUME_PRODUCE,
			     QED_CHAIN_MODE_PBL,
			     QED_CHAIN_CNT_TYPE_U16,
			     p_ll2_info->input.tx_num_desc,
			     sizeof(struct core_tx_bd),
			     &p_ll2_info->tx_queue.txq_chain, NULL);
	if (rc)
		goto out;

	capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
	/* First element is part of the packet, rest are flexibly added */
	desc_size = (sizeof(*p_descq) +
		     (p_ll2_info->input.tx_max_bds_per_packet - 1) *
		     sizeof(p_descq->bds_set));

	p_descq = kcalloc(capacity, desc_size, GFP_KERNEL);
	if (!p_descq) {
		rc = -ENOMEM;
		goto out;
	}
	p_ll2_info->tx_queue.descq_mem = p_descq;

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
		   p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);

out:
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
			  p_ll2_info->input.tx_num_desc);
	return rc;
}

static int
qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
			       struct qed_ll2_info *p_ll2_info, u16 mtu)
{
	struct qed_ooo_buffer *p_buf = NULL;
	void *p_virt;
	u16 buf_idx;
	int rc = 0;

	if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)
		return rc;

	/* Correct number of requested OOO buffers if needed */
	if (!p_ll2_info->input.rx_num_ooo_buffers) {
		u16 num_desc = p_ll2_info->input.rx_num_desc;

		if (!num_desc)
			return -EINVAL;
		p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
	}

	for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
	     buf_idx++) {
		p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
		if (!p_buf) {
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
		p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
					 ETH_CACHE_LINE_SIZE - 1) &
					~(ETH_CACHE_LINE_SIZE - 1);
		p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					    p_buf->rx_buffer_size,
					    &p_buf->rx_buffer_phys_addr,
					    GFP_KERNEL);
		if (!p_virt) {
			kfree(p_buf);
			rc = -ENOMEM;
			goto out;
		}

		p_buf->rx_buffer_virt_addr = p_virt;
		qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_LL2,
		   "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
		   p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);

out:
	return rc;
}

static int
qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
{
	if (!cbs || (!cbs->rx_comp_cb ||
		     !cbs->rx_release_cb ||
		     !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
		return -EINVAL;

	p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
	p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
	p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
	p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
	p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
	p_ll2_info->cbs.cookie = cbs->cookie;

	return 0;
}

static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
{
	switch (err) {
	case QED_LL2_DROP_PACKET:
		return LL2_DROP_PACKET;
	case QED_LL2_DO_NOTHING:
		return LL2_DO_NOTHING;
	case QED_LL2_ASSERT:
		return LL2_ASSERT;
	default:
		return LL2_DO_NOTHING;
	}
}

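/* Reserve a free connection slot and allocate its Rx/Tx chains and
 * descriptor arrays; on success the chosen handle is returned through
 * data->p_connection_handle.
 */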
int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
{
	struct qed_hwfn *p_hwfn = cxt;
	qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
	struct qed_ll2_info *p_ll2_info = NULL;
	u8 i, *p_tx_max;
	int rc;

	if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
		return -EINVAL;

	/* Find a free connection to be used */
	for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
		mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
		if (p_hwfn->p_ll2_info[i].b_active) {
			mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
			continue;
		}

		p_hwfn->p_ll2_info[i].b_active = true;
		p_ll2_info = &p_hwfn->p_ll2_info[i];
		mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
		break;
	}
	if (!p_ll2_info)
		return -EBUSY;

	memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));

	switch (data->input.tx_dest) {
	case QED_LL2_TX_DEST_NW:
		p_ll2_info->tx_dest = CORE_TX_DEST_NW;
		break;
	case QED_LL2_TX_DEST_LB:
		p_ll2_info->tx_dest = CORE_TX_DEST_LB;
		break;
	case QED_LL2_TX_DEST_DROP:
		p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
		break;
	default:
		return -EINVAL;
	}

	if (data->input.conn_type == QED_LL2_TYPE_OOO ||
	    data->input.secondary_queue)
		p_ll2_info->main_func_queue = false;
	else
		p_ll2_info->main_func_queue = true;

	/* Correct maximum number of Tx BDs */
	p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
	if (!*p_tx_max)
		*p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
	else
		*p_tx_max = min_t(u8, *p_tx_max,
				  CORE_LL2_TX_MAX_BDS_PER_PACKET);

	rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
	if (rc) {
		DP_NOTICE(p_hwfn, "Invalid callback functions\n");
		goto q_allocate_fail;
	}

	rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
	if (rc)
		goto q_allocate_fail;

	rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
					    data->input.mtu);
	if (rc)
		goto q_allocate_fail;

	/* Register callbacks for the Rx/Tx queues */
	if (data->input.conn_type == QED_LL2_TYPE_OOO) {
		comp_rx_cb = qed_ll2_lb_rxq_completion;
		comp_tx_cb = qed_ll2_lb_txq_completion;
	} else {
		comp_rx_cb = qed_ll2_rxq_completion;
		comp_tx_cb = qed_ll2_txq_completion;
	}

	if (data->input.rx_num_desc) {
		qed_int_register_cb(p_hwfn, comp_rx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->rx_queue.rx_sb_index,
				    &p_ll2_info->rx_queue.p_fw_cons);
		p_ll2_info->rx_queue.b_cb_registered = true;
	}

	if (data->input.tx_num_desc) {
		qed_int_register_cb(p_hwfn,
				    comp_tx_cb,
				    &p_hwfn->p_ll2_info[i],
				    &p_ll2_info->tx_queue.tx_sb_index,
				    &p_ll2_info->tx_queue.p_fw_cons);
		p_ll2_info->tx_queue.b_cb_registered = true;
	}

	*data->p_connection_handle = i;
	return rc;

q_allocate_fail:
	qed_ll2_release_connection(p_hwfn, i);
	return -ENOMEM;
}

static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	enum qed_ll2_error_handle error_input;
	enum core_error_handle error_mode;
	u8 action_on_error = 0;

	if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
		return 0;

	DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
	error_input = p_ll2_conn->input.ai_err_packet_too_big;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error,
		  CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
	error_input = p_ll2_conn->input.ai_err_no_buf;
	error_mode = qed_ll2_get_error_choice(error_input);
	SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);

	return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}

static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2_conn)
{
	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}

int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_tx_packet *p_pkt;
	struct qed_ll2_rx_queue *p_rx;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;
	u32 i, capacity;
	u32 desc_size;
	u8 qid;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	p_rx = &p_ll2_conn->rx_queue;
	p_tx = &p_ll2_conn->tx_queue;

	qed_chain_reset(&p_rx->rxq_chain);
	qed_chain_reset(&p_rx->rcq_chain);
	INIT_LIST_HEAD(&p_rx->active_descq);
	INIT_LIST_HEAD(&p_rx->free_descq);
	INIT_LIST_HEAD(&p_rx->posting_descq);
	spin_lock_init(&p_rx->lock);
	capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
	for (i = 0; i < capacity; i++)
		list_add_tail(&p_rx->descq_array[i].list_entry,
			      &p_rx->free_descq);
	*p_rx->p_fw_cons = 0;

	qed_chain_reset(&p_tx->txq_chain);
	INIT_LIST_HEAD(&p_tx->active_descq);
	INIT_LIST_HEAD(&p_tx->free_descq);
	INIT_LIST_HEAD(&p_tx->sending_descq);
	spin_lock_init(&p_tx->lock);
	capacity = qed_chain_get_capacity(&p_tx->txq_chain);
	/* First element is part of the packet, rest are flexibly added */
	desc_size = (sizeof(*p_pkt) +
		     (p_ll2_conn->input.tx_max_bds_per_packet - 1) *
		     sizeof(p_pkt->bds_set));

	for (i = 0; i < capacity; i++) {
		p_pkt = p_tx->descq_mem + desc_size * i;
		list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
	}
	p_tx->cur_completing_bd_idx = 0;
	p_tx->bds_idx = 0;
	p_tx->b_completing_packet = false;
	p_tx->cur_send_packet = NULL;
	p_tx->cur_send_frag_num = 0;
	p_tx->cur_completing_frag_num = 0;
	*p_tx->p_fw_cons = 0;

	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
	if (rc)
		goto out;

	qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
	p_ll2_conn->queue_id = qid;
	p_ll2_conn->tx_stats_id = qid;
	p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
					    GTT_BAR0_MAP_REG_TSDM_RAM +
					    TSTORM_LL2_RX_PRODS_OFFSET(qid);
	p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
					    qed_db_addr(p_ll2_conn->cid,
							DQ_DEMS_LEGACY);
	/* prepare db data */
	SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_TX_BD_PROD_CMD);
	p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

	rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
	if (rc)
		goto out;

	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
		qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);

	qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
			qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
						    QED_LLH_FILTER_ETHERTYPE,
						    ETH_P_FCOE, 0);
		qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
					    QED_LLH_FILTER_ETHERTYPE,
					    ETH_P_FIP, 0);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

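/* Move posted buffers to the active list and ring the Rx producer
 * doorbell. Must be called with the Rx queue lock held.
 */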
static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
					     struct qed_ll2_rx_queue *p_rx,
					     struct qed_ll2_rx_packet *p_curp)
{
	struct qed_ll2_rx_packet *p_posting_packet = NULL;
	struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
	bool b_notify_fw = false;
	u16 bd_prod, cq_prod;

	/* This handles the flushing of already posted buffers */
	while (!list_empty(&p_rx->posting_descq)) {
		p_posting_packet = list_first_entry(&p_rx->posting_descq,
						    struct qed_ll2_rx_packet,
						    list_entry);
		list_move_tail(&p_posting_packet->list_entry,
			       &p_rx->active_descq);
		b_notify_fw = true;
	}

	/* This handles the supplied packet [if there is one] */
	if (p_curp) {
		list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
		b_notify_fw = true;
	}

	if (!b_notify_fw)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
	cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
	rx_prod.bd_prod = cpu_to_le16(bd_prod);
	rx_prod.cqe_prod = cpu_to_le16(cq_prod);

	/* Make sure chain element is updated before ringing the doorbell */
	dma_wmb();

	DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}

int qed_ll2_post_rx_buffer(void *cxt,
			   u8 connection_handle,
			   dma_addr_t addr,
			   u16 buf_len, void *cookie, u8 notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct core_rx_bd_with_buff_len *p_curb = NULL;
	struct qed_ll2_rx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn;
	struct qed_ll2_rx_queue *p_rx;
	unsigned long flags;
	void *p_data;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_rx = &p_ll2_conn->rx_queue;

	spin_lock_irqsave(&p_rx->lock, flags);
	if (!list_empty(&p_rx->free_descq))
		p_curp = list_first_entry(&p_rx->free_descq,
					  struct qed_ll2_rx_packet, list_entry);
	if (p_curp) {
		if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
		    qed_chain_get_elem_left(&p_rx->rcq_chain)) {
			p_data = qed_chain_produce(&p_rx->rxq_chain);
			p_curb = (struct core_rx_bd_with_buff_len *)p_data;
			qed_chain_produce(&p_rx->rcq_chain);
		}
	}

	/* If we're lacking entries, let's try to flush buffers to FW */
	if (!p_curp || !p_curb) {
		rc = -EBUSY;
		p_curp = NULL;
		goto out_notify;
	}

	/* We have an Rx packet we can fill */
	DMA_REGPAIR_LE(p_curb->addr, addr);
	p_curb->buff_length = cpu_to_le16(buf_len);
	p_curp->rx_buf_addr = addr;
	p_curp->cookie = cookie;
	p_curp->rxq_bd = p_curb;
	p_curp->buf_length = buf_len;
	list_del(&p_curp->list_entry);

	/* Check if we only want to enqueue this packet without informing FW */
	if (!notify_fw) {
		list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
		goto out;
	}

out_notify:
	qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
out:
	spin_unlock_irqrestore(&p_rx->lock, flags);
	return rc;
}

static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
					  struct qed_ll2_tx_queue *p_tx,
					  struct qed_ll2_tx_packet *p_curp,
					  struct qed_ll2_tx_pkt_info *pkt,
					  u8 notify_fw)
{
	list_del(&p_curp->list_entry);
	p_curp->cookie = pkt->cookie;
	p_curp->bd_used = pkt->num_of_bds;
	p_curp->notify_fw = notify_fw;
	p_tx->cur_send_packet = p_curp;
	p_tx->cur_send_frag_num = 0;

	p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
	p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
	p_tx->cur_send_frag_num++;
}

static void
qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
				 struct qed_ll2_info *p_ll2,
				 struct qed_ll2_tx_packet *p_curp,
				 struct qed_ll2_tx_pkt_info *pkt)
{
	struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
	u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
	struct core_tx_bd *start_bd = NULL;
	enum core_roce_flavor_type roce_flavor;
	enum core_tx_dest tx_dest;
	u16 bd_data = 0, frag_idx;

	roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
							     : CORE_RROCE;

	switch (pkt->tx_dest) {
	case QED_LL2_TX_DEST_NW:
		tx_dest = CORE_TX_DEST_NW;
		break;
	case QED_LL2_TX_DEST_LB:
		tx_dest = CORE_TX_DEST_LB;
		break;
	case QED_LL2_TX_DEST_DROP:
		tx_dest = CORE_TX_DEST_DROP;
		break;
	default:
		tx_dest = CORE_TX_DEST_LB;
		break;
	}

	start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
	if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
	    p_ll2->input.conn_type == QED_LL2_TYPE_OOO) {
		start_bd->nw_vlan_or_lb_echo =
		    cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
	} else {
		start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
		if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
		    p_ll2->input.conn_type == QED_LL2_TYPE_FCOE)
			pkt->remove_stag = true;
	}

	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
		  cpu_to_le16(pkt->l4_hdr_offset_w));
	SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
	bd_data |= pkt->bd_flags;
	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
	SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION,
		  !!(pkt->remove_stag));

	start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
	DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
	start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
		   p_ll2->queue_id,
		   p_ll2->cid,
		   p_ll2->input.conn_type,
		   prod_idx,
		   pkt->first_frag_len,
		   pkt->num_of_bds,
		   le32_to_cpu(start_bd->addr.hi),
		   le32_to_cpu(start_bd->addr.lo));

	if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
		return;

	/* Need to provide the packet with additional BDs for frags */
	for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
	     frag_idx < pkt->num_of_bds; frag_idx++) {
		struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;

		*p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
		(*p_bd)->bd_data.as_bitfield = 0;
		(*p_bd)->bitfield1 = 0;
		p_curp->bds_set[frag_idx].tx_frag = 0;
		p_curp->bds_set[frag_idx].frag_len = 0;
	}
}

/* This should be called while the Txq spinlock is being held */
static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
				     struct qed_ll2_info *p_ll2_conn)
{
	bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
	struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
	struct qed_ll2_tx_packet *p_pkt = NULL;
	u16 bd_prod;

	/* If there are missing BDs, don't do anything now */
	if (p_ll2_conn->tx_queue.cur_send_frag_num !=
	    p_ll2_conn->tx_queue.cur_send_packet->bd_used)
		return;

	/* Push the current packet to the list and clean after it */
	list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
		      &p_ll2_conn->tx_queue.sending_descq);
	p_ll2_conn->tx_queue.cur_send_packet = NULL;
	p_ll2_conn->tx_queue.cur_send_frag_num = 0;

	/* Notify FW of packet only if requested to */
	if (!b_notify)
		return;

	bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);

	while (!list_empty(&p_tx->sending_descq)) {
		p_pkt = list_first_entry(&p_tx->sending_descq,
					 struct qed_ll2_tx_packet, list_entry);
		if (!p_pkt)
			break;

		list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
	}

	p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod);

	/* Make sure the BDs data is updated before ringing the doorbell */
	wmb();

	DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
		   "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
		   p_ll2_conn->queue_id,
		   p_ll2_conn->cid,
		   p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod);
}

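/* Queue a packet's first fragment for transmission. Additional fragments
 * are supplied via qed_ll2_set_fragment_of_tx_packet(), and the doorbell
 * is rung once all BDs of the packet have been filled.
 */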
int qed_ll2_prepare_tx_packet(void *cxt,
			      u8 connection_handle,
			      struct qed_ll2_tx_pkt_info *pkt,
			      bool notify_fw)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_tx_packet *p_curp = NULL;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ll2_tx_queue *p_tx;
	struct qed_chain *p_tx_chain;
	unsigned long flags;
	int rc = 0;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;
	p_tx = &p_ll2_conn->tx_queue;
	p_tx_chain = &p_tx->txq_chain;

	if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)
		return -EIO;

	spin_lock_irqsave(&p_tx->lock, flags);
	if (p_tx->cur_send_packet) {
		rc = -EEXIST;
		goto out;
	}

	/* Get entry, but only if we have tx elements for it */
	if (!list_empty(&p_tx->free_descq))
		p_curp = list_first_entry(&p_tx->free_descq,
					  struct qed_ll2_tx_packet, list_entry);
	if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
		p_curp = NULL;

	if (!p_curp) {
		rc = -EBUSY;
		goto out;
	}

	/* Prepare packet and BD, and perhaps send a doorbell to FW */
	qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);

	qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);

	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);

out:
	spin_unlock_irqrestore(&p_tx->lock, flags);
	return rc;
}

int qed_ll2_set_fragment_of_tx_packet(void *cxt,
				      u8 connection_handle,
				      dma_addr_t addr, u16 nbytes)
{
	struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	u16 cur_send_frag_num = 0;
	struct core_tx_bd *p_bd;
	unsigned long flags;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return -EINVAL;

	if (!p_ll2_conn->tx_queue.cur_send_packet)
		return -EINVAL;

	p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
	cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;

	if (cur_send_frag_num >= p_cur_send_packet->bd_used)
		return -EINVAL;

	/* Fill the BD information, and possibly notify FW */
	p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
	DMA_REGPAIR_LE(p_bd->addr, addr);
	p_bd->nbytes = cpu_to_le16(nbytes);
	p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
	p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;

	p_ll2_conn->tx_queue.cur_send_frag_num++;

	spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
	qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
	spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);

	return 0;
}

int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	int rc = -EINVAL;
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
	if (!p_ll2_conn) {
		rc = -EINVAL;
		goto out;
	}

	/* Stop Tx & Rx of connection, if needed */
	if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->tx_queue.b_cb_registered = false;
		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
		rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;

		qed_ll2_txq_flush(p_hwfn, connection_handle);
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
	}

	if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
		p_ll2_conn->rx_queue.b_cb_registered = false;
		smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
		rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
		if (rc)
			goto out;

		qed_ll2_rxq_flush(p_hwfn, connection_handle);
		qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
	}

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
		qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);

	if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
		if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
			qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
						       QED_LLH_FILTER_ETHERTYPE,
						       ETH_P_FCOE, 0);
		qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
					       QED_LLH_FILTER_ETHERTYPE,
					       ETH_P_FIP, 0);
	}

out:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
					   struct qed_ll2_info *p_ll2_conn)
{
	struct qed_ooo_buffer *p_buffer;

	if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
		return;

	qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
	while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
						   p_hwfn->p_ooo_info))) {
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_buffer->rx_buffer_size,
				  p_buffer->rx_buffer_virt_addr,
				  p_buffer->rx_buffer_phys_addr);
		kfree(p_buffer);
	}
}

void qed_ll2_release_connection(void *cxt, u8 connection_handle)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;

	p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
	if (!p_ll2_conn)
		return;

	kfree(p_ll2_conn->tx_queue.descq_mem);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);

	kfree(p_ll2_conn->rx_queue.descq_array);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
	qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);

	qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);

	qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);

	mutex_lock(&p_ll2_conn->mutex);
	p_ll2_conn->b_active = false;
	mutex_unlock(&p_ll2_conn->mutex);
}

int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ll2_info *p_ll2_connections;
	u8 i;

	/* Allocate LL2's set struct */
	p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
				    sizeof(struct qed_ll2_info), GFP_KERNEL);
	if (!p_ll2_connections) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
		return -ENOMEM;
	}

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		p_ll2_connections[i].my_id = i;

	p_hwfn->p_ll2_info = p_ll2_connections;
	return 0;
}

void qed_ll2_setup(struct qed_hwfn *p_hwfn)
{
	int i;

	for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
		mutex_init(&p_hwfn->p_ll2_info[i].mutex);
}

void qed_ll2_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_ll2_info)
		return;

	kfree(p_hwfn->p_ll2_info);
	p_hwfn->p_ll2_info = NULL;
}

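/* Statistics helpers: each reads one storm's per-queue (or per-port)
 * counters from SDM RAM and accumulates them into the caller's
 * qed_ll2_stats.
 */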
static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_ll2_stats *p_stats)
{
	struct core_ll2_port_stats port_stats;

	memset(&port_stats, 0, sizeof(port_stats));
	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
			BAR0_MAP_REG_TSDM_RAM +
			TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
			sizeof(port_stats));

	p_stats->gsi_invalid_hdr += HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
	p_stats->gsi_invalid_pkt_length +=
	    HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
	p_stats->gsi_unsupported_pkt_typ +=
	    HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
	p_stats->gsi_crcchksm_error +=
	    HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
}

static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_tstorm_per_queue_stat tstats;
	u8 qid = p_ll2_conn->queue_id;
	u32 tstats_addr;

	memset(&tstats, 0, sizeof(tstats));
	tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		      CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));

	p_stats->packet_too_big_discard +=
	    HILO_64_REGPAIR(tstats.packet_too_big_discard);
	p_stats->no_buff_discard += HILO_64_REGPAIR(tstats.no_buff_discard);
}

static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_ustorm_per_queue_stat ustats;
	u8 qid = p_ll2_conn->queue_id;
	u32 ustats_addr;

	memset(&ustats, 0, sizeof(ustats));
	ustats_addr = BAR0_MAP_REG_USDM_RAM +
		      CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));

	p_stats->rcv_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->rcv_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->rcv_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->rcv_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->rcv_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->rcv_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				struct qed_ll2_info *p_ll2_conn,
				struct qed_ll2_stats *p_stats)
{
	struct core_ll2_pstorm_per_queue_stat pstats;
	u8 stats_id = p_ll2_conn->tx_stats_id;
	u32 pstats_addr;

	memset(&pstats, 0, sizeof(pstats));
	pstats_addr = BAR0_MAP_REG_PSDM_RAM +
		      CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));

	p_stats->sent_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->sent_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->sent_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->sent_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->sent_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->sent_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
}

static int __qed_ll2_get_stats(void *cxt, u8 connection_handle,
			       struct qed_ll2_stats *p_stats)
{
	struct qed_hwfn *p_hwfn = cxt;
	struct qed_ll2_info *p_ll2_conn = NULL;
	struct qed_ptt *p_ptt;

	if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
	    !p_hwfn->p_ll2_info)
		return -EINVAL;

	p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "Failed to acquire ptt\n");
		return -EINVAL;
	}

	if (p_ll2_conn->input.gsi_enable)
		_qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);

	_qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	_qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	if (p_ll2_conn->tx_stats_en)
		_qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

int qed_ll2_get_stats(void *cxt,
		      u8 connection_handle, struct qed_ll2_stats *p_stats)
{
	memset(p_stats, 0, sizeof(*p_stats));
	return __qed_ll2_get_stats(cxt, connection_handle, p_stats);
}

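/* Rx-release callback: invoked for buffers still owned by the connection
 * when it is torn down, so simply return the buffer to the pool.
 */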
static void qed_ll2b_release_rx_packet(void *cxt,
				       u8 connection_handle,
				       void *cookie,
				       dma_addr_t rx_buf_addr,
				       bool b_last_packet)
{
	struct qed_hwfn *p_hwfn = cxt;

	qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
}

static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
				    const struct qed_ll2_cb_ops *ops,
				    void *cookie)
{
	cdev->ll2->cbs = ops;
	cdev->ll2->cb_cookie = cookie;
}

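/* Callback table handed to the LL2 core. Tx completion and Tx release use
 * the same handler, since in both cases the SKB is unmapped and freed.
 */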
struct qed_ll2_cbs ll2_cbs = {
	.rx_comp_cb = &qed_ll2b_complete_rx_packet,
	.rx_release_cb = &qed_ll2b_release_rx_packet,
	.tx_comp_cb = &qed_ll2b_complete_tx_packet,
	.tx_release_cb = &qed_ll2b_complete_tx_packet,
};

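/* Fill an acquire-connection request from the caller's parameters. A
 * loopback connection (used for the OOO queue) transmits on the loopback
 * TC back into the Rx path; a regular connection transmits to the network.
 */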
static void qed_ll2_set_conn_data(struct qed_hwfn *p_hwfn,
				  struct qed_ll2_acquire_data *data,
				  struct qed_ll2_params *params,
				  enum qed_ll2_conn_type conn_type,
				  u8 *handle, bool lb)
{
	memset(data, 0, sizeof(*data));

	data->input.conn_type = conn_type;
	data->input.mtu = params->mtu;
	data->input.rx_num_desc = QED_LL2_RX_SIZE;
	data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
	data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
	data->input.tx_num_desc = QED_LL2_TX_SIZE;
	data->p_connection_handle = handle;
	data->cbs = &ll2_cbs;
	ll2_cbs.cookie = p_hwfn;

	if (lb) {
		data->input.tx_tc = PKT_LB_TC;
		data->input.tx_dest = QED_LL2_TX_DEST_LB;
	} else {
		data->input.tx_tc = 0;
		data->input.tx_dest = QED_LL2_TX_DEST_NW;
	}
}

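/* Start the out-of-order (OOO) LL2 queue used by iSCSI for TCP reassembly.
 * On failure the handle is reset to QED_LL2_UNUSED_HANDLE so teardown
 * paths can tell that the queue was never established.
 */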
static int qed_ll2_start_ooo(struct qed_hwfn *p_hwfn,
			     struct qed_ll2_params *params)
{
	u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
	struct qed_ll2_acquire_data data;
	int rc;

	qed_ll2_set_conn_data(p_hwfn, &data, params,
			      QED_LL2_TYPE_OOO, handle, true);

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to acquire LL2 OOO connection\n");
		goto out;
	}

	rc = qed_ll2_establish_connection(p_hwfn, *handle);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to establish LL2 OOO connection\n");
		goto fail;
	}

	return 0;

fail:
	qed_ll2_release_connection(p_hwfn, *handle);
out:
	*handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}

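/* A storage (FCoE/iSCSI) PF affinitized to engine 1 on a CMT device also
 * needs LL2 resources on engine 0; this predicate identifies that case.
 */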
static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev)
{
	return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
		QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
		(QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev));
}

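/* Tear down the LL2 connection on a single hw-function. */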
static int __qed_ll2_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int rc;

	rc = qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
	if (rc)
		DP_INFO(cdev, "Failed to terminate LL2 connection\n");

	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);

	return rc;
}

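/* Full LL2 teardown: remove the LLH MAC filter, stop the OOO queue for an
 * iSCSI PF, stop the connection on both engines where applicable, and
 * reclaim all Rx buffers.
 */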
static int qed_ll2_stop(struct qed_dev *cdev)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	int rc = 0, rc2 = 0;

	if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
		return 0;

	qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
	eth_zero_addr(cdev->ll2_mac_address);

	if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
		qed_ll2_stop_ooo(p_hwfn);

	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
	if (b_is_storage_eng1) {
		rc2 = __qed_ll2_stop(QED_LEADING_HWFN(cdev));
		if (rc2)
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to stop LL2 on engine 0\n");
	}

	rc = __qed_ll2_stop(p_hwfn);
	if (rc)
		DP_NOTICE(p_hwfn, "Failed to stop LL2\n");

	qed_ll2_kill_buffers(cdev);

	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;

	return rc | rc2;
}

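/* Acquire and establish an LL2 connection on one hw-function, then post
 * all pre-allocated Rx buffers to firmware. Buffers that fail to post are
 * unmapped and freed immediately; startup fails only if not even a single
 * buffer could be posted.
 */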
static int __qed_ll2_start(struct qed_hwfn *p_hwfn,
			   struct qed_ll2_params *params)
{
	struct qed_ll2_buffer *buffer, *tmp_buffer;
	struct qed_dev *cdev = p_hwfn->cdev;
	enum qed_ll2_conn_type conn_type;
	struct qed_ll2_acquire_data data;
	int rc, rx_cnt;

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_FCOE:
		conn_type = QED_LL2_TYPE_FCOE;
		break;
	case QED_PCI_ISCSI:
		conn_type = QED_LL2_TYPE_ISCSI;
		break;
	case QED_PCI_ETH_ROCE:
		conn_type = QED_LL2_TYPE_ROCE;
		break;
	default:
		conn_type = QED_LL2_TYPE_TEST;
	}

	qed_ll2_set_conn_data(p_hwfn, &data, params, conn_type,
			      &cdev->ll2->handle, false);

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to acquire LL2 connection\n");
		return rc;
	}

	rc = qed_ll2_establish_connection(p_hwfn, cdev->ll2->handle);
	if (rc) {
		DP_INFO(p_hwfn, "Failed to establish LL2 connection\n");
		goto release_conn;
	}

	/* Post all Rx buffers to FW */
	spin_lock_bh(&cdev->ll2->lock);
	rx_cnt = cdev->ll2->rx_cnt;
	list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
		rc = qed_ll2_post_rx_buffer(p_hwfn, cdev->ll2->handle,
					    buffer->phys_addr, 0, buffer, 1);
		if (rc) {
			DP_INFO(p_hwfn,
				"Failed to post an Rx buffer; Deleting it\n");
			dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
					 cdev->ll2->rx_size, DMA_FROM_DEVICE);
			kfree(buffer->data);
			list_del(&buffer->list);
			kfree(buffer);
		} else {
			rx_cnt++;
		}
	}
	spin_unlock_bh(&cdev->ll2->lock);

	if (rx_cnt == cdev->ll2->rx_cnt) {
		DP_NOTICE(p_hwfn, "Failed passing even a single Rx buffer\n");
		goto terminate_conn;
	}
	cdev->ll2->rx_cnt = rx_cnt;

	return 0;

terminate_conn:
	qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
release_conn:
	qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
	return rc;
}

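/* Allocate the Rx buffer pool and start LL2 on the affinity hw-function
 * (and also on engine 0 for a storage PF in CMT mode). Each buffer must
 * hold NET_SKB_PAD of headroom, an Ethernet header, a cache line of slack
 * and a full MTU. As a worked example, assuming the common configuration
 * of NET_SKB_PAD = 64 and L1_CACHE_BYTES = 64 (both build-dependent), an
 * MTU of 1500 gives 64 + 14 + 64 + 1500 = 1642 bytes per buffer.
 */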
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	struct qed_ll2_buffer *buffer;
	int rx_num_desc, i, rc;

	if (!is_valid_ether_addr(params->ll2_mac_address)) {
		DP_NOTICE(cdev, "Invalid Ethernet address\n");
		return -EINVAL;
	}

	WARN_ON(!cdev->ll2->cbs);

	/* Initialize LL2 locks & lists */
	INIT_LIST_HEAD(&cdev->ll2->list);
	spin_lock_init(&cdev->ll2->lock);

	cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
			     L1_CACHE_BYTES + params->mtu;

	/* Allocate memory for LL2.
	 * In CMT mode, if a storage PF is affinitized to engine 1, LL2 is
	 * started on engine 0 as well, so twofold buffers are needed.
	 */
	rx_num_desc = QED_LL2_RX_SIZE * (b_is_storage_eng1 ? 2 : 1);
	DP_INFO(cdev, "Allocating %d LL2 buffers of size %08x bytes\n",
		rx_num_desc, cdev->ll2->rx_size);
	for (i = 0; i < rx_num_desc; i++) {
		buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
			rc = -ENOMEM;
			goto err0;
		}

		rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
					  &buffer->phys_addr);
		if (rc) {
			kfree(buffer);
			goto err0;
		}

		list_add_tail(&buffer->list, &cdev->ll2->list);
	}

	rc = __qed_ll2_start(p_hwfn, params);
	if (rc) {
		DP_NOTICE(cdev, "Failed to start LL2\n");
		goto err0;
	}

	/* In CMT mode, LL2 must also be started on engine 0 for a storage PF,
	 * since broadcast/multicast packets are routed to engine 0.
	 */
	if (b_is_storage_eng1) {
		rc = __qed_ll2_start(QED_LEADING_HWFN(cdev), params);
		if (rc) {
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to start LL2 on engine 0\n");
			goto err1;
		}
	}

	if (QED_IS_ISCSI_PERSONALITY(p_hwfn)) {
		DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
		rc = qed_ll2_start_ooo(p_hwfn, params);
		if (rc) {
			DP_NOTICE(cdev, "Failed to start OOO LL2\n");
			goto err2;
		}
	}

	rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
	if (rc) {
		DP_NOTICE(cdev, "Failed to add an LLH filter\n");
		goto err3;
	}

	ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);

	return 0;

err3:
	if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
		qed_ll2_stop_ooo(p_hwfn);
err2:
	if (b_is_storage_eng1)
		__qed_ll2_stop(QED_LEADING_HWFN(cdev));
err1:
	__qed_ll2_stop(p_hwfn);
err0:
	qed_ll2_kill_buffers(cdev);
	cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
	return rc;
}

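/* Transmit an SKB over the LL2 connection. The linear part of the SKB
 * becomes the first buffer descriptor and every page fragment becomes an
 * additional one, so a packet consumes 1 + nr_frags BDs.
 */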
static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
			      unsigned long xmit_flags)
{
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	struct qed_ll2_tx_pkt_info pkt;
	const skb_frag_t *frag;
	u8 flags = 0, nr_frags;
	int rc = -EINVAL, i;
	dma_addr_t mapping;
	u16 vlan = 0;

	if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
		DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
		return -EINVAL;
	}

	/* Cache the number of fragments, since the SKB may be freed by the
	 * completion routine after qed_ll2_prepare_tx_packet() is called.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;

	if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
		DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
		       1 + nr_frags);
		return -EINVAL;
	}

	mapping = dma_map_single(&cdev->pdev->dev, skb->data,
				 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
		DP_NOTICE(cdev, "SKB mapping failed\n");
		return -EINVAL;
	}

	/* Request HW to calculate IP csum */
	if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
	      ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);

	if (skb_vlan_tag_present(skb)) {
		vlan = skb_vlan_tag_get(skb);
		flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
	}

	memset(&pkt, 0, sizeof(pkt));
	pkt.num_of_bds = 1 + nr_frags;
	pkt.vlan = vlan;
	pkt.bd_flags = flags;
	pkt.tx_dest = QED_LL2_TX_DEST_NW;
	pkt.first_frag = mapping;
	pkt.first_frag_len = skb->len;
	pkt.cookie = skb;
	if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
	    test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
		pkt.remove_stag = true;

	/* qed_ll2_prepare_tx_packet() may actually send the packet if the
	 * skb carries no fragments, in which case the completion routine may
	 * run and free the SKB; do not dereference the SKB beyond this point
	 * unless it has fragments.
	 */
	rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle,
				       &pkt, 1);
	if (rc)
		goto err;

	for (i = 0; i < nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
			DP_NOTICE(cdev,
				  "Unable to map frag - dropping packet\n");
			rc = -ENOMEM;
			goto err;
		}

		rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
						       cdev->ll2->handle,
						       mapping,
						       skb_frag_size(frag));

		/* If this fails there is little to do: part of the packet has
		 * already been posted and the memory cannot be freed until
		 * the completion arrives.
		 */
		if (rc)
			goto err2;
	}

	return 0;

err:
	dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
err2:
	return rc;
}

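/* Report statistics upward. For a storage PF in CMT mode, engine 0 runs
 * its own LL2 connection, so its counters are accumulated on top of the
 * affinity engine's.
 */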
static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
{
	bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
	int rc;

	rc = qed_ll2_get_stats(p_hwfn, cdev->ll2->handle, stats);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get LL2 stats\n");
		return rc;
	}

	/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
	if (b_is_storage_eng1) {
		rc = __qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
					 cdev->ll2->handle, stats);
		if (rc) {
			DP_NOTICE(QED_LEADING_HWFN(cdev),
				  "Failed to get LL2 stats on engine 0\n");
			return rc;
		}
	}

	return 0;
}

const struct qed_ll2_ops qed_ll2_ops_pass = {
	.start = &qed_ll2_start,
	.stop = &qed_ll2_stop,
	.start_xmit = &qed_ll2_start_xmit,
	.register_cb_ops = &qed_ll2_register_cb_ops,
	.get_stats = &qed_ll2_stats,
};

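/* Allocate/free the per-device LL2 bookkeeping structure. */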
int qed_ll2_alloc_if(struct qed_dev *cdev)
{
	cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
	return cdev->ll2 ? 0 : -ENOMEM;
}

void qed_ll2_dealloc_if(struct qed_dev *cdev)
{
	kfree(cdev->ll2);
	cdev->ll2 = NULL;
}