// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Ethernet driver
 *
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/tso.h>

#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
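
/* Per-queue NIX stats in the helpers below are read through the LF
 * "operation" registers: an atomic 64-bit add of (qidx << 32) to
 * NIX_LF_RQ/SQ_OP_* selects the queue in the upper word, and the value
 * returned by the atomic operation is the current counter for that queue.
 * (Descriptive note added for clarity; see otx2_atomic64_add() usage below.)
 */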
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
				 struct otx2_nic *pfvf, int qidx)
{
	u64 incr = (u64)qidx << 32;
	u64 *ptr;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_OCTS);
	stats->bytes = otx2_atomic64_add(incr, ptr);

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_RQ_OP_PKTS);
	stats->pkts = otx2_atomic64_add(incr, ptr);
}

static void otx2_nix_sq_op_stats(struct queue_stats *stats,
				 struct otx2_nic *pfvf, int qidx)
{
	u64 incr = (u64)qidx << 32;
	u64 *ptr;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_OCTS);
	stats->bytes = otx2_atomic64_add(incr, ptr);

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_PKTS);
	stats->pkts = otx2_atomic64_add(incr, ptr);
}
void otx2_update_lmac_stats(struct otx2_nic *pfvf)
{
	struct msg_req *req;

	if (!netif_running(pfvf->netdev))
		return;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return;
	}

	otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
}
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];

	if (!pfvf->qset.rq)
		return 0;

	otx2_nix_rq_op_stats(&rq->stats, pfvf, qidx);
	return 1;
}

int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_snd_queue *sq = &pfvf->qset.sq[qidx];

	if (!pfvf->qset.sq)
		return 0;

	otx2_nix_sq_op_stats(&sq->stats, pfvf, qidx);
	return 1;
}
void otx2_get_dev_stats(struct otx2_nic *pfvf)
{
	struct otx2_dev_stats *dev_stats = &pfvf->hw.dev_stats;

#define OTX2_GET_RX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_RX_STATX(reg))
#define OTX2_GET_TX_STATS(reg) \
	otx2_read64(pfvf, NIX_LF_TX_STATX(reg))

	dev_stats->rx_bytes = OTX2_GET_RX_STATS(RX_OCTS);
	dev_stats->rx_drops = OTX2_GET_RX_STATS(RX_DROP);
	dev_stats->rx_bcast_frames = OTX2_GET_RX_STATS(RX_BCAST);
	dev_stats->rx_mcast_frames = OTX2_GET_RX_STATS(RX_MCAST);
	dev_stats->rx_ucast_frames = OTX2_GET_RX_STATS(RX_UCAST);
	dev_stats->rx_frames = dev_stats->rx_bcast_frames +
			       dev_stats->rx_mcast_frames +
			       dev_stats->rx_ucast_frames;

	dev_stats->tx_bytes = OTX2_GET_TX_STATS(TX_OCTS);
	dev_stats->tx_drops = OTX2_GET_TX_STATS(TX_DROP);
	dev_stats->tx_bcast_frames = OTX2_GET_TX_STATS(TX_BCAST);
	dev_stats->tx_mcast_frames = OTX2_GET_TX_STATS(TX_MCAST);
	dev_stats->tx_ucast_frames = OTX2_GET_TX_STATS(TX_UCAST);
	dev_stats->tx_frames = dev_stats->tx_bcast_frames +
			       dev_stats->tx_mcast_frames +
			       dev_stats->tx_ucast_frames;
}
void otx2_get_stats64(struct net_device *netdev,
		      struct rtnl_link_stats64 *stats)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct otx2_dev_stats *dev_stats;

	otx2_get_dev_stats(pfvf);

	dev_stats = &pfvf->hw.dev_stats;
	stats->rx_bytes = dev_stats->rx_bytes;
	stats->rx_packets = dev_stats->rx_frames;
	stats->rx_dropped = dev_stats->rx_drops;
	stats->multicast = dev_stats->rx_mcast_frames;

	stats->tx_bytes = dev_stats->tx_bytes;
	stats->tx_packets = dev_stats->tx_frames;
	stats->tx_dropped = dev_stats->tx_drops;
}
EXPORT_SYMBOL(otx2_get_stats64);
/* Sync MAC address with RVU AF */
static int otx2_hw_set_mac_addr(struct otx2_nic *pfvf, u8 *mac)
{
	struct nix_set_mac_addr *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_set_mac_addr(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	ether_addr_copy(req->mac_addr, mac);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
				struct net_device *netdev)
{
	struct nix_get_mac_addr_rsp *rsp;
	struct mbox_msghdr *msghdr;
	struct msg_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_get_mac_addr(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	msghdr = otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(msghdr)) {
		mutex_unlock(&pfvf->mbox.lock);
		return PTR_ERR(msghdr);
	}
	rsp = (struct nix_get_mac_addr_rsp *)msghdr;
	ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}
int otx2_set_mac_address(struct net_device *netdev, void *p)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data))
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	else
		return -EPERM;

	return 0;
}
EXPORT_SYMBOL(otx2_set_mac_address);
int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
{
	struct nix_frs_cfg *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_set_hw_frs(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	pfvf->max_frs = mtu + OTX2_ETH_HLEN;
	req->maxlen = pfvf->max_frs;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
int otx2_config_pause_frm(struct otx2_nic *pfvf)
{
	struct cgx_pause_frm_cfg *req;
	int err;

	if (is_otx2_lbkvf(pfvf->pdev))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(&pfvf->mbox);
	if (!req) {
		err = -ENOMEM;
		goto unlock;
	}

	req->rx_pause = !!(pfvf->flags & OTX2_FLAG_RX_PAUSE_ENABLED);
	req->tx_pause = !!(pfvf->flags & OTX2_FLAG_TX_PAUSE_ENABLED);
	req->set = 1;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
unlock:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	struct nix_rss_flowkey_cfg *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	req->mcam_index = -1; /* Default or reserved index */
	req->flowkey_cfg = rss->flowkey_cfg;
	req->group = DEFAULT_RSS_CONTEXT_GROUP;

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}
int otx2_set_rss_table(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	struct mbox *mbox = &pfvf->mbox;
	struct nix_aq_enq_req *aq;
	int idx, err;

	mutex_lock(&mbox->lock);
	/* Get memory to put this msg */
	for (idx = 0; idx < rss->rss_size; idx++) {
		aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
		if (!aq) {
			/* The shared memory buffer can be full.
			 * Flush it and retry.
			 */
			err = otx2_sync_mbox_msg(mbox);
			if (err) {
				mutex_unlock(&mbox->lock);
				return err;
			}
			aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
			if (!aq) {
				mutex_unlock(&mbox->lock);
				return -ENOMEM;
			}
		}

		aq->rss.rq = rss->ind_tbl[idx];

		/* Fill AQ info */
		aq->qidx = idx;
		aq->ctype = NIX_AQ_CTYPE_RSS;
		aq->op = NIX_AQ_INSTOP_INIT;
	}
	err = otx2_sync_mbox_msg(mbox);
	mutex_unlock(&mbox->lock);
	return err;
}
void otx2_set_rss_key(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	u64 *key = (u64 *)&rss->key[4];
	int idx;

	/* 352bit or 44byte key needs to be configured as below
	 * NIX_LF_RX_SECRETX0 = key<351:288>
	 * NIX_LF_RX_SECRETX1 = key<287:224>
	 * NIX_LF_RX_SECRETX2 = key<223:160>
	 * NIX_LF_RX_SECRETX3 = key<159:96>
	 * NIX_LF_RX_SECRETX4 = key<95:32>
	 * NIX_LF_RX_SECRETX5<63:32> = key<31:0>
	 */
	otx2_write64(pfvf, NIX_LF_RX_SECRETX(5),
		     (u64)(*((u32 *)&rss->key)) << 32);
	idx = sizeof(rss->key) / sizeof(u64);
	while (idx > 0) {
		idx--;
		otx2_write64(pfvf, NIX_LF_RX_SECRETX(idx), *key++);
	}
}
int otx2_rss_init(struct otx2_nic *pfvf)
{
	struct otx2_rss_info *rss = &pfvf->hw.rss_info;
	int idx, ret = 0;

	rss->rss_size = sizeof(rss->ind_tbl);

	/* Init RSS key if it is not setup already */
	if (!rss->enable)
		netdev_rss_key_fill(rss->key, sizeof(rss->key));
	otx2_set_rss_key(pfvf);

	if (!netif_is_rxfh_configured(pfvf->netdev)) {
		/* Default indirection table */
		for (idx = 0; idx < rss->rss_size; idx++)
			rss->ind_tbl[idx] =
				ethtool_rxfh_indir_default(idx,
							   pfvf->hw.rx_queues);
	}
	ret = otx2_set_rss_table(pfvf);
	if (ret)
		return ret;

	/* Flowkey or hash config to be used for generating flow tag */
	rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
			   NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
			   NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
			   NIX_FLOW_KEY_TYPE_SCTP;

	ret = otx2_set_flowkey_cfg(pfvf);
	if (ret)
		return ret;

	rss->enable = true;
	return 0;
}
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
{
	/* Configure CQE interrupt coalescing parameters
	 *
	 * HW triggers an irq when ECOUNT > cq_ecount_wait, hence
	 * set 1 less than cq_ecount_wait. And cq_time_wait is in
	 * usecs, convert that to 100ns count.
	 */
	otx2_write64(pfvf, NIX_LF_CINTX_WAIT(qidx),
		     ((u64)(pfvf->hw.cq_time_wait * 10) << 48) |
		     ((u64)pfvf->hw.cq_qcount_wait << 32) |
		     (pfvf->hw.cq_ecount_wait - 1));
}
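
/* Illustration of the register value built above (parameter values chosen
 * only as an example, not driver defaults): with cq_time_wait = 2 usecs,
 * cq_qcount_wait = 16 and cq_ecount_wait = 32, the write becomes
 * (20ULL << 48) | (16ULL << 32) | 31, i.e. the time threshold is programmed
 * in 100ns units, hence the multiply by 10.
 */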
dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
			   gfp_t gfp)
{
	dma_addr_t iova;

	/* Check if request can be accommodated in previously allocated page */
	if (pool->page && ((pool->page_offset + pool->rbsize) <=
	    (PAGE_SIZE << pool->rbpage_order))) {
		pool->pageref++;
		goto ret;
	}

	otx2_get_page(pool);

	/* Allocate a new page */
	pool->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
				 pool->rbpage_order);
	if (unlikely(!pool->page))
		return -ENOMEM;

	pool->page_offset = 0;
ret:
	iova = (u64)otx2_dma_map_page(pfvf, pool->page, pool->page_offset,
				      pool->rbsize, DMA_FROM_DEVICE);
	if (!iova) {
		if (!pool->page_offset)
			__free_pages(pool->page, pool->rbpage_order);
		pool->page = NULL;
		return -ENOMEM;
	}
	pool->page_offset += pool->rbsize;
	return iova;
}
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);

	schedule_work(&pfvf->reset_task);
}
EXPORT_SYMBOL(otx2_tx_timeout);
void otx2_get_mac_from_af(struct net_device *netdev)
{
	struct otx2_nic *pfvf = netdev_priv(netdev);
	int err;

	err = otx2_hw_get_mac_addr(pfvf, netdev);
	if (err)
		dev_warn(pfvf->dev, "Failed to read mac from hardware\n");

	/* If AF doesn't provide a valid MAC, generate a random one */
	if (!is_valid_ether_addr(netdev->dev_addr))
		eth_hw_addr_random(netdev);
}
EXPORT_SYMBOL(otx2_get_mac_from_af);
static int otx2_get_link(struct otx2_nic *pfvf)
{
	int link = 0;
	u16 map;

	/* cgx lmac links */
	if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) {
		map = pfvf->hw.tx_chan_base & 0x7FF;
		link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
	}
	/* LBK channel */
	if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE)
		link = 12;

	return link;
}
int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
{
	struct otx2_hw *hw = &pfvf->hw;
	struct nix_txschq_config *req;
	u64 schq, parent;

	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	req->lvl = lvl;
	req->num_regs = 1;

	schq = hw->txschq_list[lvl][0];
	/* Set topology and other configuration */
	if (lvl == NIX_TXSCH_LVL_SMQ) {
		req->reg[0] = NIX_AF_SMQX_CFG(schq);
		req->regval[0] = ((OTX2_MAX_MTU + OTX2_ETH_HLEN) << 8) |
				 OTX2_MIN_MTU;

		req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
				  (0x2ULL << 36);
		req->num_regs++;
		/* MDQ config */
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
		req->reg[1] = NIX_AF_MDQX_PARENT(schq);
		req->regval[1] = parent << 16;
		req->num_regs++;
		/* Set DWRR quantum */
		req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
		req->regval[2] = DFLT_RR_QTM;
	} else if (lvl == NIX_TXSCH_LVL_TL4) {
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
		req->reg[0] = NIX_AF_TL4X_PARENT(schq);
		req->regval[0] = parent << 16;
		req->num_regs++;
		req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
		req->regval[1] = DFLT_RR_QTM;
	} else if (lvl == NIX_TXSCH_LVL_TL3) {
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
		req->reg[0] = NIX_AF_TL3X_PARENT(schq);
		req->regval[0] = parent << 16;
		req->num_regs++;
		req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
		req->regval[1] = DFLT_RR_QTM;
	} else if (lvl == NIX_TXSCH_LVL_TL2) {
		parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
		req->reg[0] = NIX_AF_TL2X_PARENT(schq);
		req->regval[0] = parent << 16;

		req->num_regs++;
		req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
		req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM;

		req->num_regs++;
		req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
							otx2_get_link(pfvf));
		/* Enable this queue and backpressure */
		req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
	} else if (lvl == NIX_TXSCH_LVL_TL1) {
		/* Default config for TL1.
		 * For VF this is always ignored.
		 */

		/* Set DWRR quantum */
		req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
		req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;

		req->num_regs++;
		req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
		req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);

		req->num_regs++;
		req->reg[2] = NIX_AF_TL1X_CIR(schq);
		req->regval[2] = 0;
	}

	return otx2_sync_mbox_msg(&pfvf->mbox);
}
int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
	struct nix_txsch_alloc_req *req;
	int lvl;

	/* Get memory to put this msg */
	req = otx2_mbox_alloc_msg_nix_txsch_alloc(&pfvf->mbox);
	if (!req)
		return -ENOMEM;

	/* Request one schq per level */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		req->schq[lvl] = 1;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}
int otx2_txschq_stop(struct otx2_nic *pfvf)
{
	struct nix_txsch_free_req *free_req;
	int lvl, schq, err;

	mutex_lock(&pfvf->mbox.lock);
	/* Free the transmit schedulers */
	free_req = otx2_mbox_alloc_msg_nix_txsch_free(&pfvf->mbox);
	if (!free_req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	free_req->flags = TXSCHQ_FREE_ALL;
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	/* Clear the txschq list */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
		for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++)
			pfvf->hw.txschq_list[lvl][schq] = 0;
	}
	return err;
}
void otx2_sqb_flush(struct otx2_nic *pfvf)
{
	int qidx, sqe_tail, sqe_head;
	u64 incr, *ptr, val;
	int timeout = 1000;

	ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		incr = (u64)qidx << 32;
		while (timeout) {
			val = otx2_atomic64_add(incr, ptr);
			sqe_head = (val >> 20) & 0x3F;
			sqe_tail = (val >> 28) & 0x3F;
			if (sqe_head == sqe_tail)
				break;
			usleep_range(1, 3);
			timeout--;
		}
	}
}
/* RED and drop levels of CQ on packet reception.
 * For CQ, level is a measure of emptiness (0x0 = full, 255 = empty).
 */
#define RQ_PASS_LVL_CQ(skid, qsize)	((((skid) + 16) * 256) / (qsize))
#define RQ_DROP_LVL_CQ(skid, qsize)	(((skid) * 256) / (qsize))

/* RED and drop levels of AURA for packet reception.
 * For AURA, level is a measure of fullness (0x0 = empty, 255 = full).
 * Eg: For RQ length 1K, for pass/drop level 204/230.
 * RED accepts pkts if free pointers > 102 & <= 205.
 * Drops pkts if free pointers < 102.
 */
#define RQ_BP_LVL_AURA   (255 - ((85 * 256) / 100)) /* BP when 85% is full */
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */

/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
#define SEND_CQ_SKID	2000
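
/* Worked example for the CQ macros above (values chosen only for
 * illustration): the pass/drop thresholds are expressed in 256ths of the CQ
 * size, so assuming rq_skid = 600 and a 1024-entry CQ,
 * RQ_PASS_LVL_CQ = ((600 + 16) * 256) / 1024 = 154 and
 * RQ_DROP_LVL_CQ = (600 * 256) / 1024 = 150; RED starts once less than
 * ~154/256ths of the CQ is empty and packets are dropped below ~150/256ths.
 */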
static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct nix_aq_enq_req *aq;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->rq.cq = qidx;
	aq->rq.ena = 1;
	aq->rq.pb_caching = 1;
	aq->rq.lpb_aura = lpb_aura; /* Use large packet buffer aura */
	aq->rq.lpb_sizem1 = (DMA_BUFFER_LEN(pfvf->rbsize) / 8) - 1;
	aq->rq.xqe_imm_size = 0; /* Copying of packet to CQE not needed */
	aq->rq.flow_tagw = 32; /* Copy full 32bit flow_tag to CQE header */
	aq->rq.qint_idx = 0;
	aq->rq.lpb_drop_ena = 1; /* Enable RED dropping for AURA */
	aq->rq.xqe_drop_ena = 1; /* Enable RED dropping for CQ/SSO */
	aq->rq.xqe_pass = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	aq->rq.xqe_drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	aq->rq.lpb_aura_pass = RQ_PASS_LVL_AURA;
	aq->rq.lpb_aura_drop = RQ_DROP_LVL_AURA;

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}
static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_snd_queue *sq;
	struct nix_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[sqb_aura];
	sq = &qset->sq[qidx];
	sq->sqe_size = NIX_SQESZ_W16 ? 64 : 128;
	sq->sqe_cnt = qset->sqe_cnt;

	err = qmem_alloc(pfvf->dev, &sq->sqe, 1, sq->sqe_size);
	if (err)
		return err;

	err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
			 TSO_HEADER_SIZE);
	if (err)
		return err;

	sq->sqe_base = sq->sqe->base;
	sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
	if (!sq->sg)
		return -ENOMEM;

	sq->head = 0;
	sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
	sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
	/* Set SQE threshold to 10% of total SQEs */
	sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100;
	sq->aura_id = sqb_aura;
	sq->aura_fc_addr = pool->fc_addr->base;
	sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
	sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));

	sq->stats.bytes = 0;
	sq->stats.pkts = 0;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->sq.cq = pfvf->hw.rx_queues + qidx;
	aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
	aq->sq.cq_ena = 1;
	aq->sq.ena = 1;
	/* Only one SMQ is allocated, map all SQs to that SMQ */
	aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
	aq->sq.smq_rr_quantum = DFLT_RR_QTM;
	aq->sq.default_chan = pfvf->hw.tx_chan_base;
	aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
	aq->sq.sqb_aura = sqb_aura;
	aq->sq.sq_int_ena = NIX_SQINT_BITS;
	aq->sq.qint_idx = 0;
	/* Due to pipelining, a minimum of 2000 unused SQ CQEs
	 * need to be maintained to avoid CQ overflow.
	 */
	aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (sq->sqe_cnt));

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}
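
/* Example for the cq_limit computed above (SQ depth is illustrative): with
 * a 4096-entry SQ, cq_limit = (2000 * 256) / 4096 = 125. The limit is
 * expressed in 256ths of the CQ size, so 125/256 of 4096 entries reserves
 * roughly the 2000-CQE send skid described by SEND_CQ_SKID.
 */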
static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct nix_aq_enq_req *aq;
	struct otx2_cq_queue *cq;
	int err, pool_id;

	cq = &qset->cq[qidx];
	cq->cq_idx = qidx;
	if (qidx < pfvf->hw.rx_queues) {
		cq->cq_type = CQ_RX;
		cq->cint_idx = qidx;
		cq->cqe_cnt = qset->rqe_cnt;
	} else {
		cq->cq_type = CQ_TX;
		cq->cint_idx = qidx - pfvf->hw.rx_queues;
		cq->cqe_cnt = qset->sqe_cnt;
	}
	cq->cqe_size = pfvf->qset.xqe_size;

	/* Allocate memory for CQEs */
	err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size);
	if (err)
		return err;

	/* Save CQE CPU base for faster reference */
	cq->cqe_base = cq->cqe->base;
	/* In case all RQ auras point to a single pool,
	 * all CQs' receive buffer pools also point to the same pool.
	 */
	pool_id = ((cq->cq_type == CQ_RX) &&
		   (pfvf->hw.rqpool_cnt != pfvf->hw.rx_queues)) ? 0 : qidx;
	cq->rbpool = &qset->pool[pool_id];
	cq->refill_task_sched = false;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->cq.ena = 1;
	aq->cq.qsize = Q_SIZE(cq->cqe_cnt, 4);
	aq->cq.caching = 1;
	aq->cq.base = cq->cqe->iova;
	aq->cq.cint_idx = cq->cint_idx;
	aq->cq.cq_err_int_ena = NIX_CQERRINT_BITS;
	aq->cq.qint_idx = 0;
	aq->cq.avg_level = 255;

	if (qidx < pfvf->hw.rx_queues) {
		aq->cq.drop = RQ_DROP_LVL_CQ(pfvf->hw.rq_skid, cq->cqe_cnt);
		aq->cq.drop_ena = 1;

		/* Enable receive CQ backpressure */
		aq->cq.bp_ena = 1;
		aq->cq.bpid = pfvf->bpid[0];

		/* Set backpressure level same as CQ pass level */
		aq->cq.bp = RQ_PASS_LVL_CQ(pfvf->hw.rq_skid, qset->rqe_cnt);
	}

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}
static void otx2_pool_refill_task(struct work_struct *work)
{
	struct otx2_cq_queue *cq;
	struct otx2_pool *rbpool;
	struct refill_work *wrk;
	int qidx, free_ptrs = 0;
	struct otx2_nic *pfvf;
	s64 bufptr;

	wrk = container_of(work, struct refill_work, pool_refill_work.work);
	pfvf = wrk->pf;
	qidx = wrk - pfvf->refill_wrk;
	cq = &pfvf->qset.cq[qidx];
	rbpool = cq->rbpool;
	free_ptrs = cq->pool_ptrs;

	while (cq->pool_ptrs) {
		bufptr = otx2_alloc_rbuf(pfvf, rbpool, GFP_KERNEL);
		if (bufptr <= 0) {
			/* Schedule a WQ if we fail to free at least half of
			 * the pointers, else enable NAPI for this RQ.
			 */
			if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
				struct delayed_work *dwork;

				dwork = &wrk->pool_refill_work;
				schedule_delayed_work(dwork,
						      msecs_to_jiffies(100));
			} else {
				cq->refill_task_sched = false;
			}
			return;
		}
		otx2_aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
		cq->pool_ptrs--;
	}
	cq->refill_task_sched = false;
}
int otx2_config_nix_queues(struct otx2_nic *pfvf)
{
	int qidx, err;

	/* Initialize RX queues */
	for (qidx = 0; qidx < pfvf->hw.rx_queues; qidx++) {
		u16 lpb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx);

		err = otx2_rq_init(pfvf, qidx, lpb_aura);
		if (err)
			return err;
	}

	/* Initialize TX queues */
	for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
		u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);

		err = otx2_sq_init(pfvf, qidx, sqb_aura);
		if (err)
			return err;
	}

	/* Initialize completion queues */
	for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
		err = otx2_cq_init(pfvf, qidx);
		if (err)
			return err;
	}

	/* Initialize work queue for receive buffer refill */
	pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
					sizeof(struct refill_work), GFP_KERNEL);
	if (!pfvf->refill_wrk)
		return -ENOMEM;

	for (qidx = 0; qidx < pfvf->qset.cq_cnt; qidx++) {
		pfvf->refill_wrk[qidx].pf = pfvf;
		INIT_DELAYED_WORK(&pfvf->refill_wrk[qidx].pool_refill_work,
				  otx2_pool_refill_task);
	}
	return 0;
}
int otx2_config_nix(struct otx2_nic *pfvf)
{
	struct nix_lf_alloc_req *nixlf;
	struct nix_lf_alloc_rsp *rsp;
	int err;

	pfvf->qset.xqe_size = NIX_XQESZ_W16 ? 128 : 512;

	/* Get memory to put this msg */
	nixlf = otx2_mbox_alloc_msg_nix_lf_alloc(&pfvf->mbox);
	if (!nixlf)
		return -ENOMEM;

	/* Set RQ/SQ/CQ counts */
	nixlf->rq_cnt = pfvf->hw.rx_queues;
	nixlf->sq_cnt = pfvf->hw.tx_queues;
	nixlf->cq_cnt = pfvf->qset.cq_cnt;
	nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
	nixlf->rss_grps = 1; /* Single RSS indir table supported, for now */
	nixlf->xqe_sz = NIX_XQESZ_W16;
	/* We don't know the absolute NPA LF idx attached.
	 * AF will replace 'RVU_DEFAULT_PF_FUNC' with
	 * NPA LF attached to this RVU PF/VF.
	 */
	nixlf->npa_func = RVU_DEFAULT_PF_FUNC;
	/* Disable alignment pad, enable L2 length check,
	 * enable L4 TCP/UDP checksum verification.
	 */
	nixlf->rx_cfg = BIT_ULL(33) | BIT_ULL(35) | BIT_ULL(37);

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		return err;

	rsp = (struct nix_lf_alloc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0,
							   &nixlf->hdr);
	if (IS_ERR(rsp))
		return PTR_ERR(rsp);

	return rsp->hdr.rc;
}
void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	u64 iova, pa;
	int sqb, qidx;

	for (qidx = 0; qidx < hw->tx_queues; qidx++) {
		sq = &qset->sq[qidx];
		if (!sq->sqb_ptrs)
			continue;
		for (sqb = 0; sqb < sq->sqb_count; sqb++) {
			if (!sq->sqb_ptrs[sqb])
				continue;
			iova = sq->sqb_ptrs[sqb];
			pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
			dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			put_page(virt_to_page(phys_to_virt(pa)));
		}
		sq->sqb_count = 0;
	}
}
void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type)
{
	int pool_id, pool_start = 0, pool_end = 0, size = 0;
	u64 iova, pa;

	if (type == AURA_NIX_SQ) {
		pool_start = otx2_get_pool_idx(pfvf, type, 0);
		pool_end = pool_start + pfvf->hw.sqpool_cnt;
		size = pfvf->hw.sqb_size;
	}
	if (type == AURA_NIX_RQ) {
		pool_start = otx2_get_pool_idx(pfvf, type, 0);
		pool_end = pfvf->hw.rqpool_cnt;
		size = pfvf->rbsize;
	}

	/* Free SQB and RQB pointers from the aura pool */
	for (pool_id = pool_start; pool_id < pool_end; pool_id++) {
		iova = otx2_aura_allocptr(pfvf, pool_id);
		while (iova) {
			if (type == AURA_NIX_RQ)
				iova -= OTX2_HEAD_ROOM;

			pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
			dma_unmap_page_attrs(pfvf->dev, iova, size,
					     DMA_FROM_DEVICE,
					     DMA_ATTR_SKIP_CPU_SYNC);
			put_page(virt_to_page(phys_to_virt(pa)));
			iova = otx2_aura_allocptr(pfvf, pool_id);
		}
	}
}
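
/* Note on the loop above: otx2_aura_allocptr() pops buffer pointers from the
 * aura until it returns 0 (aura empty). RQ buffers were freed to the aura
 * with OTX2_HEAD_ROOM added, so the same offset is subtracted before
 * unmapping and releasing the backing page.
 */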
void otx2_aura_pool_free(struct otx2_nic *pfvf)
{
	struct otx2_pool *pool;
	int pool_id;

	if (!pfvf->qset.pool)
		return;

	for (pool_id = 0; pool_id < pfvf->hw.pool_cnt; pool_id++) {
		pool = &pfvf->qset.pool[pool_id];
		qmem_free(pfvf->dev, pool->stack);
		qmem_free(pfvf->dev, pool->fc_addr);
	}
	devm_kfree(pfvf->dev, pfvf->qset.pool);
	pfvf->qset.pool = NULL;
}
static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id,
			  int pool_id, int numptrs)
{
	struct npa_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[pool_id];

	/* Allocate memory for HW to update Aura count.
	 * Alloc one cache line, so that it fits all FC_STYPE modes.
	 */
	if (!pool->fc_addr) {
		err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
		if (err)
			return err;
	}

	/* Initialize this aura's context via AF */
	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aq) {
		/* Shared mbox memory buffer is full, flush it and retry */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			return err;
		aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
		if (!aq)
			return -ENOMEM;
	}

	aq->aura_id = aura_id;
	/* Will be filled by AF with correct pool context address */
	aq->aura.pool_addr = pool_id;
	aq->aura.pool_caching = 1;
	aq->aura.shift = ilog2(numptrs) - 8;
	aq->aura.count = numptrs;
	aq->aura.limit = numptrs;
	aq->aura.avg_level = 255;
	aq->aura.ena = 1;
	aq->aura.fc_ena = 1;
	aq->aura.fc_addr = pool->fc_addr->iova;
	aq->aura.fc_hyst_bits = 0; /* Store count on all updates */

	/* Enable backpressure for RQ aura */
	if (aura_id < pfvf->hw.rqpool_cnt) {
		aq->aura.bp_ena = 0;
		aq->aura.nix0_bpid = pfvf->bpid[0];
		/* Set backpressure level for RQ's Aura */
		aq->aura.bp = RQ_BP_LVL_AURA;
	}

	/* Fill AQ info */
	aq->ctype = NPA_AQ_CTYPE_AURA;
	aq->op = NPA_AQ_INSTOP_INIT;

	return 0;
}
static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
			  int stack_pages, int numptrs, int buf_size)
{
	struct npa_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err;

	pool = &pfvf->qset.pool[pool_id];
	/* Alloc memory for stack which is used to store buffer pointers */
	err = qmem_alloc(pfvf->dev, &pool->stack,
			 stack_pages, pfvf->hw.stack_pg_bytes);
	if (err)
		return err;

	pool->rbsize = buf_size;
	pool->rbpage_order = get_order(buf_size);

	/* Initialize this pool's context via AF */
	aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aq) {
		/* Shared mbox memory buffer is full, flush it and retry */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err) {
			qmem_free(pfvf->dev, pool->stack);
			return err;
		}
		aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
		if (!aq) {
			qmem_free(pfvf->dev, pool->stack);
			return -ENOMEM;
		}
	}

	aq->aura_id = pool_id;
	aq->pool.stack_base = pool->stack->iova;
	aq->pool.stack_caching = 1;
	aq->pool.ena = 1;
	aq->pool.buf_size = buf_size / 128;
	aq->pool.stack_max_pages = stack_pages;
	aq->pool.shift = ilog2(numptrs) - 8;
	aq->pool.ptr_start = 0;
	aq->pool.ptr_end = ~0ULL;

	/* Fill AQ info */
	aq->ctype = NPA_AQ_CTYPE_POOL;
	aq->op = NPA_AQ_INSTOP_INIT;

	return 0;
}
int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
{
	int qidx, pool_id, stack_pages, num_sqbs;
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	struct otx2_pool *pool;
	int err, ptr;
	s64 bufptr;

	/* Calculate number of SQBs needed.
	 *
	 * For a 128byte SQE, and 4K size SQB, 31 SQEs will fit in one SQB.
	 * Last SQE is used for pointing to next SQB.
	 */
	num_sqbs = (hw->sqb_size / 128) - 1;
	num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;

	/* Get number of stack pages needed */
	stack_pages =
		(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;

	for (qidx = 0; qidx < hw->tx_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
		/* Initialize aura context */
		err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
		if (err)
			goto fail;

		/* Initialize pool context */
		err = otx2_pool_init(pfvf, pool_id, stack_pages,
				     num_sqbs, hw->sqb_size);
		if (err)
			goto fail;
	}

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	/* Allocate pointers and free them to aura/pool */
	for (qidx = 0; qidx < hw->tx_queues; qidx++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
		pool = &pfvf->qset.pool[pool_id];

		sq = &qset->sq[qidx];
		sq->sqb_count = 0;
		sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(u64 *), GFP_KERNEL);
		if (!sq->sqb_ptrs)
			return -ENOMEM;

		for (ptr = 0; ptr < num_sqbs; ptr++) {
			bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
			if (bufptr <= 0)
				return bufptr;
			otx2_aura_freeptr(pfvf, pool_id, bufptr);
			sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
		}
		otx2_get_page(pool);
	}

	return 0;
fail:
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	otx2_aura_pool_free(pfvf);
	return err;
}
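
/* SQB math example for the function above (SQ depth is illustrative): with a
 * 4KB SQB and 128B SQE, 4096 / 128 - 1 = 31 usable SQEs per SQB (the last
 * slot points to the next SQB), so a 4096-entry SQ needs
 * (4096 + 31) / 31 = 133 SQBs.
 */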
int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int stack_pages, pool_id, rq;
	struct otx2_pool *pool;
	int err, ptr, num_ptrs;
	s64 bufptr;

	num_ptrs = pfvf->qset.rqe_cnt;

	stack_pages =
		(num_ptrs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;

	for (rq = 0; rq < hw->rx_queues; rq++) {
		pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, rq);
		/* Initialize aura context */
		err = otx2_aura_init(pfvf, pool_id, pool_id, num_ptrs);
		if (err)
			goto fail;
	}
	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
		err = otx2_pool_init(pfvf, pool_id, stack_pages,
				     num_ptrs, pfvf->rbsize);
		if (err)
			goto fail;
	}

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto fail;

	/* Allocate pointers and free them to aura/pool */
	for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
		pool = &pfvf->qset.pool[pool_id];
		for (ptr = 0; ptr < num_ptrs; ptr++) {
			bufptr = otx2_alloc_rbuf(pfvf, pool, GFP_KERNEL);
			if (bufptr <= 0)
				return bufptr;
			otx2_aura_freeptr(pfvf, pool_id,
					  bufptr + OTX2_HEAD_ROOM);
		}
		otx2_get_page(pool);
	}

	return 0;
fail:
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	otx2_aura_pool_free(pfvf);
	return err;
}
int otx2_config_npa(struct otx2_nic *pfvf)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct npa_lf_alloc_req *npalf;
	struct otx2_hw *hw = &pfvf->hw;
	int aura_cnt;

	/* Pool - Stack of free buffer pointers
	 * Aura - Allocates/frees pointers from/to pool for NIX DMA.
	 */

	if (!hw->pool_cnt)
		return -EINVAL;

	qset->pool = devm_kzalloc(pfvf->dev, sizeof(struct otx2_pool) *
				  hw->pool_cnt, GFP_KERNEL);
	if (!qset->pool)
		return -ENOMEM;

	/* Get memory to put this msg */
	npalf = otx2_mbox_alloc_msg_npa_lf_alloc(&pfvf->mbox);
	if (!npalf)
		return -ENOMEM;

	/* Set aura and pool counts */
	npalf->nr_pools = hw->pool_cnt;
	aura_cnt = ilog2(roundup_pow_of_two(hw->pool_cnt));
	npalf->aura_sz = (aura_cnt >= ilog2(128)) ? (aura_cnt - 6) : 1;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}
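
/* Sketch of the aura_sz computation above (encoding assumed from the
 * arithmetic, not spelled out in this file): the aura count is rounded up to
 * a power of two and ilog2() - 6 maps 128 auras to an aura_sz of 1, 256 to 2,
 * and so on; anything needing 128 or fewer auras uses the minimum value 1.
 */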
int otx2_detach_resources(struct mbox *mbox)
{
	struct rsrc_detach *detach;

	mutex_lock(&mbox->lock);
	detach = otx2_mbox_alloc_msg_detach_resources(mbox);
	if (!detach) {
		mutex_unlock(&mbox->lock);
		return -ENOMEM;
	}

	/* Detach everything */
	detach->partial = false;

	/* Send detach request to AF */
	otx2_mbox_msg_send(&mbox->mbox, 0);
	mutex_unlock(&mbox->lock);
	return 0;
}
EXPORT_SYMBOL(otx2_detach_resources);
int otx2_attach_npa_nix(struct otx2_nic *pfvf)
{
	struct rsrc_attach *attach;
	struct msg_req *msix;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	/* Get memory to put this msg */
	attach = otx2_mbox_alloc_msg_attach_resources(&pfvf->mbox);
	if (!attach) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	attach->npalf = true;
	attach->nixlf = true;

	/* Send attach request to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->nix_blkaddr = BLKADDR_NIX0;

	/* If the platform has two NIX blocks then LF may be
	 * allocated from NIX1.
	 */
	if (otx2_read64(pfvf, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_NIX1)) & 0x1FFULL)
		pfvf->nix_blkaddr = BLKADDR_NIX1;

	/* Get NPA and NIX MSIX vector offsets */
	msix = otx2_mbox_alloc_msg_msix_offset(&pfvf->mbox);
	if (!msix) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}
	mutex_unlock(&pfvf->mbox.lock);

	if (pfvf->hw.npa_msixoff == MSIX_VECTOR_INVALID ||
	    pfvf->hw.nix_msixoff == MSIX_VECTOR_INVALID) {
		dev_err(pfvf->dev,
			"RVUPF: Invalid MSIX vector offset for NPA/NIX\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(otx2_attach_npa_nix);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa)
{
	struct hwctx_disable_req *req;

	mutex_lock(&mbox->lock);
	/* Request AQ to disable this context */
	if (npa)
		req = otx2_mbox_alloc_msg_npa_hwctx_disable(mbox);
	else
		req = otx2_mbox_alloc_msg_nix_hwctx_disable(mbox);

	if (!req) {
		mutex_unlock(&mbox->lock);
		return;
	}

	req->ctype = type;

	if (otx2_sync_mbox_msg(mbox))
		dev_err(mbox->pfvf->dev, "%s failed to disable context\n",
			__func__);

	mutex_unlock(&mbox->lock);
}
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable)
{
	struct nix_bp_cfg_req *req;

	if (enable)
		req = otx2_mbox_alloc_msg_nix_bp_enable(&pfvf->mbox);
	else
		req = otx2_mbox_alloc_msg_nix_bp_disable(&pfvf->mbox);

	if (!req)
		return -ENOMEM;

	req->chan_base = 0;
	req->chan_cnt = 1;
	req->bpid_per_chan = 0;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}
/* Mbox message handlers */
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
			    struct cgx_stats_rsp *rsp)
{
	int id;

	for (id = 0; id < CGX_RX_STATS_COUNT; id++)
		pfvf->hw.cgx_rx_stats[id] = rsp->rx_stats[id];
	for (id = 0; id < CGX_TX_STATS_COUNT; id++)
		pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id];
}

void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
				  struct nix_txsch_alloc_rsp *rsp)
{
	int lvl, schq;

	/* Setup transmit scheduler list */
	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++)
		for (schq = 0; schq < rsp->schq[lvl]; schq++)
			pf->hw.txschq_list[lvl][schq] =
				rsp->schq_list[lvl][schq];
}
EXPORT_SYMBOL(mbox_handler_nix_txsch_alloc);

void mbox_handler_npa_lf_alloc(struct otx2_nic *pfvf,
			       struct npa_lf_alloc_rsp *rsp)
{
	pfvf->hw.stack_pg_ptrs = rsp->stack_pg_ptrs;
	pfvf->hw.stack_pg_bytes = rsp->stack_pg_bytes;
}
EXPORT_SYMBOL(mbox_handler_npa_lf_alloc);

void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf,
			       struct nix_lf_alloc_rsp *rsp)
{
	pfvf->hw.sqb_size = rsp->sqb_size;
	pfvf->hw.rx_chan_base = rsp->rx_chan_base;
	pfvf->hw.tx_chan_base = rsp->tx_chan_base;
	pfvf->hw.lso_tsov4_idx = rsp->lso_tsov4_idx;
	pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx;
}
EXPORT_SYMBOL(mbox_handler_nix_lf_alloc);

void mbox_handler_msix_offset(struct otx2_nic *pfvf,
			      struct msix_offset_rsp *rsp)
{
	pfvf->hw.npa_msixoff = rsp->npa_msixoff;
	pfvf->hw.nix_msixoff = rsp->nix_msixoff;
}
EXPORT_SYMBOL(mbox_handler_msix_offset);

void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
				struct nix_bp_cfg_rsp *rsp)
{
	int chan, chan_id;

	for (chan = 0; chan < rsp->chan_cnt; chan++) {
		chan_id = ((rsp->chan_bpid[chan] >> 10) & 0x7F);
		pfvf->bpid[chan_id] = rsp->chan_bpid[chan] & 0x3FF;
	}
}
EXPORT_SYMBOL(mbox_handler_nix_bp_enable);
void otx2_free_cints(struct otx2_nic *pfvf, int n)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	int irq, qidx;

	for (qidx = 0, irq = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
	     qidx < n;
	     qidx++, irq++) {
		int vector = pci_irq_vector(pfvf->pdev, irq);

		irq_set_affinity_hint(vector, NULL);
		free_cpumask_var(hw->affinity_mask[irq]);
		free_irq(vector, &qset->napi[qidx]);
	}
}
void otx2_set_cints_affinity(struct otx2_nic *pfvf)
{
	struct otx2_hw *hw = &pfvf->hw;
	int vec, cpu, irq, cint;

	vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
	cpu = cpumask_first(cpu_online_mask);

	/* CQ interrupts */
	for (cint = 0; cint < pfvf->hw.cint_cnt; cint++, vec++) {
		if (!alloc_cpumask_var(&hw->affinity_mask[vec], GFP_KERNEL))
			return;

		cpumask_set_cpu(cpu, hw->affinity_mask[vec]);

		irq = pci_irq_vector(pfvf->pdev, vec);
		irq_set_affinity_hint(irq, hw->affinity_mask[vec]);

		cpu = cpumask_next(cpu, cpu_online_mask);
		if (unlikely(cpu >= nr_cpu_ids))
			cpu = 0;
	}
}
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
int __weak								\
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf,		\
				  struct _req_type *req,		\
				  struct _rsp_type *rsp)		\
{									\
	/* Nothing to do here */					\
	return 0;							\
}									\
EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
#undef M