1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 - 2019 Intel Corporation
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of version 2 of the GNU General Public License as
15 * published by the Free Software Foundation.
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22 * The full GNU General Public License is included in this distribution in the
23 * file called COPYING.
25 * Contact Information:
26 * Intel Linux Wireless <linuxwifi@intel.com>
27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 - 2019 Intel Corporation
35 * All rights reserved.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
41 * * Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * * Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in
45 * the documentation and/or other materials provided with the
47 * * Neither the name Intel Corporation nor the names of its
48 * contributors may be used to endorse or promote products derived
49 * from this software without specific prior written permission.
51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64 #include <linux/sched.h>
65 #include <linux/wait.h>
66 #include <linux/gfp.h>
71 #include "iwl-op-mode.h"
72 #include "iwl-context-info-gen3.h"
74 /******************************************************************************
78 ******************************************************************************/
81 * Rx theory of operation
83 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
84 * each of which point to Receive Buffers to be filled by the NIC. These get
85 * used not only for Rx frames, but for any command response or notification
86 * from the NIC. The driver and NIC manage the Rx buffers by means
87 * of indexes into the circular buffer.
90 * The host/firmware share two index registers for managing the Rx buffers.
92 * The READ index maps to the first position that the firmware may be writing
93 * to -- the driver can read up to (but not including) this position and get
95 * The READ index is managed by the firmware once the card is enabled.
97 * The WRITE index maps to the last position the driver has read from -- the
98 * position preceding WRITE is the last slot the firmware can place a packet.
100 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
103 * During initialization, the host sets up the READ queue position to the first
104 * INDEX position, and WRITE to the last (READ - 1 wrapped)
106 * When the firmware places a packet in a buffer, it will advance the READ index
107 * and fire the RX interrupt. The driver can then query the READ index and
108 * process as many packets as possible, moving the WRITE index forward as it
109 * resets the Rx queue buffers with new memory.
111 * The management in the driver is as follows:
112 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
113 * When the interrupt handler is called, the request is processed.
114 * The page is either stolen - transferred to the upper layer
115 * or reused - added immediately to the iwl->rxq->rx_free list.
116 * + When the page is stolen - the driver updates the matching queue's used
117 * count, detaches the RBD and transfers it to the queue used list.
118 * When there are two used RBDs - they are transferred to the allocator empty
119 * list. Work is then scheduled for the allocator to start allocating
121 * When there are another 6 used RBDs - they are transferred to the allocator
122 * empty list and the driver tries to claim the pre-allocated buffers and
123 * add them to iwl->rxq->rx_free. If it fails - it continues to claim them
125 * When there are 8+ buffers in the free list - either from allocation or from
126 * 8 reused unstolen pages - restock is called to update the FW and indexes.
127 * + In order to make sure the allocator always has RBDs to use for allocation
128 * the allocator has an initial pool of size num_queues * (8 - 2) - the
129 * maximum number of RBDs that can be missing per allocation request (a request
130 * is posted with 2 empty RBDs; there is no guarantee when the other 6 are supplied).
131 * The queues supply the recycling of the remaining RBDs.
132 * + A received packet is processed and handed to the kernel network stack,
133 * detached from the iwl->rxq. The driver 'processed' index is updated.
134 * + If there are no allocated buffers in iwl->rxq->rx_free,
135 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
136 * If there were enough free buffers and RX_STALLED is set it is cleared.
141 * iwl_rxq_alloc() Allocates rx_free
142 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
143 * iwl_pcie_rxq_restock.
144 * Used only during initialization.
145 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
146 * queue, updates firmware pointers, and updates
148 * iwl_pcie_rx_allocator() Background work for allocating pages.
150 * -- enable interrupts --
151 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
152 * READ INDEX, detaching the SKB from the pool.
153 * Moves the packet buffer from queue to rx_used.
154 * Posts and claims requests to the allocator.
155 * Calls iwl_pcie_rxq_restock to refill any empty
161 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
163 * Regular Receive interrupt:
165 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
166 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
168 * rxq.queue -> rxq.rx_free -> rxq.queue
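*
* An illustrative sketch (not driver code) of the index arithmetic used by
* iwl_rxq_space() below. With a power-of-two queue size the number of free
* slots can be computed with a simple mask, and one slot is always kept
* unused so that an empty queue can be told apart from a full one:
*
*	static inline int example_rxq_space(u32 read, u32 write, u32 size)
*	{
*		// size must be a power of two, e.g. 256 or 512
*		return (read - write - 1) & (size - 1);
*	}
*
* For example, with size = 256: read = 10 and write = 9 give 0 free slots
* (the ring is fully stocked), while read = 10 and write = 200 give 65.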
174 * iwl_rxq_space - Return number of free slots available in queue.
176 static int iwl_rxq_space(const struct iwl_rxq *rxq)
178 /* Make sure rx queue size is a power of 2 */
179 WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
182 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
183 * between empty and completely full queues.
184 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
185 * defined for negative dividends.
187 return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
191 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
193 static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
195 return cpu_to_le32((u32)(dma_addr >> 8));
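/*
 * Illustrative note, not driver code: the legacy RBD format stores the DMA
 * address shifted right by 8 bits, so e.g. a page-aligned address of
 * 0x12345000 is written to the RBD as 0x00123450.  This is only lossless
 * because the Rx buffers handed to the device are at least 256-byte
 * aligned, i.e. the low 8 bits of dma_addr are always zero.
 */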
199 * iwl_pcie_rx_stop - stops the Rx DMA
201 int iwl_pcie_rx_stop(struct iwl_trans *trans)
203 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
204 /* TODO: remove this for 22560 once fw does it */
205 iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
206 return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
207 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
208 } else if (trans->cfg->mq_rx_supported) {
209 iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
210 return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
211 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
213 iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
214 return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
215 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
221 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
223 static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
228 lockdep_assert_held(&rxq->lock);
231 * explicitly wake up the NIC if:
232 * 1. shadow registers aren't enabled
233 * 2. there is a chance that the NIC is asleep
235 if (!trans->cfg->base_params->shadow_reg_enable &&
236 test_bit(STATUS_TPOWER_PMI, &trans->status)) {
237 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
239 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
240 IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
242 iwl_set_bit(trans, CSR_GP_CNTRL,
243 BIT(trans->cfg->csr->flag_mac_access_req));
244 rxq->need_update = true;
249 rxq->write_actual = round_down(rxq->write, 8);
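/*
 * Worked example of the rounding above (illustrative numbers): the device
 * is only told about the write pointer in multiples of 8, so with
 * rxq->write == 13 the value handed to the hardware is
 * round_down(13, 8) == 8; the remaining buffers are announced once the
 * write pointer crosses the next multiple of 8.
 */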
250 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
251 iwl_write32(trans, HBUS_TARG_WRPTR,
253 ((FIRST_RX_QUEUE + rxq->id) << 16)));
254 else if (trans->cfg->mq_rx_supported)
255 iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
258 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
261 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
263 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
266 for (i = 0; i < trans->num_rx_queues; i++) {
267 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
269 if (!rxq->need_update)
271 spin_lock(&rxq->lock);
272 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
273 rxq->need_update = false;
274 spin_unlock(&rxq->lock);
278 static void iwl_pcie_restock_bd(struct iwl_trans *trans,
280 struct iwl_rx_mem_buffer *rxb)
282 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
283 struct iwl_rx_transfer_desc *bd = rxq->bd;
285 BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
287 bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
288 bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
290 __le64 *bd = rxq->bd;
292 bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
295 IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
296 (u32)rxb->vid, rxq->id, rxq->write);
300 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
302 static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
305 struct iwl_rx_mem_buffer *rxb;
308 * If the device isn't enabled - no need to try to add buffers...
309 * This can happen when we stop the device and still have an interrupt
310 * pending. We stop the APM before we sync the interrupts because we
311 * have to (see comment there). On the other hand, since the APM is
312 * stopped, we cannot access the HW (in particular not prph).
313 * So don't try to restock if the APM has been already stopped.
315 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
318 spin_lock(&rxq->lock);
319 while (rxq->free_count) {
320 /* Get next free Rx buffer, remove from free list */
321 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
323 list_del(&rxb->list);
324 rxb->invalid = false;
325 /* first 12 bits are expected to be empty */
326 WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
327 /* Point to Rx buffer via next RBD in circular buffer */
328 iwl_pcie_restock_bd(trans, rxq, rxb);
329 rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
332 spin_unlock(&rxq->lock);
335 * If we've added more space for the firmware to place data, tell it.
336 * Increment device's write pointer in multiples of 8.
338 if (rxq->write_actual != (rxq->write & ~0x7)) {
339 spin_lock(&rxq->lock);
340 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
341 spin_unlock(&rxq->lock);
346 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
348 static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
351 struct iwl_rx_mem_buffer *rxb;
354 * If the device isn't enabled - no need to try to add buffers...
355 * This can happen when we stop the device and still have an interrupt
356 * pending. We stop the APM before we sync the interrupts because we
357 * have to (see comment there). On the other hand, since the APM is
358 * stopped, we cannot access the HW (in particular not prph).
359 * So don't try to restock if the APM has been already stopped.
361 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
364 spin_lock(&rxq->lock);
365 while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
366 __le32 *bd = (__le32 *)rxq->bd;
367 /* The overwritten rxb must be a used one */
368 rxb = rxq->queue[rxq->write];
369 BUG_ON(rxb && rxb->page);
371 /* Get next free Rx buffer, remove from free list */
372 rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
374 list_del(&rxb->list);
375 rxb->invalid = false;
377 /* Point to Rx buffer via next RBD in circular buffer */
378 bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
379 rxq->queue[rxq->write] = rxb;
380 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
383 spin_unlock(&rxq->lock);
385 /* If we've added more space for the firmware to place data, tell it.
386 * Increment device's write pointer in multiples of 8. */
387 if (rxq->write_actual != (rxq->write & ~0x7)) {
388 spin_lock(&rxq->lock);
389 iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
390 spin_unlock(&rxq->lock);
395 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
397 * If there are slots in the RX queue that need to be restocked,
398 * and we have free pre-allocated buffers, fill the ranks as much
399 * as we can, pulling from rx_free.
401 * This moves the 'write' index forward to catch up with 'processed', and
402 * also updates the memory address in the firmware to reference the new
406 void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
408 if (trans->cfg->mq_rx_supported)
409 iwl_pcie_rxmq_restock(trans, rxq);
411 iwl_pcie_rxsq_restock(trans, rxq);
415 * iwl_pcie_rx_alloc_page - allocates and returns a page.
418 static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
421 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
423 gfp_t gfp_mask = priority;
425 if (trans_pcie->rx_page_order > 0)
426 gfp_mask |= __GFP_COMP;
428 /* Alloc a new receive buffer */
429 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
432 IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
433 trans_pcie->rx_page_order);
435 * Issue an error if we don't have enough pre-allocated
438 if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
440 "Failed to alloc_pages\n");
447 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
449 * A used RBD is an Rx buffer that has been given to the stack. To use it again
450 * a page must be allocated and the RBD must point to the page. This function
451 * doesn't change the HW pointer but handles the list of pages that is used by
452 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
455 void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
458 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
459 struct iwl_rx_mem_buffer *rxb;
463 spin_lock(&rxq->lock);
464 if (list_empty(&rxq->rx_used)) {
465 spin_unlock(&rxq->lock);
468 spin_unlock(&rxq->lock);
470 /* Alloc a new receive buffer */
471 page = iwl_pcie_rx_alloc_page(trans, priority);
475 spin_lock(&rxq->lock);
477 if (list_empty(&rxq->rx_used)) {
478 spin_unlock(&rxq->lock);
479 __free_pages(page, trans_pcie->rx_page_order);
482 rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
484 list_del(&rxb->list);
485 spin_unlock(&rxq->lock);
489 /* Get physical address of the RB */
491 dma_map_page(trans->dev, page, 0,
492 PAGE_SIZE << trans_pcie->rx_page_order,
494 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
496 spin_lock(&rxq->lock);
497 list_add(&rxb->list, &rxq->rx_used);
498 spin_unlock(&rxq->lock);
499 __free_pages(page, trans_pcie->rx_page_order);
503 spin_lock(&rxq->lock);
505 list_add_tail(&rxb->list, &rxq->rx_free);
508 spin_unlock(&rxq->lock);
512 void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
514 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
517 for (i = 0; i < RX_POOL_SIZE; i++) {
518 if (!trans_pcie->rx_pool[i].page)
520 dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
521 PAGE_SIZE << trans_pcie->rx_page_order,
523 __free_pages(trans_pcie->rx_pool[i].page,
524 trans_pcie->rx_page_order);
525 trans_pcie->rx_pool[i].page = NULL;
530 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
532 * Allocates 8 pages for each received request
533 * Called as a scheduled work item.
535 static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
537 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
538 struct iwl_rb_allocator *rba = &trans_pcie->rba;
539 struct list_head local_empty;
540 int pending = atomic_read(&rba->req_pending);
542 IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);
544 /* If we were scheduled - there is at least one request */
545 spin_lock(&rba->lock);
546 /* swap out the rba->rbd_empty to a local list */
547 list_replace_init(&rba->rbd_empty, &local_empty);
548 spin_unlock(&rba->lock);
552 LIST_HEAD(local_allocated);
553 gfp_t gfp_mask = GFP_KERNEL;
555 /* Do not post a warning if there are only a few requests */
556 if (pending < RX_PENDING_WATERMARK)
557 gfp_mask |= __GFP_NOWARN;
559 for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
560 struct iwl_rx_mem_buffer *rxb;
563 /* List should never be empty - each reused RBD is
564 * returned to the list, and initial pool covers any
565 * possible gap between the time the page is allocated
566 * and the time the RBD is added.
568 BUG_ON(list_empty(&local_empty));
569 /* Get the first rxb from the rbd list */
570 rxb = list_first_entry(&local_empty,
571 struct iwl_rx_mem_buffer, list);
574 /* Alloc a new receive buffer */
575 page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
580 /* Get physical address of the RB */
581 rxb->page_dma = dma_map_page(trans->dev, page, 0,
582 PAGE_SIZE << trans_pcie->rx_page_order,
584 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
586 __free_pages(page, trans_pcie->rx_page_order);
590 /* move the allocated entry to the out list */
591 list_move(&rxb->list, &local_allocated);
595 atomic_dec(&rba->req_pending);
599 pending = atomic_read(&rba->req_pending);
602 "Got more pending allocation requests = %d\n",
606 spin_lock(&rba->lock);
607 /* add the allocated rbds to the allocator allocated list */
608 list_splice_tail(&local_allocated, &rba->rbd_allocated);
609 /* get more empty RBDs for current pending requests */
610 list_splice_tail_init(&rba->rbd_empty, &local_empty);
611 spin_unlock(&rba->lock);
613 atomic_inc(&rba->req_ready);
617 spin_lock(&rba->lock);
618 /* return unused rbds to the allocator empty list */
619 list_splice_tail(&local_empty, &rba->rbd_empty);
620 spin_unlock(&rba->lock);
622 IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
626 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
628 * Called by the queue when it has posted an allocation request and
629 * has freed 8 RBDs in order to restock itself.
630 * This function directly moves the allocated RBs to the queue's ownership
631 * and updates the relevant counters.
633 static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
636 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
637 struct iwl_rb_allocator *rba = &trans_pcie->rba;
640 lockdep_assert_held(&rxq->lock);
643 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
644 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
645 * function will return early, as there are no ready requests.
646 * atomic_dec_if_positive will perform the *actual* decrement only if
647 * req_ready > 0, i.e. there are ready requests and the function
648 * hands one request to the caller.
650 if (atomic_dec_if_positive(&rba->req_ready) < 0)
653 spin_lock(&rba->lock);
654 for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
655 /* Get next free Rx buffer, remove it from free list */
656 struct iwl_rx_mem_buffer *rxb =
657 list_first_entry(&rba->rbd_allocated,
658 struct iwl_rx_mem_buffer, list);
660 list_move(&rxb->list, &rxq->rx_free);
662 spin_unlock(&rba->lock);
664 rxq->used_count -= RX_CLAIM_REQ_ALLOC;
665 rxq->free_count += RX_CLAIM_REQ_ALLOC;
668 void iwl_pcie_rx_allocator_work(struct work_struct *data)
670 struct iwl_rb_allocator *rba_p =
671 container_of(data, struct iwl_rb_allocator, rx_alloc);
672 struct iwl_trans_pcie *trans_pcie =
673 container_of(rba_p, struct iwl_trans_pcie, rba);
675 iwl_pcie_rx_allocator(trans_pcie->trans);
678 static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
680 struct iwl_rx_transfer_desc *rx_td;
683 return sizeof(*rx_td);
685 return trans->cfg->mq_rx_supported ? sizeof(__le64) :
689 static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
692 struct device *dev = trans->dev;
693 bool use_rx_td = (trans->cfg->device_family >=
694 IWL_DEVICE_FAMILY_22560);
695 int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
698 dma_free_coherent(trans->dev,
699 free_size * rxq->queue_size,
700 rxq->bd, rxq->bd_dma);
704 rxq->rb_stts_dma = 0;
708 dma_free_coherent(trans->dev,
709 (use_rx_td ? sizeof(*rxq->cd) :
710 sizeof(__le32)) * rxq->queue_size,
711 rxq->used_bd, rxq->used_bd_dma);
712 rxq->used_bd_dma = 0;
715 if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
719 dma_free_coherent(dev, sizeof(__le16),
720 rxq->tr_tail, rxq->tr_tail_dma);
721 rxq->tr_tail_dma = 0;
725 dma_free_coherent(dev, sizeof(__le16),
726 rxq->cr_tail, rxq->cr_tail_dma);
727 rxq->cr_tail_dma = 0;
731 static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
734 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
735 struct device *dev = trans->dev;
738 bool use_rx_td = (trans->cfg->device_family >=
739 IWL_DEVICE_FAMILY_22560);
740 size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
741 sizeof(struct iwl_rb_status);
743 spin_lock_init(&rxq->lock);
744 if (trans->cfg->mq_rx_supported)
745 rxq->queue_size = MQ_RX_TABLE_SIZE;
747 rxq->queue_size = RX_QUEUE_SIZE;
749 free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
752 * Allocate the circular buffer of Read Buffer Descriptors
755 rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
756 &rxq->bd_dma, GFP_KERNEL);
760 if (trans->cfg->mq_rx_supported) {
761 rxq->used_bd = dma_alloc_coherent(dev,
762 (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
769 rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
771 trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
776 /* Allocate the driver's pointer to TR tail */
777 rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
778 &rxq->tr_tail_dma, GFP_KERNEL);
782 /* Allocate the driver's pointer to CR tail */
783 rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
784 &rxq->cr_tail_dma, GFP_KERNEL);
788 * W/A for a 22560 device step Z0 bug: the value must be non-zero
789 * TODO: remove this when we stop supporting Z0
791 *rxq->cr_tail = cpu_to_le16(500);
796 for (i = 0; i < trans->num_rx_queues; i++) {
797 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
799 iwl_pcie_free_rxq_dma(trans, rxq);
805 int iwl_pcie_rx_alloc(struct iwl_trans *trans)
807 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
808 struct iwl_rb_allocator *rba = &trans_pcie->rba;
810 size_t rb_stts_size = trans->cfg->device_family >=
811 IWL_DEVICE_FAMILY_22560 ?
812 sizeof(__le16) : sizeof(struct iwl_rb_status);
814 if (WARN_ON(trans_pcie->rxq))
817 trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
819 if (!trans_pcie->rxq)
822 spin_lock_init(&rba->lock);
825 * Allocate the driver's pointer to receive buffer status.
826 * Allocate for all queues continuously (HW requirement).
828 trans_pcie->base_rb_stts =
829 dma_alloc_coherent(trans->dev,
830 rb_stts_size * trans->num_rx_queues,
831 &trans_pcie->base_rb_stts_dma,
833 if (!trans_pcie->base_rb_stts) {
838 for (i = 0; i < trans->num_rx_queues; i++) {
839 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
842 ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
849 if (trans_pcie->base_rb_stts) {
850 dma_free_coherent(trans->dev,
851 rb_stts_size * trans->num_rx_queues,
852 trans_pcie->base_rb_stts,
853 trans_pcie->base_rb_stts_dma);
854 trans_pcie->base_rb_stts = NULL;
855 trans_pcie->base_rb_stts_dma = 0;
857 kfree(trans_pcie->rxq);
862 static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
864 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
867 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
869 switch (trans_pcie->rx_buf_size) {
871 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
874 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
877 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
881 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
884 if (!iwl_trans_grab_nic_access(trans, &flags))
888 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
889 /* reset and flush pointers */
890 iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
891 iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
892 iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
894 /* Reset driver's Rx queue write index */
895 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
897 /* Tell device where to find RBD circular buffer in DRAM */
898 iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
899 (u32)(rxq->bd_dma >> 8));
901 /* Tell device where in DRAM to update its Rx status */
902 iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
903 rxq->rb_stts_dma >> 4);
906 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of a HW bug in
907 * the credit mechanism of the 5000 HW RX FIFO
908 * Direct rx interrupts to hosts
909 * Rx buffer size 4k, 8k or 12k
913 iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
914 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
915 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
916 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
918 (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
919 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
921 iwl_trans_release_nic_access(trans, &flags);
923 /* Set interrupt coalescing timer to default (2048 usecs) */
924 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
926 /* W/A for interrupt coalescing bug in 7260 and 3160 */
927 if (trans->cfg->host_interrupt_operation_mode)
928 iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
931 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
933 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
934 u32 rb_size, enabled = 0;
938 switch (trans_pcie->rx_buf_size) {
940 rb_size = RFH_RXF_DMA_RB_SIZE_2K;
943 rb_size = RFH_RXF_DMA_RB_SIZE_4K;
946 rb_size = RFH_RXF_DMA_RB_SIZE_8K;
949 rb_size = RFH_RXF_DMA_RB_SIZE_12K;
953 rb_size = RFH_RXF_DMA_RB_SIZE_4K;
956 if (!iwl_trans_grab_nic_access(trans, &flags))
960 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
961 /* disable free and used rx queue operation */
962 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
964 for (i = 0; i < trans->num_rx_queues; i++) {
965 /* Tell device where to find RBD free table in DRAM */
966 iwl_write_prph64_no_grab(trans,
967 RFH_Q_FRBDCB_BA_LSB(i),
968 trans_pcie->rxq[i].bd_dma);
969 /* Tell device where to find RBD used table in DRAM */
970 iwl_write_prph64_no_grab(trans,
971 RFH_Q_URBDCB_BA_LSB(i),
972 trans_pcie->rxq[i].used_bd_dma);
973 /* Tell device where in DRAM to update its Rx status */
974 iwl_write_prph64_no_grab(trans,
975 RFH_Q_URBD_STTS_WPTR_LSB(i),
976 trans_pcie->rxq[i].rb_stts_dma);
977 /* Reset device index tables */
978 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
979 iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
980 iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
982 enabled |= BIT(i) | BIT(i + 16);
987 * Rx buffer size 4k, 8k or 12k
989 * Drop frames that exceed RB size
992 iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
993 RFH_DMA_EN_ENABLE_VAL | rb_size |
994 RFH_RXF_DMA_MIN_RB_4_8 |
995 RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
996 RFH_RXF_DMA_RBDCB_SIZE_512);
999 * Activate DMA snooping.
1000 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
1001 * Default queue is 0
1003 iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
1004 RFH_GEN_CFG_RFH_DMA_SNOOP |
1005 RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
1006 RFH_GEN_CFG_SERVICE_DMA_SNOOP |
1007 RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
1008 trans->cfg->integrated ?
1009 RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
1010 RFH_GEN_CFG_RB_CHUNK_SIZE_128));
1011 /* Enable the relevant rx queues */
1012 iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
1014 iwl_trans_release_nic_access(trans, &flags);
1016 /* Set interrupt coalescing timer to default (2048 usecs) */
1017 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
1020 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
1022 lockdep_assert_held(&rxq->lock);
1024 INIT_LIST_HEAD(&rxq->rx_free);
1025 INIT_LIST_HEAD(&rxq->rx_used);
1026 rxq->free_count = 0;
1027 rxq->used_count = 0;
1030 int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
1036 int _iwl_pcie_rx_init(struct iwl_trans *trans)
1038 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1039 struct iwl_rxq *def_rxq;
1040 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1041 int i, err, queue_size, allocator_pool_size, num_alloc;
1043 if (!trans_pcie->rxq) {
1044 err = iwl_pcie_rx_alloc(trans);
1048 def_rxq = trans_pcie->rxq;
1050 cancel_work_sync(&rba->rx_alloc);
1052 spin_lock(&rba->lock);
1053 atomic_set(&rba->req_pending, 0);
1054 atomic_set(&rba->req_ready, 0);
1055 INIT_LIST_HEAD(&rba->rbd_allocated);
1056 INIT_LIST_HEAD(&rba->rbd_empty);
1057 spin_unlock(&rba->lock);
1059 /* free all first - we might be reconfigured for a different size */
1060 iwl_pcie_free_rbs_pool(trans);
1062 for (i = 0; i < RX_QUEUE_SIZE; i++)
1063 def_rxq->queue[i] = NULL;
1065 for (i = 0; i < trans->num_rx_queues; i++) {
1066 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1068 spin_lock(&rxq->lock);
1070 * Set read write pointer to reflect that we have processed
1071 * and used all buffers, but have not restocked the Rx queue
1072 * with fresh buffers
1076 rxq->write_actual = 0;
1077 memset(rxq->rb_stts, 0,
1078 (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
1079 sizeof(__le16) : sizeof(struct iwl_rb_status));
1081 iwl_pcie_rx_init_rxb_lists(rxq);
1083 if (!rxq->napi.poll)
1084 netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
1085 iwl_pcie_dummy_napi_poll, 64);
1087 spin_unlock(&rxq->lock);
1090 /* move the pool to the default queue and allocator ownerships */
1091 queue_size = trans->cfg->mq_rx_supported ?
1092 MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
1093 allocator_pool_size = trans->num_rx_queues *
1094 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
1095 num_alloc = queue_size + allocator_pool_size;
1096 BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
1097 ARRAY_SIZE(trans_pcie->rx_pool));
1098 for (i = 0; i < num_alloc; i++) {
1099 struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
1101 if (i < allocator_pool_size)
1102 list_add(&rxb->list, &rba->rbd_empty);
1104 list_add(&rxb->list, &def_rxq->rx_used);
1105 trans_pcie->global_table[i] = rxb;
1106 rxb->vid = (u16)(i + 1);
1107 rxb->invalid = true;
1110 iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
1115 int iwl_pcie_rx_init(struct iwl_trans *trans)
1117 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1118 int ret = _iwl_pcie_rx_init(trans);
1123 if (trans->cfg->mq_rx_supported)
1124 iwl_pcie_rx_mq_hw_init(trans);
1126 iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
1128 iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
1130 spin_lock(&trans_pcie->rxq->lock);
1131 iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
1132 spin_unlock(&trans_pcie->rxq->lock);
1137 int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
1139 /* Set interrupt coalescing timer to default (2048 usecs) */
1140 iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
1143 * We don't configure the RFH.
1144 * Restock will be done at alive, after firmware configured the RFH.
1146 return _iwl_pcie_rx_init(trans);
1149 void iwl_pcie_rx_free(struct iwl_trans *trans)
1151 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1152 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1154 size_t rb_stts_size = trans->cfg->device_family >=
1155 IWL_DEVICE_FAMILY_22560 ?
1156 sizeof(__le16) : sizeof(struct iwl_rb_status);
1159 * if rxq is NULL, it means that nothing has been allocated,
1162 if (!trans_pcie->rxq) {
1163 IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
1167 cancel_work_sync(&rba->rx_alloc);
1169 iwl_pcie_free_rbs_pool(trans);
1171 if (trans_pcie->base_rb_stts) {
1172 dma_free_coherent(trans->dev,
1173 rb_stts_size * trans->num_rx_queues,
1174 trans_pcie->base_rb_stts,
1175 trans_pcie->base_rb_stts_dma);
1176 trans_pcie->base_rb_stts = NULL;
1177 trans_pcie->base_rb_stts_dma = 0;
1180 for (i = 0; i < trans->num_rx_queues; i++) {
1181 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1183 iwl_pcie_free_rxq_dma(trans, rxq);
1186 netif_napi_del(&rxq->napi);
1188 kfree(trans_pcie->rxq);
1191 static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
1192 struct iwl_rb_allocator *rba)
1194 spin_lock(&rba->lock);
1195 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1196 spin_unlock(&rba->lock);
1200 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
1202 * Called when an RBD can be reused. The RBD is transferred to the allocator.
1203 * When there are 2 empty RBDs - a request for allocation is posted
1205 static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1206 struct iwl_rx_mem_buffer *rxb,
1207 struct iwl_rxq *rxq, bool emergency)
1209 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1210 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1212 /* Move the RBD to the used list, will be moved to allocator in batches
1213 * before claiming or posting a request*/
1214 list_add_tail(&rxb->list, &rxq->rx_used);
1216 if (unlikely(emergency))
1219 /* Count the allocator owned RBDs */
1222 /* If we have RX_POST_REQ_ALLOC newly released rx buffers -
1223 * issue a request to the allocator. Modulo RX_CLAIM_REQ_ALLOC is
1224 * used for the case where we failed to claim RX_CLAIM_REQ_ALLOC
1225 * buffers but still need to post another request.
1227 if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
1228 /* Move the 2 RBDs to the allocator ownership.
1229 Allocator has another 6 from pool for the request completion*/
1230 iwl_pcie_rx_move_to_allocator(rxq, rba);
1232 atomic_inc(&rba->req_pending);
1233 queue_work(rba->alloc_wq, &rba->rx_alloc);
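/*
 * Worked example with the constants used in this driver
 * (RX_POST_REQ_ALLOC == 2, RX_CLAIM_REQ_ALLOC == 8): the request above is
 * posted whenever used_count % 8 == 2, i.e. at used_count 2, 10, 18, ...
 * Each request is eventually answered with 8 freshly allocated RBDs that
 * the queue claims back in iwl_pcie_rx_allocator_get().
 */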
1237 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
1238 struct iwl_rxq *rxq,
1239 struct iwl_rx_mem_buffer *rxb,
1243 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1244 struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
1245 bool page_stolen = false;
1246 int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
1252 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1254 while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1255 struct iwl_rx_packet *pkt;
1258 int index, cmd_index, len;
1259 struct iwl_rx_cmd_buffer rxcb = {
1261 ._rx_page_order = trans_pcie->rx_page_order,
1263 ._page_stolen = false,
1264 .truesize = max_len,
1267 pkt = rxb_addr(&rxcb);
1269 if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
1271 "Q %d: RB end marker at offset %d\n",
1276 WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1277 FH_RSCSR_RXQ_POS != rxq->id,
1278 "frame on invalid queue - is on %d and indicates %d\n",
1280 (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1284 "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
1286 iwl_get_cmd_string(trans,
1287 iwl_cmd_id(pkt->hdr.cmd,
1290 pkt->hdr.group_id, pkt->hdr.cmd,
1291 le16_to_cpu(pkt->hdr.sequence));
1293 len = iwl_rx_packet_len(pkt);
1294 len += sizeof(u32); /* account for status word */
1295 trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
1296 trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
1298 /* Reclaim a command buffer only if this packet is a response
1299 * to a (driver-originated) command.
1300 * If the packet (e.g. Rx frame) originated from uCode,
1301 * there is no command buffer to reclaim.
1302 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1303 * but apparently a few don't get set; catch them here. */
1304 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1305 if (reclaim && !pkt->hdr.group_id) {
1308 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1309 if (trans_pcie->no_reclaim_cmds[i] ==
1317 sequence = le16_to_cpu(pkt->hdr.sequence);
1318 index = SEQ_TO_INDEX(sequence);
1319 cmd_index = iwl_pcie_get_cmd_index(txq, index);
1321 if (rxq->id == trans_pcie->def_rx_queue)
1322 iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1325 iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1329 kzfree(txq->entries[cmd_index].free_buf);
1330 txq->entries[cmd_index].free_buf = NULL;
1334 * After here, we should always check rxcb._page_stolen,
1335 * if it is true then one of the handlers took the page.
1339 /* Invoke any callbacks, transfer the buffer to caller,
1340 * and fire off the (possibly) blocking
1341 * iwl_trans_send_cmd()
1342 * as we reclaim the driver command queue */
1343 if (!rxcb._page_stolen)
1344 iwl_pcie_hcmd_complete(trans, &rxcb);
1346 IWL_WARN(trans, "Claim null rxb?\n");
1349 page_stolen |= rxcb._page_stolen;
1350 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1352 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
1355 /* page was stolen from us -- free our reference */
1357 __free_pages(rxb->page, trans_pcie->rx_page_order);
1361 /* Reuse the page if possible. For notification packets and
1362 * SKBs that fail to Rx correctly, add them back into the
1363 * rx_free list for reuse later. */
1364 if (rxb->page != NULL) {
1366 dma_map_page(trans->dev, rxb->page, 0,
1367 PAGE_SIZE << trans_pcie->rx_page_order,
1369 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1371 * free the page(s) as well to not break
1372 * the invariant that the items on the used
1373 * list have no page(s)
1375 __free_pages(rxb->page, trans_pcie->rx_page_order);
1377 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1379 list_add_tail(&rxb->list, &rxq->rx_free);
1383 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1386 static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
1387 struct iwl_rxq *rxq, int i)
1389 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1390 struct iwl_rx_mem_buffer *rxb;
1393 BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
1395 if (!trans->cfg->mq_rx_supported) {
1396 rxb = rxq->queue[i];
1397 rxq->queue[i] = NULL;
1401 /* used_bd entries are 32/16 bit wide, but only 12 bits are used to retrieve the vid */
1402 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1403 vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
1405 vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
1407 if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
1410 rxb = trans_pcie->global_table[vid - 1];
1414 IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
1416 rxb->invalid = true;
1421 WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
1422 iwl_force_nmi(trans);
1427 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1429 static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
1431 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1432 struct iwl_rxq *rxq;
1433 u32 r, i, count = 0;
1434 bool emergency = false;
1436 if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
1439 rxq = &trans_pcie->rxq[queue];
1442 spin_lock(&rxq->lock);
1443 /* uCode's read index (stored in shared DRAM) indicates the last Rx
1444 * buffer that the driver may process (last buffer filled by ucode). */
1445 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
1448 /* W/A 9000 device step A0 wrap-around bug */
1449 r &= (rxq->queue_size - 1);
1451 /* Rx interrupt, but nothing sent from uCode */
1453 IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1456 struct iwl_rb_allocator *rba = &trans_pcie->rba;
1457 struct iwl_rx_mem_buffer *rxb;
1458 /* number of RBDs still waiting for page allocation */
1459 u32 rb_pending_alloc =
1460 atomic_read(&trans_pcie->rba.req_pending) *
1463 if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
1465 iwl_pcie_rx_move_to_allocator(rxq, rba);
1467 IWL_DEBUG_TPT(trans,
1468 "RX path is in emergency. Pending allocations %d\n",
1472 IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
1474 rxb = iwl_pcie_get_rxb(trans, rxq, i);
1478 iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
1480 i = (i + 1) & (rxq->queue_size - 1);
1483 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
1484 * try to claim the pre-allocated buffers from the allocator.
1485 * If not ready - will try to reclaim next time.
1486 * There is no need to reschedule work - allocator exits only
1489 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
1490 iwl_pcie_rx_allocator_get(trans, rxq);
1492 if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
1493 /* Add the remaining empty RBDs for allocator use */
1494 iwl_pcie_rx_move_to_allocator(rxq, rba);
1495 } else if (emergency) {
1499 if (rb_pending_alloc < rxq->queue_size / 3) {
1500 IWL_DEBUG_TPT(trans,
1501 "RX path exited emergency. Pending allocations %d\n",
1507 spin_unlock(&rxq->lock);
1508 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1509 iwl_pcie_rxq_restock(trans, rxq);
1515 /* Backtrack one entry */
1517 /* update cr tail with the rxq read pointer */
1518 if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
1519 *rxq->cr_tail = cpu_to_le16(r);
1520 spin_unlock(&rxq->lock);
1523 * handle a case where in emergency there are some unallocated RBDs.
1524 * those RBDs are in the used list, but are not tracked by the queue's
1525 * used_count which counts allocator owned RBDs.
1526 * unallocated emergency RBDs must be allocated on exit, otherwise
1527 * when called again the function may not be in emergency mode and
1528 * they will be handed to the allocator with no tracking in the RBD
1529 * allocator counters, which will lead to them never being claimed back
1531 * by allocating them here, they are now in the queue free list, and
1532 * will be restocked by the next call of iwl_pcie_rxq_restock.
1534 if (unlikely(emergency && count))
1535 iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1538 napi_gro_flush(&rxq->napi, false);
1540 iwl_pcie_rxq_restock(trans, rxq);
1543 static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
1545 u8 queue = entry->entry;
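/*
 * entry points at msix_entries[queue] within struct iwl_trans_pcie;
 * stepping back by 'queue' elements yields &msix_entries[0], from which
 * container_of() below recovers the enclosing trans_pcie.
 */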
1546 struct msix_entry *entries = entry - queue;
1548 return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
1552 * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
1553 * This interrupt handler should be used with RSS queue only.
1555 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
1557 struct msix_entry *entry = dev_id;
1558 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1559 struct iwl_trans *trans = trans_pcie->trans;
1561 trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
1563 if (WARN_ON(entry->entry >= trans->num_rx_queues))
1566 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1569 iwl_pcie_rx_handle(trans, entry->entry);
1572 iwl_pcie_clear_irq(trans, entry);
1574 lock_map_release(&trans->sync_cmd_lockdep_map);
1580 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1582 static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1584 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1587 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
1588 if (trans->cfg->internal_wimax_coex &&
1589 !trans->cfg->apmg_not_supported &&
1590 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1591 APMS_CLK_VAL_MRB_FUNC_MODE) ||
1592 (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1593 APMG_PS_CTRL_VAL_RESET_REQ))) {
1594 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1595 iwl_op_mode_wimax_active(trans->op_mode);
1596 wake_up(&trans_pcie->wait_command_queue);
1600 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
1601 if (!trans_pcie->txq[i])
1603 del_timer(&trans_pcie->txq[i]->stuck_timer);
1606 /* The STATUS_FW_ERROR bit is set in this function. This must happen
1607 * before we wake up the command caller, to ensure a proper cleanup. */
1608 iwl_trans_fw_error(trans);
1610 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1611 wake_up(&trans_pcie->wait_command_queue);
1614 static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1618 lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1620 trace_iwlwifi_dev_irq(trans->dev);
1622 /* Discover which interrupts are active/pending */
1623 inta = iwl_read32(trans, CSR_INT);
1625 /* the thread will service interrupts and re-enable them */
1629 /* a device (PCI-E) page is 4096 bytes long */
1630 #define ICT_SHIFT 12
1631 #define ICT_SIZE (1 << ICT_SHIFT)
1632 #define ICT_COUNT (ICT_SIZE / sizeof(u32))
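/*
 * With ICT_SHIFT == 12 this gives ICT_SIZE == 4096 bytes (one device page)
 * and ICT_COUNT == 4096 / sizeof(u32) == 1024 entries, which is why the
 * ICT index below wraps with "& (ICT_COUNT - 1)".
 */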
1634 /* interrupt handler using the ICT table. With this handler the driver stops
1635 * using the INTA register to get the device's interrupts, since reading that
1636 * register is expensive. The device writes interrupts into the ICT DRAM table,
1637 * increments the index and then fires an interrupt to the driver. The driver
1638 * ORs all ICT table entries from the current index up to the first entry with
1639 * value 0; the result is the interrupt to service. The driver then sets the entries back to 0 and
1642 static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1644 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1649 trace_iwlwifi_dev_irq(trans->dev);
1651 /* Ignore interrupt if there's nothing in NIC to service.
1652 * This may be due to IRQ shared with another device,
1653 * or due to sporadic interrupts thrown from our NIC. */
1654 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1655 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1660 * Collect all entries up to the first 0, starting from ict_index;
1661 * note we already read at ict_index.
1665 IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1666 trans_pcie->ict_index, read);
1667 trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1668 trans_pcie->ict_index =
1669 ((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1671 read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1672 trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1676 /* We should not get this value, just ignore it. */
1677 if (val == 0xffffffff)
1681 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
1682 * (bit 15 before shifting it to 31) to clear when using interrupt
1683 * coalescing. fortunately, bits 18 and 19 stay set when this happens
1684 * so we use them to decide on the real state of the Rx bit.
1685 * In other words, bit 15 is set if bit 18 or bit 19 are set.
1690 inta = (0xff & val) | ((0xff00 & val) << 16);
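/*
 * Illustration of the remapping above: bits 0-7 of the ICT value keep their
 * position while bits 8-15 move up to bits 24-31, which is where the
 * corresponding causes sit in the CSR_INT layout; e.g. the Rx bit at ICT
 * position 15 ends up at position 31.
 */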
1694 void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
1696 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1697 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1698 bool hw_rfkill, prev, report;
1700 mutex_lock(&trans_pcie->mutex);
1701 prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1702 hw_rfkill = iwl_is_rfkill_set(trans);
1704 set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1705 set_bit(STATUS_RFKILL_HW, &trans->status);
1707 if (trans_pcie->opmode_down)
1710 report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1712 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1713 hw_rfkill ? "disable radio" : "enable radio");
1715 isr_stats->rfkill++;
1718 iwl_trans_pcie_rf_kill(trans, report);
1719 mutex_unlock(&trans_pcie->mutex);
1722 if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1724 IWL_DEBUG_RF_KILL(trans,
1725 "Rfkill while SYNC HCMD in flight\n");
1726 wake_up(&trans_pcie->wait_command_queue);
1728 clear_bit(STATUS_RFKILL_HW, &trans->status);
1729 if (trans_pcie->opmode_down)
1730 clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1734 irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1736 struct iwl_trans *trans = dev_id;
1737 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1738 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1742 lock_map_acquire(&trans->sync_cmd_lockdep_map);
1744 spin_lock(&trans_pcie->irq_lock);
1746 /* dram interrupt table not set yet,
1747 * use legacy interrupt.
1749 if (likely(trans_pcie->use_ict))
1750 inta = iwl_pcie_int_cause_ict(trans);
1752 inta = iwl_pcie_int_cause_non_ict(trans);
1754 if (iwl_have_debug_level(IWL_DL_ISR)) {
1755 IWL_DEBUG_ISR(trans,
1756 "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1757 inta, trans_pcie->inta_mask,
1758 iwl_read32(trans, CSR_INT_MASK),
1759 iwl_read32(trans, CSR_FH_INT_STATUS));
1760 if (inta & (~trans_pcie->inta_mask))
1761 IWL_DEBUG_ISR(trans,
1762 "We got a masked interrupt (0x%08x)\n",
1763 inta & (~trans_pcie->inta_mask));
1766 inta &= trans_pcie->inta_mask;
1769 * Ignore interrupt if there's nothing in NIC to service.
1770 * This may be due to IRQ shared with another device,
1771 * or due to sporadic interrupts thrown from our NIC.
1773 if (unlikely(!inta)) {
1774 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1776 * Re-enable interrupts here since we don't
1777 * have anything to service
1779 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1780 _iwl_enable_interrupts(trans);
1781 spin_unlock(&trans_pcie->irq_lock);
1782 lock_map_release(&trans->sync_cmd_lockdep_map);
1786 if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1788 * Hardware disappeared. It might have
1789 * already raised an interrupt.
1791 IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1792 spin_unlock(&trans_pcie->irq_lock);
1796 /* Ack/clear/reset pending uCode interrupts.
1797 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1799 /* There is a hardware bug in the interrupt mask function that some
1800 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
1801 * they are disabled in the CSR_INT_MASK register. Furthermore the
1802 * ICT interrupt handling mechanism has another bug that might cause
1803 * these unmasked interrupts to fail to be detected. We work around the
1804 * hardware bugs here by ACKing all the possible interrupts so that
1805 * interrupt coalescing can still be achieved.
1807 iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1809 if (iwl_have_debug_level(IWL_DL_ISR))
1810 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1811 inta, iwl_read32(trans, CSR_INT_MASK));
1813 spin_unlock(&trans_pcie->irq_lock);
1815 /* Now service all interrupt bits discovered above. */
1816 if (inta & CSR_INT_BIT_HW_ERR) {
1817 IWL_ERR(trans, "Hardware error detected. Restarting.\n");
1819 /* Tell the device to stop sending interrupts */
1820 iwl_disable_interrupts(trans);
1823 iwl_pcie_irq_handle_error(trans);
1825 handled |= CSR_INT_BIT_HW_ERR;
1830 /* NIC fires this, but we don't use it, redundant with WAKEUP */
1831 if (inta & CSR_INT_BIT_SCD) {
1832 IWL_DEBUG_ISR(trans,
1833 "Scheduler finished to transmit the frame/frames.\n");
1837 /* Alive notification via Rx interrupt will do the real work */
1838 if (inta & CSR_INT_BIT_ALIVE) {
1839 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1841 if (trans->cfg->gen2) {
1843 * We can restock, since firmware configured
1846 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
1849 handled |= CSR_INT_BIT_ALIVE;
1852 /* Safely ignore these bits for debug checks below */
1853 inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1855 /* HW RF KILL switch toggled */
1856 if (inta & CSR_INT_BIT_RF_KILL) {
1857 iwl_pcie_handle_rfkill_irq(trans);
1858 handled |= CSR_INT_BIT_RF_KILL;
1861 /* Chip got too hot and stopped itself */
1862 if (inta & CSR_INT_BIT_CT_KILL) {
1863 IWL_ERR(trans, "Microcode CT kill error detected.\n");
1864 isr_stats->ctkill++;
1865 handled |= CSR_INT_BIT_CT_KILL;
1868 /* Error detected by uCode */
1869 if (inta & CSR_INT_BIT_SW_ERR) {
1870 IWL_ERR(trans, "Microcode SW error detected. "
1871 " Restarting 0x%X.\n", inta);
1873 iwl_pcie_irq_handle_error(trans);
1874 handled |= CSR_INT_BIT_SW_ERR;
1877 /* uCode wakes up after power-down sleep */
1878 if (inta & CSR_INT_BIT_WAKEUP) {
1879 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1880 iwl_pcie_rxq_check_wrptr(trans);
1881 iwl_pcie_txq_check_wrptrs(trans);
1883 isr_stats->wakeup++;
1885 handled |= CSR_INT_BIT_WAKEUP;
1888 /* All uCode command responses, including Tx command responses,
1889 * Rx "responses" (frame-received notification), and other
1890 * notifications from uCode come through here*/
1891 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1892 CSR_INT_BIT_RX_PERIODIC)) {
1893 IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1894 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1895 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1896 iwl_write32(trans, CSR_FH_INT_STATUS,
1897 CSR_FH_INT_RX_MASK);
1899 if (inta & CSR_INT_BIT_RX_PERIODIC) {
1900 handled |= CSR_INT_BIT_RX_PERIODIC;
1902 CSR_INT, CSR_INT_BIT_RX_PERIODIC);
1904 /* Sending an RX interrupt requires many steps to be done in the
1906 * 1- write interrupt to current index in ICT table.
1908 * 3- update RX shared data to indicate last write index.
1909 * 4- send interrupt.
1910 * This could lead to an RX race: the driver could receive an RX interrupt
1911 * but the shared data changes do not yet reflect this;
1912 * periodic interrupt will detect any dangling Rx activity.
1915 /* Disable periodic interrupt; we use it as just a one-shot. */
1916 iwl_write8(trans, CSR_INT_PERIODIC_REG,
1917 CSR_INT_PERIODIC_DIS);
1920 * Enable periodic interrupt in 8 msec only if we received
1921 * real RX interrupt (instead of just periodic int), to catch
1922 * any dangling Rx interrupt. If it was just the periodic
1923 * interrupt, there was no dangling Rx activity, and no need
1924 * to extend the periodic interrupt; one-shot is enough.
1926 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
1927 iwl_write8(trans, CSR_INT_PERIODIC_REG,
1928 CSR_INT_PERIODIC_ENA);
1933 iwl_pcie_rx_handle(trans, 0);
1937 /* This "Tx" DMA channel is used only for loading uCode */
1938 if (inta & CSR_INT_BIT_FH_TX) {
1939 iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
1940 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
1942 handled |= CSR_INT_BIT_FH_TX;
1943 /* Wake up uCode load routine, now that load is complete */
1944 trans_pcie->ucode_write_complete = true;
1945 wake_up(&trans_pcie->ucode_write_waitq);
1948 if (inta & ~handled) {
1949 IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1950 isr_stats->unhandled++;
1953 if (inta & ~(trans_pcie->inta_mask)) {
1954 IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
1955 inta & ~trans_pcie->inta_mask);
1958 spin_lock(&trans_pcie->irq_lock);
1959 /* only Re-enable all interrupt if disabled by irq */
1960 if (test_bit(STATUS_INT_ENABLED, &trans->status))
1961 _iwl_enable_interrupts(trans);
1962 /* we are loading the firmware, enable FH_TX interrupt only */
1963 else if (handled & CSR_INT_BIT_FH_TX)
1964 iwl_enable_fw_load_int(trans);
1965 /* Re-enable RF_KILL if it occurred */
1966 else if (handled & CSR_INT_BIT_RF_KILL)
1967 iwl_enable_rfkill_int(trans);
1968 /* Re-enable the ALIVE / Rx interrupt if it occurred */
1969 else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
1970 iwl_enable_fw_load_int_ctx_info(trans);
1971 spin_unlock(&trans_pcie->irq_lock);
1974 lock_map_release(&trans->sync_cmd_lockdep_map);
1978 /******************************************************************************
1982 ******************************************************************************/
1984 /* Free dram table */
1985 void iwl_pcie_free_ict(struct iwl_trans *trans)
1987 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1989 if (trans_pcie->ict_tbl) {
1990 dma_free_coherent(trans->dev, ICT_SIZE,
1991 trans_pcie->ict_tbl,
1992 trans_pcie->ict_tbl_dma);
1993 trans_pcie->ict_tbl = NULL;
1994 trans_pcie->ict_tbl_dma = 0;
1999 * Allocate the DRAM-shared table; it is an aligned memory
2000 * block of ICT_SIZE.
2001 * Also reset all data related to the ICT table interrupt.
2003 int iwl_pcie_alloc_ict(struct iwl_trans *trans)
2005 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2007 trans_pcie->ict_tbl =
2008 dma_alloc_coherent(trans->dev, ICT_SIZE,
2009 &trans_pcie->ict_tbl_dma, GFP_KERNEL);
2010 if (!trans_pcie->ict_tbl)
2013 /* just an API sanity check ... it is guaranteed to be aligned */
2014 if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
2015 iwl_pcie_free_ict(trans);
2022 /* Device is going up - inform it about using the ICT interrupt table,
2023 * also we need to tell the driver to start using the ICT interrupt.
2025 void iwl_pcie_reset_ict(struct iwl_trans *trans)
2027 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2030 if (!trans_pcie->ict_tbl)
2033 spin_lock(&trans_pcie->irq_lock);
2034 _iwl_disable_interrupts(trans);
2036 memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
2038 val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
2040 val |= CSR_DRAM_INT_TBL_ENABLE |
2041 CSR_DRAM_INIT_TBL_WRAP_CHECK |
2042 CSR_DRAM_INIT_TBL_WRITE_POINTER;
2044 IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
2046 iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
2047 trans_pcie->use_ict = true;
2048 trans_pcie->ict_index = 0;
2049 iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
2050 _iwl_enable_interrupts(trans);
2051 spin_unlock(&trans_pcie->irq_lock);
2054 /* Device is going down - disable ICT interrupt usage */
2055 void iwl_pcie_disable_ict(struct iwl_trans *trans)
2057 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2059 spin_lock(&trans_pcie->irq_lock);
2060 trans_pcie->use_ict = false;
2061 spin_unlock(&trans_pcie->irq_lock);
2064 irqreturn_t iwl_pcie_isr(int irq, void *data)
2066 struct iwl_trans *trans = data;
2071 /* Disable (but don't clear!) interrupts here to avoid
2072 * back-to-back ISRs and sporadic interrupts from our NIC.
2073 * If we have something to service, the tasklet will re-enable ints.
2074 * If we *don't* have something, we'll re-enable before leaving here.
2076 iwl_write32(trans, CSR_INT_MASK, 0x00000000);
2078 return IRQ_WAKE_THREAD;
2081 irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
2083 return IRQ_WAKE_THREAD;
2086 irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
2088 struct msix_entry *entry = dev_id;
2089 struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
2090 struct iwl_trans *trans = trans_pcie->trans;
2091 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2092 u32 inta_fh, inta_hw;
2094 lock_map_acquire(&trans->sync_cmd_lockdep_map);
2096 spin_lock(&trans_pcie->irq_lock);
2097 inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
2098 inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
2100 * Clear the cause registers to avoid handling the same cause again.
2102 iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
2103 iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
2104 spin_unlock(&trans_pcie->irq_lock);
2106 trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
2108 if (unlikely(!(inta_fh | inta_hw))) {
2109 IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
2110 lock_map_release(&trans->sync_cmd_lockdep_map);
2114 if (iwl_have_debug_level(IWL_DL_ISR)) {
2115 IWL_DEBUG_ISR(trans,
2116 "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
2117 inta_fh, trans_pcie->fh_mask,
2118 iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
2119 if (inta_fh & ~trans_pcie->fh_mask)
2120 IWL_DEBUG_ISR(trans,
2121 "We got a masked interrupt (0x%08x)\n",
2122 inta_fh & ~trans_pcie->fh_mask);
2125 inta_fh &= trans_pcie->fh_mask;
2127 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
2128 inta_fh & MSIX_FH_INT_CAUSES_Q0) {
2130 iwl_pcie_rx_handle(trans, 0);
2134 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
2135 inta_fh & MSIX_FH_INT_CAUSES_Q1) {
2137 iwl_pcie_rx_handle(trans, 1);
2141 /* This "Tx" DMA channel is used only for loading uCode */
2142 if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
2143 IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2146 * Wake up uCode load routine,
2147 * now that load is complete
2149 trans_pcie->ucode_write_complete = true;
2150 wake_up(&trans_pcie->ucode_write_waitq);
2153 /* Error detected by uCode */
2154 if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
2155 (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
2156 (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
2158 "Microcode SW error detected. Restarting 0x%X.\n",
2161 iwl_pcie_irq_handle_error(trans);
2164 /* After checking FH register check HW register */
2165 if (iwl_have_debug_level(IWL_DL_ISR)) {
2166 IWL_DEBUG_ISR(trans,
2167 "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
2168 inta_hw, trans_pcie->hw_mask,
2169 iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
2170 if (inta_hw & ~trans_pcie->hw_mask)
2171 IWL_DEBUG_ISR(trans,
2172 "We got a masked interrupt 0x%08x\n",
2173 inta_hw & ~trans_pcie->hw_mask);
2176 inta_hw &= trans_pcie->hw_mask;
2178 /* Alive notification via Rx interrupt will do the real work */
2179 if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
2180 IWL_DEBUG_ISR(trans, "Alive interrupt\n");
2182 if (trans->cfg->gen2) {
2183 /* We can restock, since firmware configured the RFH */
2184 iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2188 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560 &&
2189 inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
2190 /* Reflect IML transfer status */
2191 int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
2193 IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
2194 if (res == IWL_IMAGE_RESP_FAIL) {
2196 iwl_pcie_irq_handle_error(trans);
2198 } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
2199 /* uCode wakes up after power-down sleep */
2200 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
2201 iwl_pcie_rxq_check_wrptr(trans);
2202 iwl_pcie_txq_check_wrptrs(trans);
2204 isr_stats->wakeup++;
2207 if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) {
2208 /* Reflect IML transfer status */
2209 int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
2211 IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
2212 if (res == IWL_IMAGE_RESP_FAIL) {
2214 iwl_pcie_irq_handle_error(trans);
2218 /* Chip got too hot and stopped itself */
2219 if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
2220 IWL_ERR(trans, "Microcode CT kill error detected.\n");
2221 isr_stats->ctkill++;
2224 /* HW RF KILL switch toggled */
2225 if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
2226 iwl_pcie_handle_rfkill_irq(trans);
2228 if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
2230 "Hardware error detected. Restarting.\n");
2233 trans->dbg.hw_error = true;
2234 iwl_pcie_irq_handle_error(trans);
2237 iwl_pcie_clear_irq(trans, entry);
2239 lock_map_release(&trans->sync_cmd_lockdep_map);