From: Stijn Tintel Date: Fri, 24 Nov 2017 09:36:24 +0000 (+0100) Subject: kernel: bump 4.9 to 4.9.65 X-Git-Url: https://git.librecmc.org/?a=commitdiff_plain;h=9fe59abef8bde3b7f32868873c4e1f76e9222d46;p=librecmc%2Flibrecmc.git kernel: bump 4.9 to 4.9.65 Refresh patches. Compile-tested: ar71xx, octeon, x86/64. Runtime-tested: ar71xx, octeon, x86/64. Signed-off-by: Stijn Tintel --- diff --git a/include/kernel-version.mk b/include/kernel-version.mk index 8848439c91..55010add29 100644 --- a/include/kernel-version.mk +++ b/include/kernel-version.mk @@ -4,11 +4,11 @@ LINUX_RELEASE?=1 LINUX_VERSION-3.18 = .71 LINUX_VERSION-4.4 = .93 -LINUX_VERSION-4.9 = .63 +LINUX_VERSION-4.9 = .65 LINUX_KERNEL_HASH-3.18.71 = 5abc9778ad44ce02ed6c8ab52ece8a21c6d20d21f6ed8a19287b4a38a50c1240 LINUX_KERNEL_HASH-4.4.93 = ed349314f16e78a6571b5f8884f6452782aef6c26b81bcc7ccdac44ecd917c36 -LINUX_KERNEL_HASH-4.9.63 = 21c9386f33fd3453ca67f7478b4c1ba34067645ef6d391871029cbd7f5df2ea3 +LINUX_KERNEL_HASH-4.9.65 = 24ba70877549a3cf25dc5f12efd260d3e957bce64c087de98baf8968ee514895 ifdef KERNEL_PATCHVER LINUX_VERSION:=$(KERNEL_PATCHVER)$(strip $(LINUX_VERSION-$(KERNEL_PATCHVER))) diff --git a/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch b/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch index 8f4c5b2a0b..0acd00817e 100644 --- a/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch +++ b/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch @@ -134,7 +134,7 @@ Signed-off-by: David S. Miller if (tcp_small_queue_check(sk, skb, 0)) break; -@@ -3509,8 +3509,6 @@ void tcp_send_ack(struct sock *sk) +@@ -3504,8 +3504,6 @@ void tcp_send_ack(struct sock *sk) /* We do not want pure acks influencing TCP Small Queues or fq/pacing * too much. * SKB_TRUESIZE(max(1 .. 
66, MAX_TCP_HEADER)) is unfortunately ~784 diff --git a/target/linux/generic/pending-4.9/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch b/target/linux/generic/pending-4.9/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch index 8168a885b2..826496b419 100644 --- a/target/linux/generic/pending-4.9/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch +++ b/target/linux/generic/pending-4.9/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch @@ -71,7 +71,7 @@ Signed-off-by: Tobias Wolf --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -5921,7 +5921,7 @@ static void __ref alloc_node_mem_map(str +@@ -5930,7 +5930,7 @@ static void __ref alloc_node_mem_map(str mem_map = NODE_DATA(0)->node_mem_map; #if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM) if (page_to_pfn(mem_map) != pgdat->node_start_pfn) diff --git a/target/linux/ramips/patches-4.9/0013-owrt-hack-fix-mt7688-cache-issue.patch b/target/linux/ramips/patches-4.9/0013-owrt-hack-fix-mt7688-cache-issue.patch index 8602801587..0a3ef365fa 100644 --- a/target/linux/ramips/patches-4.9/0013-owrt-hack-fix-mt7688-cache-issue.patch +++ b/target/linux/ramips/patches-4.9/0013-owrt-hack-fix-mt7688-cache-issue.patch @@ -10,7 +10,7 @@ Signed-off-by: John Crispin --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c -@@ -774,7 +774,6 @@ static void __init arch_mem_init(char ** +@@ -846,7 +846,6 @@ static void __init arch_mem_init(char ** crashk_res.end - crashk_res.start + 1, BOOTMEM_DEFAULT); #endif @@ -18,7 +18,7 @@ Signed-off-by: John Crispin sparse_init(); plat_swiotlb_setup(); -@@ -890,6 +889,7 @@ void __init setup_arch(char **cmdline_p) +@@ -962,6 +961,7 @@ void __init setup_arch(char **cmdline_p) cpu_cache_init(); paging_init(); diff --git a/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch index a5cdec5525..285e4d2762 100644 --- a/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch +++ b/target/linux/sunxi/patches-4.9/0052-stmmac-form-4-12.patch @@ -2887,7 +2887,7 @@ + + /* Display RX ring */ + priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true); -+ } + } +} + +static void stmmac_display_tx_rings(struct stmmac_priv *priv) @@ -2906,15 +2906,15 @@ + head_tx = (void *)tx_q->dma_etx; + else + head_tx = (void *)tx_q->dma_tx; -+ -+ priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false); - } -+} - /* Display Rx ring */ - priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true); - /* Display Tx ring */ - priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false); ++ priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false); ++ } ++} ++ +static void stmmac_display_rings(struct stmmac_priv *priv) +{ + /* Display RX ring */ @@ -3131,7 +3131,7 @@ if (priv->hw->mode->set_16kib_bfsize) bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); -@@ -1018,257 +1228,516 @@ static int init_dma_desc_rings(struct ne +@@ -1018,235 +1228,409 @@ static int init_dma_desc_rings(struct ne priv->dma_buf_sz = bfsize; @@ -3163,10 +3163,7 @@ + p = &((rx_q->dma_erx + i)->basic); + else + p = rx_q->dma_rx + i; - -- ret = stmmac_init_rx_buffers(priv, p, i, flags); -- if (ret) -- goto err_init_rx_buffers; ++ + ret = stmmac_init_rx_buffers(priv, p, i, flags, + queue); + if (ret) @@ -3176,15 +3173,18 @@ + rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data, + (unsigned int)rx_q->rx_skbuff_dma[i]); + } -+ + +- ret = stmmac_init_rx_buffers(priv, p, i, flags); +- if (ret) +- goto err_init_rx_buffers; + rx_q->cur_rx = 0; + rx_q->dirty_rx = (unsigned int)(i - 
DMA_RX_SIZE); -+ -+ stmmac_clear_rx_descriptors(priv, queue); - netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n", - priv->rx_skbuff[i], priv->rx_skbuff[i]->data, - (unsigned int)priv->rx_skbuff_dma[i]); ++ stmmac_clear_rx_descriptors(priv, queue); ++ + /* Setup the chained descriptor addresses */ + if (priv->mode == STMMAC_CHAIN_MODE) { + if (priv->extend_desc) @@ -3277,13 +3277,8 @@ + priv->hw->mode->init(tx_q->dma_tx, + tx_q->dma_tx_phy, + DMA_TX_SIZE, 0); - } - -- priv->tx_skbuff_dma[i].buf = 0; -- priv->tx_skbuff_dma[i].map_as_page = false; -- priv->tx_skbuff_dma[i].len = 0; -- priv->tx_skbuff_dma[i].last_segment = false; -- priv->tx_skbuff[i] = NULL; ++ } ++ + for (i = 0; i < DMA_TX_SIZE; i++) { + struct dma_desc *p; + if (priv->extend_desc) @@ -3305,8 +3300,13 @@ + tx_q->tx_skbuff_dma[i].len = 0; + tx_q->tx_skbuff_dma[i].last_segment = false; + tx_q->tx_skbuff[i] = NULL; -+ } -+ + } + +- priv->tx_skbuff_dma[i].buf = 0; +- priv->tx_skbuff_dma[i].map_as_page = false; +- priv->tx_skbuff_dma[i].len = 0; +- priv->tx_skbuff_dma[i].last_segment = false; +- priv->tx_skbuff[i] = NULL; + tx_q->dirty_tx = 0; + tx_q->cur_tx = 0; + @@ -3387,10 +3387,17 @@ - priv->tx_skbuff_dma[i].buf, - priv->tx_skbuff_dma[i].len, - DMA_TO_DEVICE); +- } + for (i = 0; i < DMA_TX_SIZE; i++) + stmmac_free_tx_buffer(priv, queue, i); +} -+ + +- if (priv->tx_skbuff[i]) { +- dev_kfree_skb_any(priv->tx_skbuff[i]); +- priv->tx_skbuff[i] = NULL; +- priv->tx_skbuff_dma[i].buf = 0; +- priv->tx_skbuff_dma[i].map_as_page = false; +- } +/** + * free_dma_rx_desc_resources - free RX dma desc resources + * @priv: private structure @@ -3419,10 +3426,11 @@ + + kfree(rx_q->rx_skbuff_dma); + kfree(rx_q->rx_skbuff); -+ } -+} -+ -+/** + } + } + + /** +- * alloc_dma_desc_resources - alloc TX/RX resources. + * free_dma_tx_desc_resources - free TX dma desc resources + * @priv: private structure + */ @@ -3455,36 +3463,90 @@ + +/** + * alloc_dma_rx_desc_resources - alloc RX resources. -+ * @priv: private structure -+ * Description: according to which descriptor can be used (extend or basic) -+ * this function allocates the resources for TX and RX paths. In case of -+ * reception, for example, it pre-allocated the RX socket buffer in order to -+ * allow zero-copy mechanism. -+ */ + * @priv: private structure + * Description: according to which descriptor can be used (extend or basic) + * this function allocates the resources for TX and RX paths. In case of + * reception, for example, it pre-allocated the RX socket buffer in order to + * allow zero-copy mechanism. 
+ */ +-static int alloc_dma_desc_resources(struct stmmac_priv *priv) +static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) -+{ + { + u32 rx_count = priv->plat->rx_queues_to_use; -+ int ret = -ENOMEM; + int ret = -ENOMEM; + u32 queue; -+ + +- priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t), +- GFP_KERNEL); +- if (!priv->rx_skbuff_dma) +- return -ENOMEM; + /* RX queues buffers and DMA */ + for (queue = 0; queue < rx_count; queue++) { + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; -+ + +- priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *), +- GFP_KERNEL); +- if (!priv->rx_skbuff) +- goto err_rx_skbuff; +- +- priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, +- sizeof(*priv->tx_skbuff_dma), +- GFP_KERNEL); +- if (!priv->tx_skbuff_dma) +- goto err_tx_skbuff_dma; +- +- priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *), +- GFP_KERNEL); +- if (!priv->tx_skbuff) +- goto err_tx_skbuff; +- +- if (priv->extend_desc) { +- priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct +- dma_extended_desc), +- &priv->dma_rx_phy, +- GFP_KERNEL); +- if (!priv->dma_erx) +- goto err_dma; + rx_q->queue_index = queue; + rx_q->priv_data = priv; -+ + +- priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * +- sizeof(struct +- dma_extended_desc), +- &priv->dma_tx_phy, + rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, + sizeof(dma_addr_t), -+ GFP_KERNEL); + GFP_KERNEL); +- if (!priv->dma_etx) { +- dma_free_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct dma_extended_desc), +- priv->dma_erx, priv->dma_rx_phy); +- goto err_dma; +- } +- } else { +- priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct dma_desc), +- &priv->dma_rx_phy, +- GFP_KERNEL); +- if (!priv->dma_rx) +- goto err_dma; + if (!rx_q->rx_skbuff_dma) + return -ENOMEM; -+ + +- priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * +- sizeof(struct dma_desc), +- &priv->dma_tx_phy, +- GFP_KERNEL); +- if (!priv->dma_tx) { +- dma_free_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct dma_desc), +- priv->dma_rx, priv->dma_rx_phy); + rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE, + sizeof(struct sk_buff *), + GFP_KERNEL); + if (!rx_q->rx_skbuff) -+ goto err_dma; + goto err_dma; + + if (priv->extend_desc) { + rx_q->dma_erx = dma_zalloc_coherent(priv->device, @@ -3505,12 +3567,19 @@ + GFP_KERNEL); + if (!rx_q->dma_rx) + goto err_dma; -+ } -+ } -+ -+ return 0; -+ -+err_dma: + } + } + + return 0; + + err_dma: +- kfree(priv->tx_skbuff); +-err_tx_skbuff: +- kfree(priv->tx_skbuff_dma); +-err_tx_skbuff_dma: +- kfree(priv->rx_skbuff); +-err_rx_skbuff: +- kfree(priv->rx_skbuff_dma); + free_dma_rx_desc_resources(priv); + + return ret; @@ -3567,7 +3636,7 @@ + GFP_KERNEL); + if (!tx_q->dma_tx) + goto err_dma_buffers; - } ++ } + } + + return 0; @@ -3575,9 +3644,9 @@ +err_dma_buffers: + free_dma_tx_desc_resources(priv); + -+ return ret; -+} -+ + return ret; + } + +/** + * alloc_dma_desc_resources - alloc TX/RX resources. 
+ * @priv: private structure @@ -3603,119 +3672,78 @@ + * free_dma_desc_resources - free dma desc resources + * @priv: private structure + */ -+static void free_dma_desc_resources(struct stmmac_priv *priv) -+{ + static void free_dma_desc_resources(struct stmmac_priv *priv) + { +- /* Release the DMA TX/RX socket buffers */ +- dma_free_rx_skbufs(priv); +- dma_free_tx_skbufs(priv); +- +- /* Free DMA regions of consistent memory previously allocated */ +- if (!priv->extend_desc) { +- dma_free_coherent(priv->device, +- DMA_TX_SIZE * sizeof(struct dma_desc), +- priv->dma_tx, priv->dma_tx_phy); +- dma_free_coherent(priv->device, +- DMA_RX_SIZE * sizeof(struct dma_desc), +- priv->dma_rx, priv->dma_rx_phy); +- } else { +- dma_free_coherent(priv->device, DMA_TX_SIZE * +- sizeof(struct dma_extended_desc), +- priv->dma_etx, priv->dma_tx_phy); +- dma_free_coherent(priv->device, DMA_RX_SIZE * +- sizeof(struct dma_extended_desc), +- priv->dma_erx, priv->dma_rx_phy); +- } +- kfree(priv->rx_skbuff_dma); +- kfree(priv->rx_skbuff); +- kfree(priv->tx_skbuff_dma); +- kfree(priv->tx_skbuff); + /* Release the DMA RX socket buffers */ + free_dma_rx_desc_resources(priv); + + /* Release the DMA TX socket buffers */ + free_dma_tx_desc_resources(priv); -+} -+ -+/** -+ * stmmac_mac_enable_rx_queues - Enable MAC rx queues -+ * @priv: driver private structure -+ * Description: It is used for enabling the rx queues in the MAC -+ */ -+static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) -+{ + } + + /** +@@ -1256,19 +1640,104 @@ static void free_dma_desc_resources(stru + */ + static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) + { +- int rx_count = priv->dma_cap.number_rx_queues; +- int queue = 0; + u32 rx_queues_count = priv->plat->rx_queues_to_use; + int queue; + u8 mode; -- if (priv->tx_skbuff[i]) { -- dev_kfree_skb_any(priv->tx_skbuff[i]); -- priv->tx_skbuff[i] = NULL; -- priv->tx_skbuff_dma[i].buf = 0; -- priv->tx_skbuff_dma[i].map_as_page = false; -- } +- /* If GMAC does not have multiple queues, then this is not necessary*/ +- if (rx_count == 1) +- return; + for (queue = 0; queue < rx_queues_count; queue++) { + mode = priv->plat->rx_queues_cfg[queue].mode_to_use; + priv->hw->mac->rx_queue_enable(priv->hw, mode, queue); - } - } ++ } ++} - /** -- * alloc_dma_desc_resources - alloc TX/RX resources. -- * @priv: private structure -- * Description: according to which descriptor can be used (extend or basic) -- * this function allocates the resources for TX and RX paths. In case of -- * reception, for example, it pre-allocated the RX socket buffer in order to -- * allow zero-copy mechanism. +- /** +- * If the core is synthesized with multiple rx queues / multiple +- * dma channels, then rx queues will be disabled by default. +- * For now only rx queue 0 is enabled. 
+- */ +- priv->hw->mac->rx_queue_enable(priv->hw, queue); ++/** + * stmmac_start_rx_dma - start RX DMA channel + * @priv: driver private structure + * @chan: RX channel index + * Description: + * This starts a RX DMA channel - */ --static int alloc_dma_desc_resources(struct stmmac_priv *priv) ++ */ +static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) - { -- int ret = -ENOMEM; -- -- priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t), -- GFP_KERNEL); -- if (!priv->rx_skbuff_dma) -- return -ENOMEM; -- -- priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *), -- GFP_KERNEL); -- if (!priv->rx_skbuff) -- goto err_rx_skbuff; -- -- priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, -- sizeof(*priv->tx_skbuff_dma), -- GFP_KERNEL); -- if (!priv->tx_skbuff_dma) -- goto err_tx_skbuff_dma; -- -- priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *), -- GFP_KERNEL); -- if (!priv->tx_skbuff) -- goto err_tx_skbuff; -- -- if (priv->extend_desc) { -- priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * -- sizeof(struct -- dma_extended_desc), -- &priv->dma_rx_phy, -- GFP_KERNEL); -- if (!priv->dma_erx) -- goto err_dma; -- -- priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * -- sizeof(struct -- dma_extended_desc), -- &priv->dma_tx_phy, -- GFP_KERNEL); -- if (!priv->dma_etx) { -- dma_free_coherent(priv->device, DMA_RX_SIZE * -- sizeof(struct dma_extended_desc), -- priv->dma_erx, priv->dma_rx_phy); -- goto err_dma; -- } -- } else { -- priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * -- sizeof(struct dma_desc), -- &priv->dma_rx_phy, -- GFP_KERNEL); -- if (!priv->dma_rx) -- goto err_dma; ++{ + netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); + priv->hw->dma->start_rx(priv->ioaddr, chan); +} - -- priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * -- sizeof(struct dma_desc), -- &priv->dma_tx_phy, -- GFP_KERNEL); -- if (!priv->dma_tx) { -- dma_free_coherent(priv->device, DMA_RX_SIZE * -- sizeof(struct dma_desc), -- priv->dma_rx, priv->dma_rx_phy); -- goto err_dma; -- } -- } ++ +/** + * stmmac_start_tx_dma - start TX DMA channel + * @priv: driver private structure @@ -3728,8 +3756,7 @@ + netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); + priv->hw->dma->start_tx(priv->ioaddr, chan); +} - -- return 0; ++ +/** + * stmmac_stop_rx_dma - stop RX DMA channel + * @priv: driver private structure @@ -3742,16 +3769,7 @@ + netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); + priv->hw->dma->stop_rx(priv->ioaddr, chan); +} - --err_dma: -- kfree(priv->tx_skbuff); --err_tx_skbuff: -- kfree(priv->tx_skbuff_dma); --err_tx_skbuff_dma: -- kfree(priv->rx_skbuff); --err_rx_skbuff: -- kfree(priv->rx_skbuff_dma); -- return ret; ++ +/** + * stmmac_stop_tx_dma - stop TX DMA channel + * @priv: driver private structure @@ -3763,9 +3781,8 @@ +{ + netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); + priv->hw->dma->stop_tx(priv->ioaddr, chan); - } - --static void free_dma_desc_resources(struct stmmac_priv *priv) ++} ++ +/** + * stmmac_start_all_dma - start all RX and TX DMA channels + * @priv: driver private structure @@ -3773,31 +3790,7 @@ + * This starts all the RX and TX DMA channels + */ +static void stmmac_start_all_dma(struct stmmac_priv *priv) - { -- /* Release the DMA TX/RX socket buffers */ -- dma_free_rx_skbufs(priv); -- dma_free_tx_skbufs(priv); -- -- /* Free DMA regions of consistent memory previously allocated */ -- if 
(!priv->extend_desc) { -- dma_free_coherent(priv->device, -- DMA_TX_SIZE * sizeof(struct dma_desc), -- priv->dma_tx, priv->dma_tx_phy); -- dma_free_coherent(priv->device, -- DMA_RX_SIZE * sizeof(struct dma_desc), -- priv->dma_rx, priv->dma_rx_phy); -- } else { -- dma_free_coherent(priv->device, DMA_TX_SIZE * -- sizeof(struct dma_extended_desc), -- priv->dma_etx, priv->dma_tx_phy); -- dma_free_coherent(priv->device, DMA_RX_SIZE * -- sizeof(struct dma_extended_desc), -- priv->dma_erx, priv->dma_rx_phy); -- } -- kfree(priv->rx_skbuff_dma); -- kfree(priv->rx_skbuff); -- kfree(priv->tx_skbuff_dma); -- kfree(priv->tx_skbuff); ++{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 chan = 0; @@ -3807,38 +3800,23 @@ + + for (chan = 0; chan < tx_channels_count; chan++) + stmmac_start_tx_dma(priv, chan); - } - - /** -- * stmmac_mac_enable_rx_queues - Enable MAC rx queues -- * @priv: driver private structure -- * Description: It is used for enabling the rx queues in the MAC ++} ++ ++/** + * stmmac_stop_all_dma - stop all RX and TX DMA channels + * @priv: driver private structure + * Description: + * This stops the RX and TX DMA channels - */ --static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) ++ */ +static void stmmac_stop_all_dma(struct stmmac_priv *priv) - { -- int rx_count = priv->dma_cap.number_rx_queues; -- int queue = 0; ++{ + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; + u32 chan = 0; - -- /* If GMAC does not have multiple queues, then this is not necessary*/ -- if (rx_count == 1) -- return; ++ + for (chan = 0; chan < rx_channels_count; chan++) + stmmac_stop_rx_dma(priv, chan); - -- /** -- * If the core is synthesized with multiple rx queues / multiple -- * dma channels, then rx queues will be disabled by default. -- * For now only rx queue 0 is enabled. -- */ -- priv->hw->mac->rx_queue_enable(priv->hw, queue); ++ + for (chan = 0; chan < tx_channels_count; chan++) + stmmac_stop_tx_dma(priv, chan); }
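For reference, the include/kernel-version.mk hunk at the top is the whole mechanism behind the bump: the target sets KERNEL_PATCHVER, and kernel-version.mk appends the per-series patchlevel to it. A minimal sketch of that composition, assuming KERNEL_PATCHVER is 4.9 (variable names taken from the hunk itself):

    # Sketch of how kernel-version.mk builds the full version string.
    KERNEL_PATCHVER := 4.9
    LINUX_VERSION-4.9 = .65
    # Same expression as in the file: the patchlevel is appended to the series.
    LINUX_VERSION := $(KERNEL_PATCHVER)$(strip $(LINUX_VERSION-$(KERNEL_PATCHVER)))
    $(info LINUX_VERSION = $(LINUX_VERSION))  # prints: LINUX_VERSION = 4.9.65

LINUX_KERNEL_HASH-4.9.65 is the sha256 of the new upstream tarball. "Refresh patches" is typically done by letting the buildroot re-apply the target patches against the new kernel tree (e.g. make target/linux/refresh), which is why the refreshed hunks above mostly just shift offsets and context lines.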