1 // SPDX-License-Identifier: ISC
3 * Copyright (c) 2014 Broadcom Corporation
6 /*******************************************************************************
7 * Communicates with the dongle by using dcmd codes.
8 * For certain dcmd codes, the dongle interprets string data from the host.
9 ******************************************************************************/
11 #include <linux/types.h>
12 #include <linux/netdevice.h>
13 #include <linux/etherdevice.h>
15 #include <brcmu_utils.h>
16 #include <brcmu_wifi.h>
22 #include "commonring.h"
25 #include "tracepoint.h"
28 #define MSGBUF_IOCTL_RESP_TIMEOUT msecs_to_jiffies(2000)
30 #define MSGBUF_TYPE_GEN_STATUS 0x1
31 #define MSGBUF_TYPE_RING_STATUS 0x2
32 #define MSGBUF_TYPE_FLOW_RING_CREATE 0x3
33 #define MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT 0x4
34 #define MSGBUF_TYPE_FLOW_RING_DELETE 0x5
35 #define MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT 0x6
36 #define MSGBUF_TYPE_FLOW_RING_FLUSH 0x7
37 #define MSGBUF_TYPE_FLOW_RING_FLUSH_CMPLT 0x8
38 #define MSGBUF_TYPE_IOCTLPTR_REQ 0x9
39 #define MSGBUF_TYPE_IOCTLPTR_REQ_ACK 0xA
40 #define MSGBUF_TYPE_IOCTLRESP_BUF_POST 0xB
41 #define MSGBUF_TYPE_IOCTL_CMPLT 0xC
42 #define MSGBUF_TYPE_EVENT_BUF_POST 0xD
43 #define MSGBUF_TYPE_WL_EVENT 0xE
44 #define MSGBUF_TYPE_TX_POST 0xF
45 #define MSGBUF_TYPE_TX_STATUS 0x10
46 #define MSGBUF_TYPE_RXBUF_POST 0x11
47 #define MSGBUF_TYPE_RX_CMPLT 0x12
48 #define MSGBUF_TYPE_LPBK_DMAXFER 0x13
49 #define MSGBUF_TYPE_LPBK_DMAXFER_CMPLT 0x14
51 #define NR_TX_PKTIDS 2048
52 #define NR_RX_PKTIDS 1024
54 #define BRCMF_IOCTL_REQ_PKTID 0xFFFE
56 #define BRCMF_MSGBUF_MAX_PKT_SIZE 2048
57 #define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD 32
58 #define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST 8
59 #define BRCMF_MSGBUF_MAX_EVENTBUF_POST 8
61 #define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3 0x01
62 #define BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11 0x02
63 #define BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK 0x07
64 #define BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT 5
66 #define BRCMF_MSGBUF_TX_FLUSH_CNT1 32
67 #define BRCMF_MSGBUF_TX_FLUSH_CNT2 96
69 #define BRCMF_MSGBUF_DELAY_TXWORKER_THRS 96
70 #define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS 32
71 #define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS 48
/* Common header present at the start of every msgbuf message.
 * NOTE(review): field list is truncated in this view.
 */
74 struct msgbuf_common_hdr {
/* Host->dongle ioctl request (MSGBUF_TYPE_IOCTLPTR_REQ); the request
 * payload lives in a separate DMA buffer referenced by req_buf_addr.
 */
82 struct msgbuf_ioctl_req_hdr {
83 struct msgbuf_common_hdr msg;
87 __le16 output_buf_len;
89 struct msgbuf_buf_addr req_buf_addr;
/* Host->dongle TX descriptor (MSGBUF_TYPE_TX_POST). */
93 struct msgbuf_tx_msghdr {
94 struct msgbuf_common_hdr msg;
98 struct msgbuf_buf_addr metadata_buf_addr;
99 struct msgbuf_buf_addr data_buf_addr;
100 __le16 metadata_buf_len;
/* Host->dongle RX buffer post (MSGBUF_TYPE_RXBUF_POST). */
105 struct msgbuf_rx_bufpost {
106 struct msgbuf_common_hdr msg;
107 __le16 metadata_buf_len;
110 struct msgbuf_buf_addr metadata_buf_addr;
111 struct msgbuf_buf_addr data_buf_addr;
/* Buffer post shared by ioctl-response and event buffers
 * (MSGBUF_TYPE_IOCTLRESP_BUF_POST / MSGBUF_TYPE_EVENT_BUF_POST).
 */
114 struct msgbuf_rx_ioctl_resp_or_event {
115 struct msgbuf_common_hdr msg;
118 struct msgbuf_buf_addr host_buf_addr;
/* Completion header carried by dongle->host completion messages;
 * contains at least a status and a flow_ring_id (used below).
 */
122 struct msgbuf_completion_hdr {
127 /* Data struct for the MSGBUF_TYPE_GEN_STATUS */
128 struct msgbuf_gen_status {
129 struct msgbuf_common_hdr msg;
130 struct msgbuf_completion_hdr compl_hdr;
135 /* Data struct for the MSGBUF_TYPE_RING_STATUS */
136 struct msgbuf_ring_status {
137 struct msgbuf_common_hdr msg;
138 struct msgbuf_completion_hdr compl_hdr;
/* Dongle->host firmware event (MSGBUF_TYPE_WL_EVENT). */
143 struct msgbuf_rx_event {
144 struct msgbuf_common_hdr msg;
145 struct msgbuf_completion_hdr compl_hdr;
146 __le16 event_data_len;
/* Dongle->host ioctl completion (MSGBUF_TYPE_IOCTL_CMPLT). */
151 struct msgbuf_ioctl_resp_hdr {
152 struct msgbuf_common_hdr msg;
153 struct msgbuf_completion_hdr compl_hdr;
/* Dongle->host TX completion (MSGBUF_TYPE_TX_STATUS). */
160 struct msgbuf_tx_status {
161 struct msgbuf_common_hdr msg;
162 struct msgbuf_completion_hdr compl_hdr;
/* Dongle->host RX completion (MSGBUF_TYPE_RX_CMPLT). */
167 struct msgbuf_rx_complete {
168 struct msgbuf_common_hdr msg;
169 struct msgbuf_completion_hdr compl_hdr;
/* Host->dongle flow ring create request (MSGBUF_TYPE_FLOW_RING_CREATE). */
179 struct msgbuf_tx_flowring_create_req {
180 struct msgbuf_common_hdr msg;
191 struct msgbuf_buf_addr flow_ring_addr;
/* Host->dongle flow ring delete request (MSGBUF_TYPE_FLOW_RING_DELETE). */
194 struct msgbuf_tx_flowring_delete_req {
195 struct msgbuf_common_hdr msg;
/* Dongle->host flow ring create/delete/flush completions. */
201 struct msgbuf_flowring_create_resp {
202 struct msgbuf_common_hdr msg;
203 struct msgbuf_completion_hdr compl_hdr;
207 struct msgbuf_flowring_delete_resp {
208 struct msgbuf_common_hdr msg;
209 struct msgbuf_completion_hdr compl_hdr;
213 struct msgbuf_flowring_flush_resp {
214 struct msgbuf_common_hdr msg;
215 struct msgbuf_completion_hdr compl_hdr;
/* Deferred flow ring creation request, queued on msgbuf->work_queue
 * and processed by the flowring worker.
 */
219 struct brcmf_msgbuf_work_item {
220 struct list_head queue;
/* Per-device msgbuf protocol state, stored in drvr->proto->pd. */
227 struct brcmf_msgbuf {
228 struct brcmf_pub *drvr;
230 struct brcmf_commonring **commonrings;
231 struct brcmf_commonring **flowrings;
232 dma_addr_t *flowring_dma_handle;
235 u16 max_submissionrings;
236 u16 max_completionrings;
240 u16 rx_metadata_offset;
243 u32 max_ioctlrespbuf;
244 u32 cur_ioctlrespbuf;
249 dma_addr_t ioctbuf_handle;
252 int ioctl_resp_status;
253 u32 ioctl_resp_ret_len;
254 u32 ioctl_resp_pktid;
259 wait_queue_head_t ioctl_resp_wait;
262 struct brcmf_msgbuf_pktids *tx_pktids;
263 struct brcmf_msgbuf_pktids *rx_pktids;
264 struct brcmf_flowring *flow;
266 struct workqueue_struct *txflow_wq;
267 struct work_struct txflow_work;
268 unsigned long *flow_map;
269 unsigned long *txstatus_done_map;
271 struct work_struct flowring_work;
272 spinlock_t flowring_work_lock;
273 struct list_head work_queue;
/* One slot in the packet-id table: maps a small integer id (put in the
 * message request_id) back to the DMA-mapped skb it refers to.
 */
276 struct brcmf_msgbuf_pktid {
/* Packet-id table plus allocation cursor and DMA direction. */
283 struct brcmf_msgbuf_pktids {
285 u32 last_allocated_idx;
286 enum dma_data_direction direction;
287 struct brcmf_msgbuf_pktid *array;
290 static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);
/* Allocate a packet-id table of @nr_array_entries slots for the given DMA
 * @direction. Returns the new table, or (presumably) NULL on allocation
 * failure -- the error paths are not visible in this view.
 */
293 static struct brcmf_msgbuf_pktids *
294 brcmf_msgbuf_init_pktids(u32 nr_array_entries,
295 enum dma_data_direction direction)
297 struct brcmf_msgbuf_pktid *array;
298 struct brcmf_msgbuf_pktids *pktids;
300 array = kcalloc(nr_array_entries, sizeof(*array), GFP_KERNEL);
304 pktids = kzalloc(sizeof(*pktids), GFP_KERNEL);
309 pktids->array = array;
310 pktids->array_size = nr_array_entries;
/* DMA-map @skb (starting @data_offset bytes in) and claim a free slot in
 * @pktids for it. On success *physaddr holds the mapped address and *idx
 * the claimed packet id. Slots are claimed lock-free with atomic_cmpxchg,
 * scanning at most array_size entries starting after last_allocated_idx.
 */
317 brcmf_msgbuf_alloc_pktid(struct device *dev,
318 struct brcmf_msgbuf_pktids *pktids,
319 struct sk_buff *skb, u16 data_offset,
320 dma_addr_t *physaddr, u32 *idx)
322 struct brcmf_msgbuf_pktid *array;
325 array = pktids->array;
327 *physaddr = dma_map_single(dev, skb->data + data_offset,
328 skb->len - data_offset, pktids->direction);
330 if (dma_mapping_error(dev, *physaddr)) {
331 brcmf_err("dma_map_single failed !!\n");
335 *idx = pktids->last_allocated_idx;
/* wrap the cursor, then try to atomically claim a free (0) slot */
340 if (*idx == pktids->array_size)
342 if (array[*idx].allocated.counter == 0)
343 if (atomic_cmpxchg(&array[*idx].allocated, 0, 1) == 0)
346 } while (count < pktids->array_size);
/* whole table scanned without finding a free slot */
348 if (count == pktids->array_size)
351 array[*idx].data_offset = data_offset;
352 array[*idx].physaddr = *physaddr;
353 array[*idx].skb = skb;
/* remember where we stopped so the next search starts past it */
355 pktids->last_allocated_idx = *idx;
/* Look up packet id @idx in @pktids: validate it, DMA-unmap the buffer,
 * release the slot and return the associated skb. Logs and (presumably)
 * returns NULL for out-of-range or unused ids.
 */
361 static struct sk_buff *
362 brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
365 struct brcmf_msgbuf_pktid *pktid;
368 if (idx < 0 || idx >= pktids->array_size) {
369 brcmf_err("Invalid packet id %d (max %d)\n", idx,
373 if (pktids->array[idx].allocated.counter) {
374 pktid = &pktids->array[idx];
375 dma_unmap_single(dev, pktid->physaddr,
376 pktid->skb->len - pktid->data_offset,
/* mark the slot free again */
379 pktid->allocated.counter = 0;
382 brcmf_err("Invalid packet id %d (not in use)\n", idx);
/* Teardown helper: walk the whole packet-id table, DMA-unmapping and
 * freeing every skb still marked allocated. Presumably also frees the
 * table itself -- those lines are not visible in this view.
 */
390 brcmf_msgbuf_release_array(struct device *dev,
391 struct brcmf_msgbuf_pktids *pktids)
393 struct brcmf_msgbuf_pktid *array;
394 struct brcmf_msgbuf_pktid *pktid;
397 array = pktids->array;
400 if (array[count].allocated.counter) {
401 pktid = &array[count];
402 dma_unmap_single(dev, pktid->physaddr,
403 pktid->skb->len - pktid->data_offset,
405 brcmu_pkt_buf_free_skb(pktid->skb);
408 } while (count < pktids->array_size);
/* Release both the rx and tx packet-id tables, if they were created. */
415 static void brcmf_msgbuf_release_pktids(struct brcmf_msgbuf *msgbuf)
417 if (msgbuf->rx_pktids)
418 brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
420 if (msgbuf->tx_pktids)
421 brcmf_msgbuf_release_array(msgbuf->drvr->bus_if->dev,
/* Send an ioctl/dcmd to the dongle: reserve a slot on the control submit
 * ring (under the ring lock), build a MSGBUF_TYPE_IOCTLPTR_REQ pointing
 * at the pre-allocated ioctbuf DMA buffer, copy the request payload into
 * that buffer (capped at BRCMF_TX_IOCTL_MAX_MSG_SIZE), then complete the
 * write to ring the doorbell.
 */
426 static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
427 uint cmd, void *buf, uint len)
429 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
430 struct brcmf_commonring *commonring;
431 struct msgbuf_ioctl_req_hdr *request;
436 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
437 brcmf_commonring_lock(commonring);
438 ret_ptr = brcmf_commonring_reserve_for_write(commonring);
440 bphy_err(drvr, "Failed to reserve space in commonring\n");
441 brcmf_commonring_unlock(commonring);
447 request = (struct msgbuf_ioctl_req_hdr *)ret_ptr;
448 request->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
449 request->msg.ifidx = (u8)ifidx;
450 request->msg.flags = 0;
/* fixed request id; the response is matched by this sentinel */
451 request->msg.request_id = cpu_to_le32(BRCMF_IOCTL_REQ_PKTID);
452 request->cmd = cpu_to_le32(cmd);
453 request->output_buf_len = cpu_to_le16(len);
454 request->trans_id = cpu_to_le16(msgbuf->reqid);
456 buf_len = min_t(u16, len, BRCMF_TX_IOCTL_MAX_MSG_SIZE);
457 request->input_buf_len = cpu_to_le16(buf_len);
458 request->req_buf_addr.high_addr = cpu_to_le32(msgbuf->ioctbuf_phys_hi);
459 request->req_buf_addr.low_addr = cpu_to_le32(msgbuf->ioctbuf_phys_lo);
/* copy payload if caller supplied one, otherwise zero the DMA buffer */
461 memcpy(msgbuf->ioctbuf, buf, buf_len);
463 memset(msgbuf->ioctbuf, 0, buf_len);
465 err = brcmf_commonring_write_complete(commonring);
466 brcmf_commonring_unlock(commonring);
/* Block until the ioctl completion handler sets ctl_completed, or the
 * 2 s MSGBUF_IOCTL_RESP_TIMEOUT expires. Returns 0 on timeout (per
 * wait_event_timeout semantics).
 */
472 static int brcmf_msgbuf_ioctl_resp_wait(struct brcmf_msgbuf *msgbuf)
474 return wait_event_timeout(msgbuf->ioctl_resp_wait,
475 msgbuf->ctl_completed,
476 MSGBUF_IOCTL_RESP_TIMEOUT);
/* Signal the waiter above that the ioctl response has arrived. */
480 static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf)
482 msgbuf->ctl_completed = true;
483 wake_up(&msgbuf->ioctl_resp_wait);
/* Synchronous dongle command: transmit the ioctl, wait for completion,
 * then fetch the response skb by the pktid recorded in the completion
 * handler and copy at most min(len, resp_len) bytes back into @buf.
 * The firmware status code is returned through *fwerr.
 */
487 static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
488 uint cmd, void *buf, uint len, int *fwerr)
490 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
491 struct sk_buff *skb = NULL;
495 brcmf_dbg(MSGBUF, "ifidx=%d, cmd=%d, len=%d\n", ifidx, cmd, len);
/* must clear the flag before sending, else a stale wake is possible */
497 msgbuf->ctl_completed = false;
498 err = brcmf_msgbuf_tx_ioctl(drvr, ifidx, cmd, buf, len);
502 timeout = brcmf_msgbuf_ioctl_resp_wait(msgbuf);
504 bphy_err(drvr, "Timeout on response for query command\n");
508 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
510 msgbuf->ioctl_resp_pktid);
511 if (msgbuf->ioctl_resp_ret_len != 0) {
/* copy back no more than the caller's buffer can hold */
515 memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
516 len : msgbuf->ioctl_resp_ret_len);
518 brcmu_pkt_buf_free_skb(skb);
520 *fwerr = msgbuf->ioctl_resp_status;
/* "Set" commands are handled identically to queries for msgbuf. */
525 static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
526 uint cmd, void *buf, uint len, int *fwerr)
528 return brcmf_msgbuf_query_dcmd(drvr, ifidx, cmd, buf, len, fwerr);
/* No protocol header to strip for msgbuf; stub for the proto interface. */
532 static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
533 struct sk_buff *skb, struct brcmf_if **ifp)
/* RX reordering is done by the dongle for msgbuf; stub. */
538 static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
/* Free the DMA ring memory backing flowring @flowid and delete the
 * flowring bookkeeping state.
 */
543 brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
548 brcmf_dbg(MSGBUF, "Removing flowring %d\n", flowid);
550 dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
551 dma_buf = msgbuf->flowrings[flowid]->buf_addr;
552 dma_free_coherent(msgbuf->drvr->bus_if->dev, dma_sz, dma_buf,
553 msgbuf->flowring_dma_handle[flowid]);
555 brcmf_flowring_delete(msgbuf->flow, flowid);
/* Pop the oldest flowring-create work item off msgbuf->work_queue under
 * the work-queue spinlock; returns NULL when the queue is empty.
 */
559 static struct brcmf_msgbuf_work_item *
560 brcmf_msgbuf_dequeue_work(struct brcmf_msgbuf *msgbuf)
562 struct brcmf_msgbuf_work_item *work = NULL;
565 spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
566 if (!list_empty(&msgbuf->work_queue)) {
567 work = list_first_entry(&msgbuf->work_queue,
568 struct brcmf_msgbuf_work_item, queue);
569 list_del(&work->queue);
571 spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
/* Worker-context half of flowring creation: allocate the coherent DMA
 * ring, configure the commonring over it, and send a FLOW_RING_CREATE
 * request to the dongle on the control submit ring. Any failure unwinds
 * (delete flowring state / remove ring) and returns
 * BRCMF_FLOWRING_INVALID_ID.
 */
578 brcmf_msgbuf_flowring_create_worker(struct brcmf_msgbuf *msgbuf,
579 struct brcmf_msgbuf_work_item *work)
581 struct brcmf_pub *drvr = msgbuf->drvr;
582 struct msgbuf_tx_flowring_create_req *create;
583 struct brcmf_commonring *commonring;
591 flowid = work->flowid;
592 dma_sz = BRCMF_H2D_TXFLOWRING_MAX_ITEM * BRCMF_H2D_TXFLOWRING_ITEMSIZE;
593 dma_buf = dma_alloc_coherent(msgbuf->drvr->bus_if->dev, dma_sz,
594 &msgbuf->flowring_dma_handle[flowid],
597 bphy_err(drvr, "dma_alloc_coherent failed\n");
598 brcmf_flowring_delete(msgbuf->flow, flowid);
599 return BRCMF_FLOWRING_INVALID_ID;
602 brcmf_commonring_config(msgbuf->flowrings[flowid],
603 BRCMF_H2D_TXFLOWRING_MAX_ITEM,
604 BRCMF_H2D_TXFLOWRING_ITEMSIZE, dma_buf);
606 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
607 brcmf_commonring_lock(commonring);
608 ret_ptr = brcmf_commonring_reserve_for_write(commonring);
610 bphy_err(drvr, "Failed to reserve space in commonring\n");
611 brcmf_commonring_unlock(commonring);
612 brcmf_msgbuf_remove_flowring(msgbuf, flowid);
613 return BRCMF_FLOWRING_INVALID_ID;
616 create = (struct msgbuf_tx_flowring_create_req *)ret_ptr;
617 create->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
618 create->msg.ifidx = work->ifidx;
619 create->msg.request_id = 0;
620 create->tid = brcmf_flowring_tid(msgbuf->flow, flowid);
/* firmware ring ids start at IDSTART; host flowids start at 0 */
621 create->flow_ring_id = cpu_to_le16(flowid +
622 BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
623 memcpy(create->sa, work->sa, ETH_ALEN);
624 memcpy(create->da, work->da, ETH_ALEN);
/* split the 64-bit DMA address into the two 32-bit message fields */
625 address = (u64)msgbuf->flowring_dma_handle[flowid];
626 create->flow_ring_addr.high_addr = cpu_to_le32(address >> 32);
627 create->flow_ring_addr.low_addr = cpu_to_le32(address & 0xffffffff);
628 create->max_items = cpu_to_le16(BRCMF_H2D_TXFLOWRING_MAX_ITEM);
629 create->len_item = cpu_to_le16(BRCMF_H2D_TXFLOWRING_ITEMSIZE);
631 brcmf_dbg(MSGBUF, "Send Flow Create Req flow ID %d for peer %pM prio %d ifindex %d\n",
632 flowid, work->da, create->tid, work->ifidx);
634 err = brcmf_commonring_write_complete(commonring);
635 brcmf_commonring_unlock(commonring);
637 bphy_err(drvr, "Failed to write commonring\n");
638 brcmf_msgbuf_remove_flowring(msgbuf, flowid);
639 return BRCMF_FLOWRING_INVALID_ID;
/* Work handler: drain the flowring-create queue, creating each ring. */
646 static void brcmf_msgbuf_flowring_worker(struct work_struct *work)
648 struct brcmf_msgbuf *msgbuf;
649 struct brcmf_msgbuf_work_item *create;
651 msgbuf = container_of(work, struct brcmf_msgbuf, flowring_work);
653 while ((create = brcmf_msgbuf_dequeue_work(msgbuf))) {
654 brcmf_msgbuf_flowring_create_worker(msgbuf, create);
/* Atomic-context half of flowring creation (called from the tx path):
 * allocate flowring state keyed on the packet's destination MAC and
 * priority, then queue a work item so the DMA allocation and firmware
 * request happen in process context. Returns the new flowid, or
 * BRCMF_FLOWRING_INVALID_ID on failure.
 */
660 static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx,
663 struct brcmf_msgbuf_work_item *create;
664 struct ethhdr *eh = (struct ethhdr *)(skb->data);
/* GFP_ATOMIC: may be called from the transmit (non-sleeping) path */
668 create = kzalloc(sizeof(*create), GFP_ATOMIC);
670 return BRCMF_FLOWRING_INVALID_ID;
672 flowid = brcmf_flowring_create(msgbuf->flow, eh->h_dest,
673 skb->priority, ifidx);
674 if (flowid == BRCMF_FLOWRING_INVALID_ID) {
679 create->flowid = flowid;
680 create->ifidx = ifidx;
681 memcpy(create->sa, eh->h_source, ETH_ALEN);
682 memcpy(create->da, eh->h_dest, ETH_ALEN);
684 spin_lock_irqsave(&msgbuf->flowring_work_lock, flags);
685 list_add_tail(&create->queue, &msgbuf->work_queue);
686 spin_unlock_irqrestore(&msgbuf->flowring_work_lock, flags);
687 schedule_work(&msgbuf->flowring_work);
/* Drain queued skbs from flowring @flowid onto its hardware ring: for
 * each packet, claim a tx pktid (DMA-mapping everything after the
 * Ethernet header), reserve a ring slot, and fill in a
 * MSGBUF_TYPE_TX_POST descriptor. On pktid or ring-space exhaustion the
 * skb is put back at the head of the flowring queue. The ring doorbell
 * is rung in batches (FLUSH_CNT2) and once more at the end.
 */
693 static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
695 struct brcmf_flowring *flow = msgbuf->flow;
696 struct brcmf_pub *drvr = msgbuf->drvr;
697 struct brcmf_commonring *commonring;
703 struct msgbuf_tx_msghdr *tx_msghdr;
706 commonring = msgbuf->flowrings[flowid];
707 if (!brcmf_commonring_write_available(commonring))
710 brcmf_commonring_lock(commonring);
/* start part-way toward the flush threshold so the first doorbell
 * comes after CNT1 packets rather than CNT2 */
712 count = BRCMF_MSGBUF_TX_FLUSH_CNT2 - BRCMF_MSGBUF_TX_FLUSH_CNT1;
713 while (brcmf_flowring_qlen(flow, flowid)) {
714 skb = brcmf_flowring_dequeue(flow, flowid);
716 bphy_err(drvr, "No SKB, but qlen %d\n",
717 brcmf_flowring_qlen(flow, flowid));
721 if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
722 msgbuf->tx_pktids, skb, ETH_HLEN,
723 &physaddr, &pktid)) {
724 brcmf_flowring_reinsert(flow, flowid, skb);
725 bphy_err(drvr, "No PKTID available !!\n");
728 ret_ptr = brcmf_commonring_reserve_for_write(commonring);
/* no ring space: release the pktid and requeue the skb */
730 brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
731 msgbuf->tx_pktids, pktid);
732 brcmf_flowring_reinsert(flow, flowid, skb);
737 tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;
739 tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
/* request_id is pktid+1 so 0 is never a valid id on the wire */
740 tx_msghdr->msg.request_id = cpu_to_le32(pktid + 1);
741 tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
742 tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
743 tx_msghdr->flags |= (skb->priority & 0x07) <<
744 BRCMF_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
745 tx_msghdr->seg_cnt = 1;
/* the 14-byte Ethernet header travels inline in the descriptor */
746 memcpy(tx_msghdr->txhdr, skb->data, ETH_HLEN);
747 tx_msghdr->data_len = cpu_to_le16(skb->len - ETH_HLEN);
748 address = (u64)physaddr;
749 tx_msghdr->data_buf_addr.high_addr = cpu_to_le32(address >> 32);
750 tx_msghdr->data_buf_addr.low_addr =
751 cpu_to_le32(address & 0xffffffff);
752 tx_msghdr->metadata_buf_len = 0;
753 tx_msghdr->metadata_buf_addr.high_addr = 0;
754 tx_msghdr->metadata_buf_addr.low_addr = 0;
755 atomic_inc(&commonring->outstanding_tx);
756 if (count >= BRCMF_MSGBUF_TX_FLUSH_CNT2) {
757 brcmf_commonring_write_complete(commonring);
/* final flush for any descriptors not yet submitted */
762 brcmf_commonring_write_complete(commonring);
763 brcmf_commonring_unlock(commonring);
/* Work handler: service every flowring whose bit is set in flow_map. */
767 static void brcmf_msgbuf_txflow_worker(struct work_struct *worker)
769 struct brcmf_msgbuf *msgbuf;
772 msgbuf = container_of(worker, struct brcmf_msgbuf, txflow_work);
773 for_each_set_bit(flowid, msgbuf->flow_map, msgbuf->max_flowrings) {
774 clear_bit(flowid, msgbuf->flow_map);
775 brcmf_msgbuf_txflow(msgbuf, flowid);
/* Mark @flowid as having pending tx and kick the txflow worker --
 * immediately when @force, otherwise only while the ring's outstanding
 * tx count is below the delay threshold (back-pressure).
 */
780 static int brcmf_msgbuf_schedule_txdata(struct brcmf_msgbuf *msgbuf, u32 flowid,
783 struct brcmf_commonring *commonring;
785 set_bit(flowid, msgbuf->flow_map);
786 commonring = msgbuf->flowrings[flowid];
787 if ((force) || (atomic_read(&commonring->outstanding_tx) <
788 BRCMF_MSGBUF_DELAY_TXWORKER_THRS))
789 queue_work(msgbuf->txflow_wq, &msgbuf->txflow_work);
/* Protocol tx entry point: find (or lazily create) the flowring for this
 * packet's destination/priority, enqueue the skb, and schedule the
 * txflow worker -- forcing it every TRICKLE_TXWORKER_THRS-th packet so
 * the queue keeps draining even under back-pressure.
 */
795 static int brcmf_msgbuf_tx_queue_data(struct brcmf_pub *drvr, int ifidx,
798 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
799 struct brcmf_flowring *flow = msgbuf->flow;
800 struct ethhdr *eh = (struct ethhdr *)(skb->data);
805 flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
806 if (flowid == BRCMF_FLOWRING_INVALID_ID) {
807 flowid = brcmf_msgbuf_flowring_create(msgbuf, ifidx, skb);
808 if (flowid == BRCMF_FLOWRING_INVALID_ID)
811 queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
812 force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
813 brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);
/* Thin proto-interface wrappers: these simply forward to the flowring
 * module, which owns per-interface addressing/peer state.
 */
820 brcmf_msgbuf_configure_addr_mode(struct brcmf_pub *drvr, int ifidx,
821 enum proto_addr_mode addr_mode)
823 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
825 brcmf_flowring_configure_addr_mode(msgbuf->flow, ifidx, addr_mode);
830 brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
832 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
834 brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer);
839 brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
841 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
843 brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer);
/* Handle MSGBUF_TYPE_IOCTL_CMPLT: record the firmware status, response
 * length and response-buffer pktid for the waiting dcmd caller, wake it,
 * and repost a fresh ioctl-response buffer to the dongle.
 */
848 brcmf_msgbuf_process_ioctl_complete(struct brcmf_msgbuf *msgbuf, void *buf)
850 struct msgbuf_ioctl_resp_hdr *ioctl_resp;
852 ioctl_resp = (struct msgbuf_ioctl_resp_hdr *)buf;
/* status is a signed 16-bit firmware error code */
854 msgbuf->ioctl_resp_status =
855 (s16)le16_to_cpu(ioctl_resp->compl_hdr.status);
856 msgbuf->ioctl_resp_ret_len = le16_to_cpu(ioctl_resp->resp_len);
857 msgbuf->ioctl_resp_pktid = le32_to_cpu(ioctl_resp->msg.request_id);
859 brcmf_msgbuf_ioctl_resp_wake(msgbuf);
861 if (msgbuf->cur_ioctlrespbuf)
862 msgbuf->cur_ioctlrespbuf--;
863 brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
/* Handle MSGBUF_TYPE_TX_STATUS: recover the skb via its tx pktid
 * (request_id was pktid+1 on submission, hence the -1), mark the
 * flowring in txstatus_done_map, drop the outstanding-tx count and
 * finalize the transmitted skb.
 */
868 brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
870 struct brcmf_commonring *commonring;
871 struct msgbuf_tx_status *tx_status;
876 tx_status = (struct msgbuf_tx_status *)buf;
877 idx = le32_to_cpu(tx_status->msg.request_id) - 1;
/* convert firmware ring id back to the host's 0-based flowid */
878 flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
879 flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
880 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
881 msgbuf->tx_pktids, idx);
885 set_bit(flowid, msgbuf->txstatus_done_map);
886 commonring = msgbuf->flowrings[flowid];
887 atomic_dec(&commonring->outstanding_tx);
889 brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx),
/* Post up to @count receive buffers on the RXPOST submit ring: reserve
 * ring slots in bulk, and for each slot allocate a max-size skb, claim an
 * rx pktid (mapping the whole buffer), and fill a MSGBUF_TYPE_RXBUF_POST
 * descriptor. If skb or pktid allocation fails mid-loop, the unused ring
 * slots are cancelled. Returns the number of buffers actually posted
 * (presumably -- the return lines are not visible in this view).
 */
894 static u32 brcmf_msgbuf_rxbuf_data_post(struct brcmf_msgbuf *msgbuf, u32 count)
896 struct brcmf_pub *drvr = msgbuf->drvr;
897 struct brcmf_commonring *commonring;
903 struct msgbuf_rx_bufpost *rx_bufpost;
908 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
909 ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
913 brcmf_dbg(MSGBUF, "Failed to reserve space in commonring\n");
917 for (i = 0; i < alloced; i++) {
918 rx_bufpost = (struct msgbuf_rx_bufpost *)ret_ptr;
919 memset(rx_bufpost, 0, sizeof(*rx_bufpost));
921 skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
924 bphy_err(drvr, "Failed to alloc SKB\n");
/* give back the ring slots we reserved but will not fill */
925 brcmf_commonring_write_cancel(commonring, alloced - i);
930 if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
931 msgbuf->rx_pktids, skb, 0,
932 &physaddr, &pktid)) {
933 dev_kfree_skb_any(skb);
934 bphy_err(drvr, "No PKTID available !!\n");
935 brcmf_commonring_write_cancel(commonring, alloced - i);
/* if the dongle writes rx metadata, carve it off the front of
 * the buffer and advance the data address past it */
939 if (msgbuf->rx_metadata_offset) {
940 address = (u64)physaddr;
941 rx_bufpost->metadata_buf_len =
942 cpu_to_le16(msgbuf->rx_metadata_offset);
943 rx_bufpost->metadata_buf_addr.high_addr =
944 cpu_to_le32(address >> 32);
945 rx_bufpost->metadata_buf_addr.low_addr =
946 cpu_to_le32(address & 0xffffffff);
948 skb_pull(skb, msgbuf->rx_metadata_offset);
950 physaddr += msgbuf->rx_metadata_offset;
952 rx_bufpost->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
953 rx_bufpost->msg.request_id = cpu_to_le32(pktid);
955 address = (u64)physaddr;
956 rx_bufpost->data_buf_len = cpu_to_le16((u16)pktlen);
957 rx_bufpost->data_buf_addr.high_addr =
958 cpu_to_le32(address >> 32);
959 rx_bufpost->data_buf_addr.low_addr =
960 cpu_to_le32(address & 0xffffffff);
962 ret_ptr += brcmf_commonring_len_item(commonring);
966 brcmf_commonring_write_complete(commonring);
/* Top the rx buffer pool back up to max_rxbufpost, retrying with the
 * remainder when a post call returns fewer buffers than requested.
 */
973 brcmf_msgbuf_rxbuf_data_fill(struct brcmf_msgbuf *msgbuf)
978 fillbufs = msgbuf->max_rxbufpost - msgbuf->rxbufpost;
981 retcount = brcmf_msgbuf_rxbuf_data_post(msgbuf, fillbufs);
984 msgbuf->rxbufpost += retcount;
985 fillbufs -= retcount;
/* Account for @rxcnt consumed rx buffers and refill once the pool drops
 * BRCMF_MSGBUF_RXBUFPOST_THRESHOLD below its maximum.
 */
991 brcmf_msgbuf_update_rxbufpost_count(struct brcmf_msgbuf *msgbuf, u16 rxcnt)
993 msgbuf->rxbufpost -= rxcnt;
994 if (msgbuf->rxbufpost <= (msgbuf->max_rxbufpost -
995 BRCMF_MSGBUF_RXBUFPOST_THRESHOLD))
996 brcmf_msgbuf_rxbuf_data_fill(msgbuf);
/* Post control-path receive buffers (firmware events when @event_buf,
 * ioctl responses otherwise) on the control submit ring, under the ring
 * lock. Structure mirrors brcmf_msgbuf_rxbuf_data_post: bulk-reserve,
 * per-slot skb + pktid allocation, cancel unused slots on failure.
 */
1001 brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
1004 struct brcmf_pub *drvr = msgbuf->drvr;
1005 struct brcmf_commonring *commonring;
1007 struct sk_buff *skb;
1010 dma_addr_t physaddr;
1011 struct msgbuf_rx_ioctl_resp_or_event *rx_bufpost;
1016 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
1017 brcmf_commonring_lock(commonring);
1018 ret_ptr = brcmf_commonring_reserve_for_write_multiple(commonring,
1022 bphy_err(drvr, "Failed to reserve space in commonring\n");
1023 brcmf_commonring_unlock(commonring);
1027 for (i = 0; i < alloced; i++) {
1028 rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
1029 memset(rx_bufpost, 0, sizeof(*rx_bufpost));
1031 skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
1034 bphy_err(drvr, "Failed to alloc SKB\n");
1035 brcmf_commonring_write_cancel(commonring, alloced - i);
1040 if (brcmf_msgbuf_alloc_pktid(msgbuf->drvr->bus_if->dev,
1041 msgbuf->rx_pktids, skb, 0,
1042 &physaddr, &pktid)) {
1043 dev_kfree_skb_any(skb);
1044 bphy_err(drvr, "No PKTID available !!\n");
1045 brcmf_commonring_write_cancel(commonring, alloced - i);
/* same descriptor layout for both kinds; only msgtype differs */
1049 rx_bufpost->msg.msgtype = MSGBUF_TYPE_EVENT_BUF_POST;
1051 rx_bufpost->msg.msgtype =
1052 MSGBUF_TYPE_IOCTLRESP_BUF_POST;
1053 rx_bufpost->msg.request_id = cpu_to_le32(pktid);
1055 address = (u64)physaddr;
1056 rx_bufpost->host_buf_len = cpu_to_le16((u16)pktlen);
1057 rx_bufpost->host_buf_addr.high_addr =
1058 cpu_to_le32(address >> 32);
1059 rx_bufpost->host_buf_addr.low_addr =
1060 cpu_to_le32(address & 0xffffffff);
1062 ret_ptr += brcmf_commonring_len_item(commonring);
1066 brcmf_commonring_write_complete(commonring);
1068 brcmf_commonring_unlock(commonring);
/* Refill the ioctl-response buffer pool up to max_ioctlrespbuf. */
1074 static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf)
1078 count = msgbuf->max_ioctlrespbuf - msgbuf->cur_ioctlrespbuf;
1079 count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, false, count);
1080 msgbuf->cur_ioctlrespbuf += count;
/* Refill the firmware-event buffer pool up to max_eventbuf. */
1084 static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
1088 count = msgbuf->max_eventbuf - msgbuf->cur_eventbuf;
1089 count = brcmf_msgbuf_rxbuf_ctrl_post(msgbuf, true, count);
1090 msgbuf->cur_eventbuf += count;
/* Handle MSGBUF_TYPE_WL_EVENT: repost a replacement event buffer, fetch
 * the event skb by the pktid in request_id, trim it to the reported
 * event length and hand it to the firmware-event (fweh) dispatcher.
 * Frees the skb itself on the invalid-interface error path.
 */
1094 static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
1096 struct brcmf_pub *drvr = msgbuf->drvr;
1097 struct msgbuf_rx_event *event;
1100 struct sk_buff *skb;
1101 struct brcmf_if *ifp;
1103 event = (struct msgbuf_rx_event *)buf;
1104 idx = le32_to_cpu(event->msg.request_id);
1105 buflen = le16_to_cpu(event->event_data_len);
/* replace the consumed event buffer before processing */
1107 if (msgbuf->cur_eventbuf)
1108 msgbuf->cur_eventbuf--;
1109 brcmf_msgbuf_rxbuf_event_post(msgbuf);
1111 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
1112 msgbuf->rx_pktids, idx);
1116 if (msgbuf->rx_dataoffset)
1117 skb_pull(skb, msgbuf->rx_dataoffset);
1119 skb_trim(skb, buflen);
1121 ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx);
1122 if (!ifp || !ifp->ndev) {
1123 bphy_err(drvr, "Received pkt for invalid ifidx %d\n",
1128 skb->protocol = eth_type_trans(skb, ifp->ndev);
1130 brcmf_fweh_process_skb(ifp->drvr, skb, 0);
1133 brcmu_pkt_buf_free_skb(skb);
/* Handle MSGBUF_TYPE_RX_CMPLT: account for one consumed rx buffer, fetch
 * the skb by pktid, strip the per-packet (or default) data offset, trim
 * to the reported length, then deliver either to the monitor interface
 * (802.11-framed packets) or to normal netif rx.
 */
1138 brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
1140 struct brcmf_pub *drvr = msgbuf->drvr;
1141 struct msgbuf_rx_complete *rx_complete;
1142 struct sk_buff *skb;
1147 struct brcmf_if *ifp;
1149 brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);
1151 rx_complete = (struct msgbuf_rx_complete *)buf;
1152 data_offset = le16_to_cpu(rx_complete->data_offset);
1153 buflen = le16_to_cpu(rx_complete->data_len);
1154 idx = le32_to_cpu(rx_complete->msg.request_id);
1155 flags = le16_to_cpu(rx_complete->flags);
1157 skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
1158 msgbuf->rx_pktids, idx);
/* prefer the per-packet offset; fall back to the global one */
1163 skb_pull(skb, data_offset);
1164 else if (msgbuf->rx_dataoffset)
1165 skb_pull(skb, msgbuf->rx_dataoffset);
1167 skb_trim(skb, buflen);
1169 if ((flags & BRCMF_MSGBUF_PKT_FLAGS_FRAME_MASK) ==
1170 BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_11) {
1171 ifp = msgbuf->drvr->mon_if;
1174 bphy_err(drvr, "Received unexpected monitor pkt\n");
1175 brcmu_pkt_buf_free_skb(skb);
1179 brcmf_netif_mon_rx(ifp, skb);
1183 ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx);
1184 if (!ifp || !ifp->ndev) {
1185 bphy_err(drvr, "Received pkt for invalid ifidx %d\n",
1186 rx_complete->msg.ifidx);
1187 brcmu_pkt_buf_free_skb(skb);
1191 skb->protocol = eth_type_trans(skb, ifp->ndev);
1192 brcmf_netif_rx(ifp, skb);
/* Handle MSGBUF_TYPE_GEN_STATUS: log any general firmware error. */
1195 static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
1198 struct msgbuf_gen_status *gen_status = buf;
1199 struct brcmf_pub *drvr = msgbuf->drvr;
1202 err = le16_to_cpu(gen_status->compl_hdr.status);
1204 bphy_err(drvr, "Firmware reported general error: %d\n", err);
/* Handle MSGBUF_TYPE_RING_STATUS: log a per-ring firmware error. */
1207 static void brcmf_msgbuf_process_ring_status(struct brcmf_msgbuf *msgbuf,
1210 struct msgbuf_ring_status *ring_status = buf;
1211 struct brcmf_pub *drvr = msgbuf->drvr;
1214 err = le16_to_cpu(ring_status->compl_hdr.status);
1216 int ring = le16_to_cpu(ring_status->compl_hdr.flow_ring_id);
1218 bphy_err(drvr, "Firmware reported ring %d error: %d\n", ring,
/* Handle MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT: on firmware failure tear
 * the ring down again; on success open the flowring and force a tx
 * worker run so any packets queued during creation get transmitted.
 */
1224 brcmf_msgbuf_process_flow_ring_create_response(struct brcmf_msgbuf *msgbuf,
1227 struct brcmf_pub *drvr = msgbuf->drvr;
1228 struct msgbuf_flowring_create_resp *flowring_create_resp;
1232 flowring_create_resp = (struct msgbuf_flowring_create_resp *)buf;
/* convert firmware ring id back to the host's 0-based flowid */
1234 flowid = le16_to_cpu(flowring_create_resp->compl_hdr.flow_ring_id);
1235 flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
1236 status = le16_to_cpu(flowring_create_resp->compl_hdr.status);
1239 bphy_err(drvr, "Flowring creation failed, code %d\n", status);
1240 brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1243 brcmf_dbg(MSGBUF, "Flowring %d Create response status %d\n", flowid,
1246 brcmf_flowring_open(msgbuf->flow, flowid);
1248 brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
/* Handle MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT: on firmware failure only
 * the host-side flowring state is deleted (the DMA ring is kept); on
 * success the ring is fully removed.
 */
1253 brcmf_msgbuf_process_flow_ring_delete_response(struct brcmf_msgbuf *msgbuf,
1256 struct brcmf_pub *drvr = msgbuf->drvr;
1257 struct msgbuf_flowring_delete_resp *flowring_delete_resp;
1261 flowring_delete_resp = (struct msgbuf_flowring_delete_resp *)buf;
1263 flowid = le16_to_cpu(flowring_delete_resp->compl_hdr.flow_ring_id);
1264 flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
1265 status = le16_to_cpu(flowring_delete_resp->compl_hdr.status);
1268 bphy_err(drvr, "Flowring deletion failed, code %d\n", status);
1269 brcmf_flowring_delete(msgbuf->flow, flowid);
1272 brcmf_dbg(MSGBUF, "Flowring %d Delete response status %d\n", flowid,
1275 brcmf_msgbuf_remove_flowring(msgbuf, flowid);
/* Dispatch one dongle->host message to its handler based on the common
 * header's msgtype. IOCTLPTR_REQ_ACK is intentionally a no-op beyond
 * the debug print; unknown types are logged as errors.
 */
1279 static void brcmf_msgbuf_process_msgtype(struct brcmf_msgbuf *msgbuf, void *buf)
1281 struct brcmf_pub *drvr = msgbuf->drvr;
1282 struct msgbuf_common_hdr *msg;
1284 msg = (struct msgbuf_common_hdr *)buf;
1285 switch (msg->msgtype) {
1286 case MSGBUF_TYPE_GEN_STATUS:
1287 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_GEN_STATUS\n");
1288 brcmf_msgbuf_process_gen_status(msgbuf, buf);
1290 case MSGBUF_TYPE_RING_STATUS:
1291 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RING_STATUS\n");
1292 brcmf_msgbuf_process_ring_status(msgbuf, buf);
1294 case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
1295 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT\n");
1296 brcmf_msgbuf_process_flow_ring_create_response(msgbuf, buf);
1298 case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
1299 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT\n");
1300 brcmf_msgbuf_process_flow_ring_delete_response(msgbuf, buf);
1302 case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
1303 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTLPTR_REQ_ACK\n");
1305 case MSGBUF_TYPE_IOCTL_CMPLT:
1306 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_IOCTL_CMPLT\n");
1307 brcmf_msgbuf_process_ioctl_complete(msgbuf, buf);
1309 case MSGBUF_TYPE_WL_EVENT:
1310 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_WL_EVENT\n");
1311 brcmf_msgbuf_process_event(msgbuf, buf);
1313 case MSGBUF_TYPE_TX_STATUS:
1314 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_TX_STATUS\n");
1315 brcmf_msgbuf_process_txstatus(msgbuf, buf);
1317 case MSGBUF_TYPE_RX_CMPLT:
1318 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RX_CMPLT\n");
1319 brcmf_msgbuf_process_rx_complete(msgbuf, buf);
1322 bphy_err(drvr, "Unsupported msgtype %d\n", msg->msgtype);
/* Drain one completion ring: get the readable region, process each item
 * (past the global rx data offset), and return the read pointer to the
 * dongle in UPDATE_RX_PTR_THRS-sized batches plus a final update.
 */
1328 static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf,
1329 struct brcmf_commonring *commonring)
1336 buf = brcmf_commonring_get_read_ptr(commonring, &count);
1342 brcmf_msgbuf_process_msgtype(msgbuf,
1343 buf + msgbuf->rx_dataoffset)
1344 buf += brcmf_commonring_len_item(commonring);
/* periodically release ring space back to the dongle */
1346 if (processed == BRCMF_MSGBUF_UPDATE_RX_PTR_THRS) {
1347 brcmf_commonring_read_complete(commonring, processed);
1353 brcmf_commonring_read_complete(commonring, processed);
1355 if (commonring->r_ptr == 0)
/* Bus-layer entry point (e.g. from the PCIe interrupt path): process the
 * three dongle->host completion rings, then for every flowring that saw
 * a tx completion, reschedule the tx worker if it still has queued
 * packets above the trickle threshold or room for more outstanding tx.
 */
1360 int brcmf_proto_msgbuf_rx_trigger(struct device *dev)
1362 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
1363 struct brcmf_pub *drvr = bus_if->drvr;
1364 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1365 struct brcmf_commonring *commonring;
1370 buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
1371 brcmf_msgbuf_process_rx(msgbuf, buf);
1372 buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
1373 brcmf_msgbuf_process_rx(msgbuf, buf);
1374 buf = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
1375 brcmf_msgbuf_process_rx(msgbuf, buf);
1377 for_each_set_bit(flowid, msgbuf->txstatus_done_map,
1378 msgbuf->max_flowrings) {
1379 clear_bit(flowid, msgbuf->txstatus_done_map);
1380 commonring = msgbuf->flowrings[flowid];
1381 qlen = brcmf_flowring_qlen(msgbuf->flow, flowid);
1382 if ((qlen > BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) ||
1383 ((qlen) && (atomic_read(&commonring->outstanding_tx) <
1384 BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS)))
1385 brcmf_msgbuf_schedule_txdata(msgbuf, flowid, true);
/*
 * Ask the firmware to delete a flowring, then remove the host-side
 * state.  A MSGBUF_TYPE_FLOW_RING_DELETE request is written into the
 * H2D control-submit ring under the ring lock; if a slot cannot be
 * reserved, or the final write-complete fails, the host-side flowring
 * is removed anyway (with an error logged), since the firmware never
 * learned about / acted on the request.
 *
 * @drvr:   driver context whose proto->pd holds the msgbuf state.
 * @flowid: host flowring index (offset by FLOWRING_IDSTART on the wire).
 *
 * NOTE(review): local declarations, the reserve-failure branch body
 * and the return paths are partially elided in this extract.
 */
1392 void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid)
1394 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1395 struct msgbuf_tx_flowring_delete_req *delete;
1396 struct brcmf_commonring *commonring;
1401 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
/* Serialize writers on the control-submit ring. */
1402 brcmf_commonring_lock(commonring);
1403 ret_ptr = brcmf_commonring_reserve_for_write(commonring);
/* Reserve failed: firmware cannot be told — drop host state only. */
1405 bphy_err(drvr, "FW unaware, flowring will be removed !!\n");
1406 brcmf_commonring_unlock(commonring);
1407 brcmf_msgbuf_remove_flowring(msgbuf, flowid);
/* Fill the reserved slot with the delete request. */
1411 delete = (struct msgbuf_tx_flowring_delete_req *)ret_ptr;
1413 ifidx = brcmf_flowring_ifidx_get(msgbuf->flow, flowid);
1415 delete->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
1416 delete->msg.ifidx = ifidx;
1417 delete->msg.request_id = 0;
/* On-the-wire ring id is little-endian and offset past the fixed
 * H2D message rings.
 */
1419 delete->flow_ring_id = cpu_to_le16(flowid +
1420 BRCMF_H2D_MSGRING_FLOWRING_IDSTART);
1423 brcmf_dbg(MSGBUF, "Send Flow Delete Req flow ID %d, ifindex %d\n",
/* Publish the slot to the device, then drop the ring lock. */
1426 err = brcmf_commonring_write_complete(commonring);
1427 brcmf_commonring_unlock(commonring);
/* Submission failed: remove host state regardless. */
1429 bphy_err(drvr, "Failed to submit RING_DELETE, flowring will be removed\n");
1430 brcmf_msgbuf_remove_flowring(msgbuf, flowid);
1435 static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
1437 struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
1438 struct brcmf_pub *drvr = bus_if->drvr;
1439 struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1440 struct brcmf_commonring *commonring;
1442 struct brcmf_flowring_ring *ring;
1443 struct brcmf_flowring_hash *hash;
1445 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
1446 seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n",
1447 commonring->r_ptr, commonring->w_ptr, commonring->depth);
1448 commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
1449 seq_printf(seq, "h2d_rx_submit: rp %4u, wp %4u, depth %4u\n",
1450 commonring->r_ptr, commonring->w_ptr, commonring->depth);
1451 commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
1452 seq_printf(seq, "d2h_ctl_cmplt: rp %4u, wp %4u, depth %4u\n",
1453 commonring->r_ptr, commonring->w_ptr, commonring->depth);
1454 commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
1455 seq_printf(seq, "d2h_tx_cmplt: rp %4u, wp %4u, depth %4u\n",
1456 commonring->r_ptr, commonring->w_ptr, commonring->depth);
1457 commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
1458 seq_printf(seq, "d2h_rx_cmplt: rp %4u, wp %4u, depth %4u\n",
1459 commonring->r_ptr, commonring->w_ptr, commonring->depth);
1461 seq_printf(seq, "\nh2d_flowrings: depth %u\n",
1462 BRCMF_H2D_TXFLOWRING_MAX_ITEM);
1463 seq_puts(seq, "Active flowrings:\n");
1464 hash = msgbuf->flow->hash;
1465 for (i = 0; i < msgbuf->flow->nrofrings; i++) {
1466 if (!msgbuf->flow->rings[i])
1468 ring = msgbuf->flow->rings[i];
1469 if (ring->status != RING_OPEN)
1471 commonring = msgbuf->flowrings[i];
1472 hash = &msgbuf->flow->hash[ring->hash_id];
1473 seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n"
1474 " ifidx %u, fifo %u, da %pM\n",
1475 i, commonring->r_ptr, commonring->w_ptr,
1476 skb_queue_len(&ring->skblist), ring->blocked,
1477 hash->ifidx, hash->fifo, hash->mac);
/* Stub variant of the stats dump — presumably the body (elided here,
 * along with the surrounding #if/#else on debug support) just returns 0
 * when debug stats are compiled out; TODO confirm against full source.
 */
1483 static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
/* Register the "msgbuf_stats" debugfs entry backed by
 * brcmf_msgbuf_stats_read().
 */
1489 static void brcmf_msgbuf_debugfs_create(struct brcmf_pub *drvr)
1491 brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read);
/*
 * Attach the msgbuf protocol layer to the driver.
 *
 * Allocates the protocol state: TX-flow workqueue, flowid bitmaps,
 * DMA-coherent ioctl buffer, packet-id maps for TX/RX, flowring
 * bookkeeping, then wires the brcmf_proto ops table and pre-posts RX
 * data/event/ioctl-response buffers to the device.
 *
 * Returns 0 on success; on any allocation failure unwinds through the
 * shared cleanup tail at the bottom (labels elided in this extract)
 * and presumably returns -ENOMEM — TODO confirm the elided return
 * value.
 *
 * NOTE(review): error-path gotos, labels and several closing braces
 * are elided in this extract; the cleanup tail (L802 onward) frees
 * everything allocated above in reverse-ish order.
 */
1494 int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
1496 struct brcmf_bus_msgbuf *if_msgbuf;
1497 struct brcmf_msgbuf *msgbuf;
1501 if_msgbuf = drvr->bus_if->msgbuf;
/* Clamp the bus-reported flowring count to what the flowring hash
 * can address.
 */
1503 if (if_msgbuf->max_flowrings >= BRCMF_FLOWRING_HASHSIZE) {
1504 bphy_err(drvr, "driver not configured for this many flowrings %d\n",
1505 if_msgbuf->max_flowrings);
1506 if_msgbuf->max_flowrings = BRCMF_FLOWRING_HASHSIZE - 1;
1509 msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL);
/* Dedicated single-threaded workqueue for TX flow processing. */
1513 msgbuf->txflow_wq = create_singlethread_workqueue("msgbuf_txflow");
1514 if (msgbuf->txflow_wq == NULL) {
1515 bphy_err(drvr, "workqueue creation failed\n");
1518 INIT_WORK(&msgbuf->txflow_work, brcmf_msgbuf_txflow_worker);
/* Two bitmaps sized to max_flowrings: pending-flow map and
 * txstatus-done map.
 */
1519 count = BITS_TO_LONGS(if_msgbuf->max_flowrings);
1520 count = count * sizeof(unsigned long);
1521 msgbuf->flow_map = kzalloc(count, GFP_KERNEL);
1522 if (!msgbuf->flow_map)
1525 msgbuf->txstatus_done_map = kzalloc(count, GFP_KERNEL);
1526 if (!msgbuf->txstatus_done_map)
1529 msgbuf->drvr = drvr;
/* DMA-coherent buffer shared with the device for ioctl payloads;
 * its bus address is split into hi/lo 32-bit halves for the
 * message format.
 */
1530 msgbuf->ioctbuf = dma_alloc_coherent(drvr->bus_if->dev,
1531 BRCMF_TX_IOCTL_MAX_MSG_SIZE,
1532 &msgbuf->ioctbuf_handle,
1534 if (!msgbuf->ioctbuf)
1536 address = (u64)msgbuf->ioctbuf_handle;
1537 msgbuf->ioctbuf_phys_hi = address >> 32;
1538 msgbuf->ioctbuf_phys_lo = address & 0xffffffff;
/* Install the msgbuf implementations of the protocol ops. */
1540 drvr->proto->hdrpull = brcmf_msgbuf_hdrpull;
1541 drvr->proto->query_dcmd = brcmf_msgbuf_query_dcmd;
1542 drvr->proto->set_dcmd = brcmf_msgbuf_set_dcmd;
1543 drvr->proto->tx_queue_data = brcmf_msgbuf_tx_queue_data;
1544 drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
1545 drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
1546 drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
1547 drvr->proto->rxreorder = brcmf_msgbuf_rxreorder;
1548 drvr->proto->debugfs_create = brcmf_msgbuf_debugfs_create;
1549 drvr->proto->pd = msgbuf;
1551 init_waitqueue_head(&msgbuf->ioctl_resp_wait);
/* Take over the ring pointers set up by the bus layer. */
1553 msgbuf->commonrings =
1554 (struct brcmf_commonring **)if_msgbuf->commonrings;
1555 msgbuf->flowrings = (struct brcmf_commonring **)if_msgbuf->flowrings;
1556 msgbuf->max_flowrings = if_msgbuf->max_flowrings;
1557 msgbuf->flowring_dma_handle =
1558 kcalloc(msgbuf->max_flowrings,
1559 sizeof(*msgbuf->flowring_dma_handle), GFP_KERNEL);
1560 if (!msgbuf->flowring_dma_handle)
1563 msgbuf->rx_dataoffset = if_msgbuf->rx_dataoffset;
1564 msgbuf->max_rxbufpost = if_msgbuf->max_rxbufpost;
1566 msgbuf->max_ioctlrespbuf = BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST;
1567 msgbuf->max_eventbuf = BRCMF_MSGBUF_MAX_EVENTBUF_POST;
/* Packet-id maps translate between skbs and the 16-bit ids carried
 * in ring messages, for TX and RX directions separately.
 */
1569 msgbuf->tx_pktids = brcmf_msgbuf_init_pktids(NR_TX_PKTIDS,
1571 if (!msgbuf->tx_pktids)
1573 msgbuf->rx_pktids = brcmf_msgbuf_init_pktids(NR_RX_PKTIDS,
1575 if (!msgbuf->rx_pktids)
1578 msgbuf->flow = brcmf_flowring_attach(drvr->bus_if->dev,
1579 if_msgbuf->max_flowrings);
1584 brcmf_dbg(MSGBUF, "Feeding buffers, rx data %d, rx event %d, rx ioctl resp %d\n",
1585 msgbuf->max_rxbufpost, msgbuf->max_eventbuf,
1586 msgbuf->max_ioctlrespbuf);
/* Pre-post RX data buffers; retry (bounded to 10 passes) until the
 * target post count is reached.
 */
1589 brcmf_msgbuf_rxbuf_data_fill(msgbuf);
1590 if (msgbuf->max_rxbufpost != msgbuf->rxbufpost)
1595 } while (count < 10);
/* Pre-post event and ioctl-response buffers. */
1596 brcmf_msgbuf_rxbuf_event_post(msgbuf);
1597 brcmf_msgbuf_rxbuf_ioctlresp_post(msgbuf);
1599 INIT_WORK(&msgbuf->flowring_work, brcmf_msgbuf_flowring_worker);
1600 spin_lock_init(&msgbuf->flowring_work_lock);
1601 INIT_LIST_HEAD(&msgbuf->work_queue);
/* --- shared error-cleanup tail (labels elided) --- */
1607 kfree(msgbuf->flow_map);
1608 kfree(msgbuf->txstatus_done_map);
1609 brcmf_msgbuf_release_pktids(msgbuf);
1610 kfree(msgbuf->flowring_dma_handle);
1611 if (msgbuf->ioctbuf)
1612 dma_free_coherent(drvr->bus_if->dev,
1613 BRCMF_TX_IOCTL_MAX_MSG_SIZE,
1615 msgbuf->ioctbuf_handle);
1622 void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr)
1624 struct brcmf_msgbuf *msgbuf;
1625 struct brcmf_msgbuf_work_item *work;
1627 brcmf_dbg(TRACE, "Enter\n");
1628 if (drvr->proto->pd) {
1629 msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
1630 cancel_work_sync(&msgbuf->flowring_work);
1631 while (!list_empty(&msgbuf->work_queue)) {
1632 work = list_first_entry(&msgbuf->work_queue,
1633 struct brcmf_msgbuf_work_item,
1635 list_del(&work->queue);
1638 kfree(msgbuf->flow_map);
1639 kfree(msgbuf->txstatus_done_map);
1640 if (msgbuf->txflow_wq)
1641 destroy_workqueue(msgbuf->txflow_wq);
1643 brcmf_flowring_detach(msgbuf->flow);
1644 dma_free_coherent(drvr->bus_if->dev,
1645 BRCMF_TX_IOCTL_MAX_MSG_SIZE,
1646 msgbuf->ioctbuf, msgbuf->ioctbuf_handle);
1647 brcmf_msgbuf_release_pktids(msgbuf);
1648 kfree(msgbuf->flowring_dma_handle);
1650 drvr->proto->pd = NULL;