// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)

#define IPA_REPLENISH_BATCH	16

/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
#define IPA_RX_BUFFER_SIZE	8192	/* PAGE_SIZE > 4096 wastes a LOT */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD	(PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
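/* Note (added for illustration): SKB_MAX_ORDER(NET_SKB_PAD, 0) is the
 * payload that fits in one page after reserving NET_SKB_PAD bytes of
 * headroom and the skb_shared_info trailer, so the overhead computed
 * above works out to NET_SKB_PAD plus
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) -- typically a few
 * hundred bytes on a system with 4 KiB pages.
 */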
#define IPA_ENDPOINT_STOP_RX_RETRIES		10
#define IPA_ENDPOINT_STOP_RX_SIZE		1	/* bytes */

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX	3
#define IPA_AGGR_TIME_LIMIT_DEFAULT		1000	/* microseconds */

#define ENDPOINT_STOP_DMA_TIMEOUT		15	/* milliseconds */
/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
	IPA_STATUS_OPCODE_PACKET		= 0x01,
	IPA_STATUS_OPCODE_NEW_FRAG_RULE		= 0x02,
	IPA_STATUS_OPCODE_DROPPED_PACKET	= 0x04,
	IPA_STATUS_OPCODE_SUSPENDED_PACKET	= 0x08,
	IPA_STATUS_OPCODE_LOG			= 0x10,
	IPA_STATUS_OPCODE_DCMP			= 0x20,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};
/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
	/* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
	IPA_STATUS_EXCEPTION_IPTYPE		= 0x04,
	IPA_STATUS_EXCEPTION_PACKET_LENGTH	= 0x08,
	IPA_STATUS_EXCEPTION_FRAG_RULE_MISS	= 0x10,
	IPA_STATUS_EXCEPTION_SW_FILT		= 0x20,
	/* The meaning of the next value depends on the packet's IP version */
	IPA_STATUS_EXCEPTION_NAT		= 0x40,	/* IPv4 */
	IPA_STATUS_EXCEPTION_IPV6CT		= IPA_STATUS_EXCEPTION_NAT,
};
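/* Clarifying note (added): the hardware reports the same exception value
 * 0x40 for both cases.  It means a NAT exception when the offending
 * packet is IPv4, and an IPv6 connection-tracking exception when it is
 * IPv6, which is why the two enumerated names share one value.
 */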
/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};
/* Field masks for struct ipa_status structure fields */

#define IPA_STATUS_SRC_IDX_FMASK		GENMASK(4, 0)

#define IPA_STATUS_DST_IDX_FMASK		GENMASK(4, 0)

#define IPA_STATUS_FLAGS1_FLT_LOCAL_FMASK	GENMASK(0, 0)
#define IPA_STATUS_FLAGS1_FLT_HASH_FMASK	GENMASK(1, 1)
#define IPA_STATUS_FLAGS1_FLT_GLOBAL_FMASK	GENMASK(2, 2)
#define IPA_STATUS_FLAGS1_FLT_RET_HDR_FMASK	GENMASK(3, 3)
#define IPA_STATUS_FLAGS1_FLT_RULE_ID_FMASK	GENMASK(13, 4)
#define IPA_STATUS_FLAGS1_RT_LOCAL_FMASK	GENMASK(14, 14)
#define IPA_STATUS_FLAGS1_RT_HASH_FMASK		GENMASK(15, 15)
#define IPA_STATUS_FLAGS1_UCP_FMASK		GENMASK(16, 16)
#define IPA_STATUS_FLAGS1_RT_TBL_IDX_FMASK	GENMASK(21, 17)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK	GENMASK(31, 22)

#define IPA_STATUS_FLAGS2_NAT_HIT_FMASK		GENMASK_ULL(0, 0)
#define IPA_STATUS_FLAGS2_NAT_ENTRY_IDX_FMASK	GENMASK_ULL(13, 1)
#define IPA_STATUS_FLAGS2_NAT_TYPE_FMASK	GENMASK_ULL(15, 14)
#define IPA_STATUS_FLAGS2_TAG_INFO_FMASK	GENMASK_ULL(63, 16)

#define IPA_STATUS_FLAGS3_SEQ_NUM_FMASK		GENMASK(7, 0)
#define IPA_STATUS_FLAGS3_TOD_CTR_FMASK		GENMASK(31, 8)

#define IPA_STATUS_FLAGS4_HDR_LOCAL_FMASK	GENMASK(0, 0)
#define IPA_STATUS_FLAGS4_HDR_OFFSET_FMASK	GENMASK(10, 1)
#define IPA_STATUS_FLAGS4_FRAG_HIT_FMASK	GENMASK(11, 11)
#define IPA_STATUS_FLAGS4_FRAG_RULE_FMASK	GENMASK(15, 12)
#define IPA_STATUS_FLAGS4_HW_SPECIFIC_FMASK	GENMASK(31, 16)
#ifdef IPA_VALIDATE

static void ipa_endpoint_validate_build(void)
{
	/* The aggregation byte limit defines the point at which an
	 * aggregation window will close.  It is programmed into the
	 * IPA hardware as a number of KB.  We don't use "hard byte
	 * limit" aggregation, which means that we need to supply
	 * enough space in a receive buffer to hold a complete MTU
	 * plus normal skb overhead *after* that aggregation byte
	 * limit has been crossed.
	 *
	 * This check just ensures we don't define a receive buffer
	 * size that would exceed what we can represent in the field
	 * that is used to program its size.
	 */
	BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
		     field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
		     IPA_MTU + IPA_RX_BUFFER_OVERHEAD);

	/* I honestly don't know where this requirement comes from.  But
	 * it holds, and if we someday need to loosen the constraint we
	 * can try to track it down.
	 */
	BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
}
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
			    const struct ipa_gsi_endpoint_data *all_data,
			    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *other_data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name other_name;

	if (ipa_gsi_endpoint_data_empty(data))
		return true;

	if (!data->toward_ipa) {
		if (data->endpoint.filter_support) {
			dev_err(dev, "filtering not supported for "
					"RX endpoint %u\n",
				data->endpoint_id);
			return false;
		}

		return true;	/* Nothing more to check for RX */
	}
	if (data->endpoint.config.status_enable) {
		other_name = data->endpoint.config.tx.status_endpoint;
		if (other_name >= count) {
			dev_err(dev, "status endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* Status endpoint must be defined... */
		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		/* ...and has to be an RX endpoint... */
		if (other_data->toward_ipa) {
			dev_err(dev,
				"status endpoint for endpoint %u not RX\n",
				data->endpoint_id);
			return false;
		}

		/* ...and if it's to be an AP endpoint... */
		if (other_data->ee_id == GSI_EE_AP) {
			/* ...make sure it has status enabled. */
			if (!other_data->endpoint.config.status_enable) {
				dev_err(dev,
					"status not enabled for endpoint %u\n",
					other_data->endpoint_id);
				return false;
			}
		}
	}
	if (data->endpoint.config.dma_mode) {
		other_name = data->endpoint.config.dma_endpoint;
		if (other_name >= count) {
			dev_err(dev, "DMA endpoint name %u out of range "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}

		other_data = &all_data[other_name];
		if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "DMA endpoint name %u undefined "
					"for endpoint %u\n",
				other_name, data->endpoint_id);
			return false;
		}
	}

	return true;
}
static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	const struct ipa_gsi_endpoint_data *dp = data;
	struct device *dev = &ipa->pdev->dev;
	enum ipa_endpoint_name name;

	ipa_endpoint_validate_build();

	if (count > IPA_ENDPOINT_COUNT) {
		dev_err(dev, "too many endpoints specified (%u > %u)\n",
			count, IPA_ENDPOINT_COUNT);
		return false;
	}

	/* Make sure needed endpoints have defined data */
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
		dev_err(dev, "command TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
		dev_err(dev, "LAN RX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
		dev_err(dev, "AP->modem TX endpoint not defined\n");
		return false;
	}
	if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
		dev_err(dev, "AP<-modem RX endpoint not defined\n");
		return false;
	}

	for (name = 0; name < count; name++, dp++)
		if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
			return false;

	return true;
}
#else /* !IPA_VALIDATE */

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
				    const struct ipa_gsi_endpoint_data *data)
{
	return true;
}

#endif /* !IPA_VALIDATE */
/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
						  u32 tre_count)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;
	enum dma_data_direction direction;

	direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}
/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0.
 */
static int
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
	u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 mask;
	u32 val;

	/* assert(ipa->version == IPA_VERSION_3_5_1); */
	mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

	val = ioread32(ipa->reg_virt + offset);
	if (suspend_delay == !!(val & mask))
		return -EALREADY;	/* Already set to desired state */

	val ^= mask;
	iowrite32(val, ipa->reg_virt + offset);

	return 0;
}
/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
	bool support_suspend;
	u32 endpoint_id;

	/* DELAY mode doesn't work right on IPA v4.2 */
	if (ipa->version == IPA_VERSION_4_2)
		return;

	/* Only IPA v3.5.1 supports SUSPEND mode on RX endpoints */
	support_suspend = ipa->version == IPA_VERSION_3_5_1;

	for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		/* Set TX delay mode, or for IPA v3.5.1 RX suspend mode */
		if (endpoint->toward_ipa || support_suspend)
			(void)ipa_endpoint_init_ctrl(endpoint, enable);
	}
}
/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;
	struct gsi_trans *trans;
	u32 count;

	/* We need one command per modem TX endpoint.  We can get an upper
	 * bound on that by assuming all initialized endpoints are modem->IPA.
	 * That won't happen, and we could be more precise, but this is fine
	 * for now.  We need to end the transaction with a "tag process."
	 */
	count = hweight32(initialized) + ipa_cmd_tag_process_count();
	trans = ipa_cmd_trans_alloc(ipa, count);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction to reset modem exception endpoints\n");
		return -EBUSY;
	}

	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;
		u32 offset;

		initialized ^= BIT(endpoint_id);

		/* We only reset modem TX endpoints */
		endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;

		offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

		/* Value written is 0, and all bits are updated.  That
		 * means status is disabled on the endpoint, and as a
		 * result all other fields in the register are ignored.
		 */
		ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
	}

	ipa_cmd_tag_process_add(trans);

	/* XXX This should have a 1 second timeout */
	gsi_trans_commit_wait(trans);

	return 0;
}
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* FRAG_OFFLOAD_EN is 0 */
	if (endpoint->data->checksum) {
		if (endpoint->toward_ipa) {
			u32 checksum_offset;

			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
					       CS_OFFLOAD_EN_FMASK);
			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);
		} else {
			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
					       CS_OFFLOAD_EN_FMASK);
		}
	} else {
		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
				       CS_OFFLOAD_EN_FMASK);
	}
	/* CS_GEN_QMB_MASTER_SEL is 0 */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->qmap) {
		size_t header_size = sizeof(struct rmnet_map_header);

		if (endpoint->toward_ipa && endpoint->data->checksum)
			header_size += sizeof(struct rmnet_map_ul_csum_header);

		val |= u32_encode_bits(header_size, HDR_LEN_FMASK);
		/* metadata is the 4 byte rmnet_map header itself */
		val |= HDR_OFST_METADATA_VALID_FMASK;
		val |= u32_encode_bits(0, HDR_OFST_METADATA_FMASK);
		/* HDR_ADDITIONAL_CONST_LEN is 0; (IPA->AP only) */
		if (!endpoint->toward_ipa) {
			u32 size_offset = offsetof(struct rmnet_map_header,
						   pkt_len);

			val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
			val |= u32_encode_bits(size_offset,
					       HDR_OFST_PKT_SIZE_FMASK);
		}
		/* HDR_A5_MUX is 0 */
		/* HDR_LEN_INC_DEAGG_HDR is 0 */
		/* HDR_METADATA_REG_VALID is 0; (AP->IPA only) */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
	u32 pad_align = endpoint->data->rx.pad_align;
	u32 val = 0;

	val |= HDR_ENDIANNESS_FMASK;		/* big endian */
	val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
	/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
	/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
	/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
	if (!endpoint->toward_ipa)
		val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
/**
 * Generate a metadata mask value that will select only the mux_id
 * field in an rmnet_map header structure.  The mux_id is at offset
 * 1 byte from the beginning of the structure, but the metadata
 * value is treated as a 4-byte unit.  So this mask must be computed
 * with endianness in mind.  Note that ipa_endpoint_init_hdr_metadata_mask()
 * will convert this value to the proper byte order.
 *
 * Marked __always_inline because this is really computing a
 * constant value.
 */
static __always_inline __be32 ipa_rmnet_mux_id_metadata_mask(void)
{
	size_t mux_id_offset = offsetof(struct rmnet_map_header, mux_id);
	u32 mux_id_mask = 0;
	u8 *bytes;

	bytes = (u8 *)&mux_id_mask;
	bytes[mux_id_offset] = 0xff;		/* mux_id is 1 byte */

	return cpu_to_be32(mux_id_mask);
}
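/* Worked example (added): with mux_id at byte offset 1, the store above
 * yields mux_id_mask = 0x0000ff00 on a little-endian CPU and 0x00ff0000
 * on a big-endian one; after cpu_to_be32() both produce the same value
 * (0x00ff0000), so the mask written is independent of host byte order.
 */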
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

	if (!endpoint->toward_ipa && endpoint->data->qmap)
		val = ipa_rmnet_mux_id_metadata_mask();

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
	u32 val;

	if (endpoint->toward_ipa && endpoint->data->dma_mode) {
		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
	/* Other bitfields unspecified (and 0) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
	/* We don't use "hard byte limit" aggregation, so we define the
	 * aggregation limit such that our buffer has enough space *after*
	 * that limit to receive a full MTU of data, plus overhead.
	 */
	rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	return rx_buffer_size / SZ_1K;
}
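/* Worked example (added, assuming IPA_MTU is 1500 and roughly 400 bytes
 * of skb overhead on a 4 KiB page system): an 8192-byte buffer leaves
 * 8192 - 1500 - ~400 = ~6292 bytes, so the limit programmed is 6 KB.
 * Aggregation then closes once 6 KB is crossed, and the remaining ~2 KB
 * is guaranteed to hold the final MTU-sized packet plus skb overhead.
 */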
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	if (endpoint->data->aggregation) {
		if (!endpoint->toward_ipa) {
			u32 aggr_size = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
			u32 limit;

			val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
			val |= u32_encode_bits(aggr_size,
					       AGGR_BYTE_LIMIT_FMASK);

			limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
			val |= u32_encode_bits(limit / IPA_AGGR_GRANULARITY,
					       AGGR_TIME_LIMIT_FMASK);

			val |= u32_encode_bits(0, AGGR_PKT_LIMIT_FMASK);

			if (endpoint->data->rx.aggr_close_eof)
				val |= AGGR_SW_EOF_ACTIVE_FMASK;
			/* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
		} else {
			val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
					       AGGR_EN_FMASK);
			val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
			/* other fields ignored */
		}
		/* AGGR_FORCE_CLOSE is 0 */
	} else {
		val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
		/* other fields ignored */
	}

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
/* A return value of 0 indicates an error */
static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
	u32 scale;
	u32 base;
	u32 val;

	if (!microseconds)
		return 0;	/* invalid delay */

	/* Timer is represented in units of clock ticks. */
	if (ipa->version < IPA_VERSION_4_2)
		return microseconds;	/* XXX Needs to be computed */

	/* IPA v4.2 represents the tick count as base * scale */
	scale = 1;			/* XXX Needs to be computed */
	if (scale > field_max(SCALE_FMASK))
		return 0;		/* scale too big */

	base = DIV_ROUND_CLOSEST(microseconds, scale);
	if (base > field_max(BASE_VALUE_FMASK))
		return 0;		/* microseconds too big */

	val = u32_encode_bits(scale, SCALE_FMASK);
	val |= u32_encode_bits(base, BASE_VALUE_FMASK);

	return val;
}
static int ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
					     u32 microseconds)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* XXX We'll fix this when the register definition is clear */
	if (microseconds) {
		struct device *dev = &ipa->pdev->dev;

		dev_err(dev, "endpoint %u non-zero HOLB period (ignoring)\n",
			endpoint_id);
		microseconds = 0;
	}

	if (microseconds) {
		val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
		if (!val)
			return -EINVAL;
	} else {
		val = 0;	/* timeout is immediate */
	}
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
	iowrite32(val, ipa->reg_virt + offset);

	return 0;
}
static void
ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
{
	u32 endpoint_id = endpoint->endpoint_id;
	u32 offset;
	u32 val;

	val = u32_encode_bits(enable ? 1 : 0, HOL_BLOCK_EN_FMASK);
	offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
	u32 i;

	for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
		struct ipa_endpoint *endpoint = &ipa->endpoint[i];

		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;

		(void)ipa_endpoint_init_hol_block_timer(endpoint, 0);
		ipa_endpoint_init_hol_block_enable(endpoint, true);
	}
}
static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
	u32 val = 0;

	/* DEAGGR_HDR_LEN is 0 */
	/* PACKET_OFFSET_VALID is 0 */
	/* PACKET_OFFSET_LOCATION is ignored (not valid) */
	/* MAX_PACKET_LEN is 0 (not enforced) */

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
	u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
	u32 seq_type = endpoint->seq_type;
	u32 val = 0;

	/* Sequencer type is made up of four nibbles */
	val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
	/* The second two apply to replicated packets */
	val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK);
	val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK);

	iowrite32(val, endpoint->ipa->reg_virt + offset);
}
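/* Worked example (added): a seq_type of 0x2154 unpacks to HPS 0x4,
 * DPS 0x5, replicated HPS 0x1, and replicated DPS 0x2 -- each nibble is
 * masked with 0xf after shifting and lands in its own register field.
 */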
/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:	Endpoint pointer
 * @skb:	Socket buffer to send
 *
 * Returns:	0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
	struct gsi_trans *trans;
	u32 nr_frags;
	int ret;

	/* Make sure source endpoint's TLV FIFO has enough entries to
	 * hold the linear portion of the skb and all its fragments.
	 * If not, see if we can linearize it before giving up.
	 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (1 + nr_frags > endpoint->trans_tre_max) {
		if (skb_linearize(skb))
			return -E2BIG;
		nr_frags = 0;
	}

	trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
	if (!trans)
		return -EBUSY;

	ret = gsi_trans_skb_add(trans, skb);
	if (ret)
		goto err_trans_free;
	trans->data = skb;	/* transaction owns skb now */

	gsi_trans_commit(trans, !netdev_xmit_more());

	return 0;

err_trans_free:
	gsi_trans_free(trans);

	return -ENOMEM;
}
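/* Usage note (added; an assumption about the caller, not from this file):
 * the modem netdev's ndo_start_xmit handler is the expected caller, and a
 * nonzero return there typically results in the skb being dropped, since
 * the transaction never took ownership of it.
 */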
static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
	u32 endpoint_id = endpoint->endpoint_id;
	struct ipa *ipa = endpoint->ipa;
	u32 val = 0;
	u32 offset;

	offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

	if (endpoint->data->status_enable) {
		val |= STATUS_EN_FMASK;
		if (endpoint->toward_ipa) {
			enum ipa_endpoint_name name;
			u32 status_endpoint_id;

			name = endpoint->data->tx.status_endpoint;
			status_endpoint_id = ipa->name_map[name]->endpoint_id;

			val |= u32_encode_bits(status_endpoint_id,
					       STATUS_ENDP_FMASK);
		}
		/* STATUS_LOCATION is 0 (status element precedes packet) */
		/* The next field is present for IPA v4.0 and above */
		/* STATUS_PKT_SUPPRESS_FMASK is 0 */
	}

	iowrite32(val, ipa->reg_virt + offset);
}
static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
{
	struct gsi_trans *trans;
	bool doorbell = false;
	struct page *page;
	u32 offset;
	u32 len;
	int ret;

	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
	if (!page)
		return -ENOMEM;

	trans = ipa_endpoint_trans_alloc(endpoint, 1);
	if (!trans)
		goto err_free_pages;

	/* Offset the buffer to make space for skb headroom */
	offset = NET_SKB_PAD;
	len = IPA_RX_BUFFER_SIZE - offset;

	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		goto err_trans_free;
	trans->data = page;	/* transaction owns page now */

	if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
		doorbell = true;
		endpoint->replenish_ready = 0;
	}

	gsi_trans_commit(trans, doorbell);

	return 0;

err_trans_free:
	gsi_trans_free(trans);
err_free_pages:
	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));

	return -ENOMEM;
}
/**
 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
 * @endpoint:	Endpoint to be replenished
 * @count:	Number of buffers to add to the backlog
 *
 * Allocate RX packet wrapper structures with maximal socket buffers
 * for an endpoint.  These are supplied to the hardware, which fills
 * them with incoming data.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
{
	struct gsi *gsi;
	u32 backlog;

	if (!endpoint->replenish_enabled) {
		if (count)
			atomic_add(count, &endpoint->replenish_saved);
		return;
	}

	while (atomic_dec_not_zero(&endpoint->replenish_backlog))
		if (ipa_endpoint_replenish_one(endpoint))
			goto try_again_later;
	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	return;

try_again_later:
	/* The last one didn't succeed, so fix the backlog */
	backlog = atomic_inc_return(&endpoint->replenish_backlog);

	if (count)
		atomic_add(count, &endpoint->replenish_backlog);

	/* Whenever a receive buffer transaction completes we'll try to
	 * replenish again.  It's unlikely, but if we fail to supply even
	 * one buffer, nothing will trigger another replenish attempt.
	 * Receive buffer transactions use one TRE, so schedule work to
	 * try replenishing again if our backlog is *all* available TREs.
	 */
	gsi = &endpoint->ipa->gsi;
	if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
		schedule_delayed_work(&endpoint->replenish_work,
				      msecs_to_jiffies(1));
}
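/* Accounting note (added): replenish_backlog counts buffers the hardware
 * still needs, replenish_saved holds buffers owed while replenishing is
 * disabled, and each completed RX transaction calls back in with a count
 * of 1, keeping the total of outstanding buffers constant.
 */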
static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 max_backlog;
	u32 saved;

	endpoint->replenish_enabled = true;
	while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
		atomic_add(saved, &endpoint->replenish_backlog);

	/* Start replenishing if hardware currently has no buffers */
	max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
	if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
		ipa_endpoint_replenish(endpoint, 0);
}
static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
	u32 backlog;

	endpoint->replenish_enabled = false;
	while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
		atomic_add(backlog, &endpoint->replenish_saved);
}
static void ipa_endpoint_replenish_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct ipa_endpoint *endpoint;

	endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

	ipa_endpoint_replenish(endpoint, 0);
}
static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
				  void *data, u32 len, u32 extra)
{
	struct sk_buff *skb;

	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}

	/* Now receive it, or drop it if there's no netdev */
	if (endpoint->netdev)
		ipa_modem_skb_rx(endpoint->netdev, skb);
	else if (skb)
		dev_kfree_skb_any(skb);
}
static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
				   struct page *page, u32 len)
{
	struct sk_buff *skb;

	/* Nothing to do if there's no netdev */
	if (!endpoint->netdev)
		return false;

	/* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD)); */
	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}

	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
}
/* The format of a packet status element is the same for several status
 * types (opcodes).  The NEW_FRAG_RULE, LOG, DCMP (decompression) types
 * aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
	switch (opcode) {
	case IPA_STATUS_OPCODE_PACKET:
	case IPA_STATUS_OPCODE_DROPPED_PACKET:
	case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
	case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
		return true;
	default:
		return false;
	}
}
static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
				     const struct ipa_status *status)
{
	u32 endpoint_id;

	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
	endpoint_id = u32_get_bits(status->endp_dst_idx,
				   IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;

	return false;	/* Don't skip this packet, process it */
}
/* Return whether the status indicates the packet should be dropped */
static bool ipa_status_drop_packet(const struct ipa_status *status)
{
	u32 val;

	/* Deaggregation exceptions we drop; others we consume */
	if (status->exception)
		return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

	/* Drop the packet if it fails to match a routing rule; otherwise no */
	val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

	return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
				      struct page *page, u32 total_len)
{
	void *data = page_address(page) + NET_SKB_PAD;
	u32 unused = IPA_RX_BUFFER_SIZE - total_len;
	u32 resid = total_len;

	while (resid) {
		const struct ipa_status *status = data;
		u32 align;
		u32 len;

		if (resid < sizeof(*status)) {
			dev_err(&endpoint->ipa->pdev->dev,
				"short message (%u bytes < %zu byte status)\n",
				resid, sizeof(*status));
			break;
		}

		/* Skip over status packets that lack packet data */
		if (ipa_endpoint_status_skip(endpoint, status)) {
			data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}

		/* Compute the amount of buffer space consumed by the
		 * packet, including the status element.  If the hardware
		 * is configured to pad packet data to an aligned boundary,
		 * account for that.  And if checksum offload is enabled
		 * a trailer containing computed checksum information will
		 * be appended.
		 */
		align = endpoint->data->rx.pad_align ? : 1;
		len = le16_to_cpu(status->pkt_len);
		len = sizeof(*status) + ALIGN(len, align);
		if (endpoint->data->checksum)
			len += sizeof(struct rmnet_map_dl_csum_trailer);

		/* Charge the new packet with a proportional fraction of
		 * the unused space in the original receive buffer.
		 * XXX Charge a proportion of the *whole* receive buffer?
		 */
		if (!ipa_status_drop_packet(status)) {
			u32 extra = unused * len / total_len;
			void *data2 = data + sizeof(*status);
			u32 len2 = le16_to_cpu(status->pkt_len);

			/* Client receives only packet data (no status) */
			ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
		}

		/* Consume status and the full packet it describes */
		data += len;
		resid -= len;
	}
}
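/* Worked example (added, assuming the 32-byte status element layout shown
 * above and an 8-byte rmnet checksum trailer): for a 60-byte packet with
 * pad_align 4 and checksum offload enabled, len = 32 + ALIGN(60, 4) + 8 =
 * 100 bytes are consumed from the buffer, while the skb handed up carries
 * only the 60 bytes of packet data.
 */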
/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
				     struct gsi_trans *trans)
{
}
1033 /* Complete transaction initiated in ipa_endpoint_replenish_one() */
1034 static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
1035 struct gsi_trans *trans)
1039 ipa_endpoint_replenish(endpoint, 1);
1041 if (trans->cancelled)
1044 /* Parse or build a socket buffer using the actual received length */
1046 if (endpoint->data->status_enable)
1047 ipa_endpoint_status_parse(endpoint, page, trans->len);
1048 else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1049 trans->data = NULL; /* Pages have been consumed */
void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
				 struct gsi_trans *trans)
{
	if (endpoint->toward_ipa)
		ipa_endpoint_tx_complete(endpoint, trans);
	else
		ipa_endpoint_rx_complete(endpoint, trans);
}
void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
				struct gsi_trans *trans)
{
	if (endpoint->toward_ipa) {
		struct ipa *ipa = endpoint->ipa;

		/* Nothing to do for command transactions */
		if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;

		if (page)
			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
	}
}
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
	u32 val;

	/* ROUTE_DIS is 0 */
	val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_HDR_TABLE_FMASK;
	val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
	val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
	val |= ROUTE_DEF_RETAIN_HDR_FMASK;

	iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
}
void ipa_endpoint_default_route_clear(struct ipa *ipa)
{
	ipa_endpoint_default_route_set(ipa, 0);
}
static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	u32 offset;
	u32 val;

	/* assert(mask & ipa->available); */
	offset = ipa_reg_state_aggr_active_offset(ipa->version);
	val = ioread32(ipa->reg_virt + offset);

	return !!(val & mask);
}
static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;

	/* assert(mask & ipa->available); */
	iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}
/**
 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
 * @endpoint:	Endpoint to be reset
 *
 * If aggregation is active on an RX endpoint when a reset is performed
 * on its underlying GSI channel, a special sequence of actions must be
 * taken to ensure the IPA pipeline is properly cleared.
 *
 * Return:	0 if successful, or a negative error code
 */
static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct ipa *ipa = endpoint->ipa;
	bool endpoint_suspended = false;
	struct gsi *gsi = &ipa->gsi;
	bool db_enable;
	dma_addr_t addr;
	u32 retries;
	u32 len = 1;
	void *virt;
	int ret;

	virt = kzalloc(len, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr)) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	/* Force close aggregation before issuing the reset */
	ipa_endpoint_force_close(endpoint);

	/* Reset and reconfigure the channel with the doorbell engine
	 * disabled.  Then poll until we know aggregation is no longer
	 * active.  We'll re-enable the doorbell (if appropriate) when
	 * we reset again below.
	 */
	gsi_channel_reset(gsi, endpoint->channel_id, false);

	/* Make sure the channel isn't suspended */
	if (endpoint->ipa->version == IPA_VERSION_3_5_1)
		if (!ipa_endpoint_init_ctrl(endpoint, false))
			endpoint_suspended = true;

	/* Start channel and do a 1 byte read */
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret)
		goto out_suspend_again;

	ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
	if (ret)
		goto err_endpoint_stop;

	/* Wait for aggregation to be closed on the channel */
	retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		msleep(1);
	} while (retries--);

	/* Check one last time */
	if (ipa_endpoint_aggr_active(endpoint))
		dev_err(dev, "endpoint %u still active during reset\n",
			endpoint->endpoint_id);

	gsi_trans_read_byte_done(gsi, endpoint->channel_id);

	ret = ipa_endpoint_stop(endpoint);
	if (ret)
		goto out_suspend_again;

	/* Finally, reset and reconfigure the channel again (re-enabling
	 * the doorbell engine if appropriate).  Sleep for 1 millisecond to
	 * complete the channel reset sequence.  Finish by suspending the
	 * channel again (if necessary).
	 */
	db_enable = ipa->version == IPA_VERSION_3_5_1;
	gsi_channel_reset(gsi, endpoint->channel_id, db_enable);

	msleep(1);

	goto out_suspend_again;

err_endpoint_stop:
	ipa_endpoint_stop(endpoint);
out_suspend_again:
	if (endpoint_suspended)
		(void)ipa_endpoint_init_ctrl(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
out_kfree:
	kfree(virt);

	return ret;
}
static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
{
	u32 channel_id = endpoint->channel_id;
	struct ipa *ipa = endpoint->ipa;
	bool db_enable;
	bool special;
	int ret = 0;

	/* On IPA v3.5.1, if an RX endpoint is reset while aggregation
	 * is active, we need to handle things specially to recover.
	 * All other cases just need to reset the underlying GSI channel.
	 *
	 * IPA v3.5.1 enables the doorbell engine.  Newer versions do not.
	 */
	db_enable = ipa->version == IPA_VERSION_3_5_1;
	special = !endpoint->toward_ipa && endpoint->data->aggregation;
	if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, db_enable);

	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d resetting channel %u for endpoint %u\n",
			ret, endpoint->channel_id, endpoint->endpoint_id);
}
static int ipa_endpoint_stop_rx_dma(struct ipa *ipa)
{
	u16 size = IPA_ENDPOINT_STOP_RX_SIZE;
	struct gsi_trans *trans;
	dma_addr_t addr;
	int ret;

	trans = ipa_cmd_trans_alloc(ipa, 1);
	if (!trans) {
		dev_err(&ipa->pdev->dev,
			"no transaction for RX endpoint STOP workaround\n");
		return -EBUSY;
	}

	/* Read into the highest part of the zero memory area */
	addr = ipa->zero_addr + ipa->zero_size - size;

	ipa_cmd_dma_task_32b_addr_add(trans, size, addr, false);

	ret = gsi_trans_commit_wait_timeout(trans, ENDPOINT_STOP_DMA_TIMEOUT);
	if (ret)
		gsi_trans_free(trans);

	return ret;
}
/**
 * ipa_endpoint_stop() - Stops a GSI channel in IPA
 * @endpoint:	Endpoint whose underlying channel should be stopped
 *
 * This function implements the sequence to stop a GSI channel
 * in IPA.  This function returns when the channel is in STOP state.
 *
 * Return value: 0 on success, negative otherwise
 */
int ipa_endpoint_stop(struct ipa_endpoint *endpoint)
{
	u32 retries = IPA_ENDPOINT_STOP_RX_RETRIES;
	int ret;

	do {
		struct ipa *ipa = endpoint->ipa;
		struct gsi *gsi = &ipa->gsi;

		ret = gsi_channel_stop(gsi, endpoint->channel_id);
		if (ret != -EAGAIN || endpoint->toward_ipa)
			break;

		/* For IPA v3.5.1, send a DMA read task and check again */
		if (ipa->version == IPA_VERSION_3_5_1) {
			ret = ipa_endpoint_stop_rx_dma(ipa);
			if (ret)
				break;
		}

		msleep(1);
	} while (retries--);

	return retries ? ret : -EIO;
}
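/* Behavior note (added): an RX channel can keep returning -EAGAIN while
 * the hardware still holds unprocessed buffers.  On IPA v3.5.1 the 1-byte
 * DMA read issued above nudges the pipeline so a subsequent stop attempt
 * can succeed; after IPA_ENDPOINT_STOP_RX_RETRIES failed rounds the
 * function gives up with -EIO.
 */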
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	int ret;

	if (endpoint->toward_ipa) {
		bool delay_mode = endpoint->data->tx.delay;

		ret = ipa_endpoint_init_ctrl(endpoint, delay_mode);
		/* Endpoint is expected to not be in delay mode */
		if (!ret != delay_mode) {
			dev_warn(dev,
				 "TX endpoint %u was %sin delay mode\n",
				 endpoint->endpoint_id,
				 delay_mode ? "already " : "");
		}
		ipa_endpoint_init_hdr_ext(endpoint);
		ipa_endpoint_init_aggr(endpoint);
		ipa_endpoint_init_deaggr(endpoint);
		ipa_endpoint_init_seq(endpoint);
	} else {
		if (endpoint->ipa->version == IPA_VERSION_3_5_1) {
			if (!ipa_endpoint_init_ctrl(endpoint, false))
				dev_warn(dev,
					 "RX endpoint %u was suspended\n",
					 endpoint->endpoint_id);
		}
		ipa_endpoint_init_hdr_ext(endpoint);
		ipa_endpoint_init_aggr(endpoint);
	}
	ipa_endpoint_init_cfg(endpoint);
	ipa_endpoint_init_hdr(endpoint);
	ipa_endpoint_init_hdr_metadata_mask(endpoint);
	ipa_endpoint_init_mode(endpoint);
	ipa_endpoint_status(endpoint);
}
int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;
	struct gsi *gsi = &ipa->gsi;
	int ret;

	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
		dev_err(&ipa->pdev->dev,
			"error %d starting %cX channel %u for endpoint %u\n",
			ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}

	if (!endpoint->toward_ipa) {
		ipa_interrupt_suspend_enable(ipa->interrupt,
					     endpoint->endpoint_id);
		ipa_endpoint_replenish_enable(endpoint);
	}

	ipa->enabled |= BIT(endpoint->endpoint_id);

	return 0;
}
void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
{
	u32 mask = BIT(endpoint->endpoint_id);
	struct ipa *ipa = endpoint->ipa;
	int ret;

	if (!(endpoint->ipa->enabled & mask))
		return;

	endpoint->ipa->enabled ^= mask;

	if (!endpoint->toward_ipa) {
		ipa_endpoint_replenish_disable(endpoint);
		ipa_interrupt_suspend_disable(ipa->interrupt,
					      endpoint->endpoint_id);
	}

	/* Note that if stop fails, the channel's state is not well-defined */
	ret = ipa_endpoint_stop(endpoint);
	if (ret)
		dev_err(&ipa->pdev->dev,
			"error %d attempting to stop endpoint %u\n", ret,
			endpoint->endpoint_id);
}
/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:	Endpoint on which to emulate a suspend
 *
 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 * with an open aggregation frame.  This is to work around a hardware
 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
	struct ipa *ipa = endpoint->ipa;

	/* assert(ipa->version == IPA_VERSION_3_5_1); */

	if (!endpoint->data->aggregation)
		return;

	/* Nothing to do if the endpoint doesn't have aggregation open */
	if (!ipa_endpoint_aggr_active(endpoint))
		return;

	/* Force close aggregation */
	ipa_endpoint_force_close(endpoint);

	ipa_interrupt_simulate_suspend(ipa->interrupt);
}
void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool stop_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_disable(endpoint);

	/* IPA v3.5.1 doesn't use channel stop for suspend */
	stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	if (!endpoint->toward_ipa && !stop_channel) {
		/* Due to a hardware bug, a client suspended with an open
		 * aggregation frame will not generate a SUSPEND IPA
		 * interrupt.  We work around this by force-closing the
		 * aggregation frame, then simulating the arrival of such
		 * an interrupt.
		 */
		WARN_ON(ipa_endpoint_init_ctrl(endpoint, true));
		ipa_endpoint_suspend_aggr(endpoint);
	}

	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
	if (ret)
		dev_err(dev, "error %d suspending channel %u\n", ret,
			endpoint->channel_id);
}
void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
{
	struct device *dev = &endpoint->ipa->pdev->dev;
	struct gsi *gsi = &endpoint->ipa->gsi;
	bool start_channel;
	int ret;

	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;

	/* IPA v3.5.1 doesn't use channel start for resume */
	start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	if (!endpoint->toward_ipa && !start_channel)
		WARN_ON(ipa_endpoint_init_ctrl(endpoint, false));

	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
	if (ret)
		dev_err(dev, "error %d resuming channel %u\n", ret,
			endpoint->channel_id);
	else if (!endpoint->toward_ipa)
		ipa_endpoint_replenish_enable(endpoint);
}
void ipa_endpoint_suspend(struct ipa *ipa)
{
	if (ipa->modem_netdev)
		ipa_modem_suspend(ipa->modem_netdev);

	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
	ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}
void ipa_endpoint_resume(struct ipa *ipa)
{
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
	ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);

	if (ipa->modem_netdev)
		ipa_modem_resume(ipa->modem_netdev);
}
static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
{
	struct gsi *gsi = &endpoint->ipa->gsi;
	u32 channel_id = endpoint->channel_id;

	/* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;

	endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
	if (!endpoint->toward_ipa) {
		/* RX transactions require a single TRE, so the maximum
		 * backlog is the same as the maximum outstanding TREs.
		 */
		endpoint->replenish_enabled = false;
		atomic_set(&endpoint->replenish_saved,
			   gsi_channel_tre_max(gsi, endpoint->channel_id));
		atomic_set(&endpoint->replenish_backlog, 0);
		INIT_DELAYED_WORK(&endpoint->replenish_work,
				  ipa_endpoint_replenish_work);
	}

	ipa_endpoint_program(endpoint);

	endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
}
static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);

	if (!endpoint->toward_ipa)
		cancel_delayed_work_sync(&endpoint->replenish_work);

	ipa_endpoint_reset(endpoint);
}
void ipa_endpoint_setup(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	ipa->set_up = 0;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
	}
}
void ipa_endpoint_teardown(struct ipa *ipa)
{
	u32 set_up = ipa->set_up;

	while (set_up) {
		u32 endpoint_id = __fls(set_up);

		set_up ^= BIT(endpoint_id);

		ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
	}
	ipa->set_up = 0;
}
int ipa_endpoint_config(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	u32 initialized;
	u32 rx_base;
	u32 rx_mask;
	u32 tx_mask;
	int ret = 0;
	u32 max;
	u32 val;

	/* Find out about the endpoints supplied by the hardware, and ensure
	 * the highest one doesn't exceed the number we support.
	 */
	val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);

	/* Our RX is an IPA producer */
	rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
	max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
	if (max > IPA_ENDPOINT_MAX) {
		dev_err(dev, "too many endpoints (%u > %u)\n",
			max, IPA_ENDPOINT_MAX);
		return -EINVAL;
	}
	rx_mask = GENMASK(max - 1, rx_base);

	/* Our TX is an IPA consumer */
	max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
	tx_mask = GENMASK(max - 1, 0);

	ipa->available = rx_mask | tx_mask;
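	/* Worked example (added): if the flavor register reports the lowest
	 * RX (producer) endpoint as 8 with 8 RX pipes and 8 TX (consumer)
	 * pipes, then rx_mask is GENMASK(15, 8) = 0xff00, tx_mask is
	 * GENMASK(7, 0) = 0x00ff, and available is 0xffff.
	 */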
	/* Check for initialized endpoints not supported by the hardware */
	if (ipa->initialized & ~ipa->available) {
		dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
			ipa->initialized & ~ipa->available);
		ret = -EINVAL;		/* Report other errors too */
	}

	initialized = ipa->initialized;
	while (initialized) {
		u32 endpoint_id = __ffs(initialized);
		struct ipa_endpoint *endpoint;

		initialized ^= BIT(endpoint_id);

		/* Make sure it's pointing in the right direction */
		endpoint = &ipa->endpoint[endpoint_id];
		if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
			dev_err(dev, "endpoint id %u wrong direction\n",
				endpoint_id);
			ret = -EINVAL;
		}
	}

	return ret;
}
void ipa_endpoint_deconfig(struct ipa *ipa)
{
	ipa->available = 0;	/* Nothing more to do */
}
static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
				  const struct ipa_gsi_endpoint_data *data)
{
	struct ipa_endpoint *endpoint;

	endpoint = &ipa->endpoint[data->endpoint_id];

	if (data->ee_id == GSI_EE_AP)
		ipa->channel_map[data->channel_id] = endpoint;
	ipa->name_map[name] = endpoint;

	endpoint->ipa = ipa;
	endpoint->ee_id = data->ee_id;
	endpoint->seq_type = data->endpoint.seq_type;
	endpoint->channel_id = data->channel_id;
	endpoint->endpoint_id = data->endpoint_id;
	endpoint->toward_ipa = data->toward_ipa;
	endpoint->data = &data->endpoint.config;

	ipa->initialized |= BIT(endpoint->endpoint_id);
}
void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
	endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);

	memset(endpoint, 0, sizeof(*endpoint));
}
void ipa_endpoint_exit(struct ipa *ipa)
{
	u32 initialized = ipa->initialized;

	while (initialized) {
		u32 endpoint_id = __fls(initialized);

		initialized ^= BIT(endpoint_id);

		ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
	}
	memset(ipa->name_map, 0, sizeof(ipa->name_map));
	memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
}
/* Returns a bitmask of endpoints that support filtering, or 0 on error */
u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
		      const struct ipa_gsi_endpoint_data *data)
{
	enum ipa_endpoint_name name;
	u32 filter_map;

	if (!ipa_endpoint_data_valid(ipa, count, data))
		return 0;	/* Error */

	ipa->initialized = 0;

	filter_map = 0;
	for (name = 0; name < count; name++, data++) {
		if (ipa_gsi_endpoint_data_empty(data))
			continue;	/* Skip over empty slots */

		ipa_endpoint_init_one(ipa, name, data);

		if (data->endpoint.filter_support)
			filter_map |= BIT(data->endpoint_id);
	}

	if (!ipa_filter_map_valid(ipa, filter_map))
		goto err_endpoint_exit;

	return filter_map;	/* Non-zero bitmask */

err_endpoint_exit:
	ipa_endpoint_exit(ipa);

	return 0;	/* Error */
}