/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ena_eth_com.h"

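/* Return the RX completion descriptor at the current CQ head, or NULL if its
 * phase bit does not match the expected phase (i.e. the device has not
 * written it yet).
 */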
static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 expected_phase, head_masked;
	u16 desc_phase;

	head_masked = io_cq->head & (io_cq->q_depth - 1);
	expected_phase = io_cq->phase;

	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
		return NULL;

	/* Make sure we read the rest of the descriptor after the phase bit
	 * has been read
	 */
	dma_rmb();

	return cdesc;
}

static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
	u16 tail_masked;
	u32 offset;

	tail_masked = io_sq->tail & (io_sq->q_depth - 1);
	offset = tail_masked * io_sq->desc_entry_size;

	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}

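/* Copy a completed bounce buffer (one LLQ descriptor list entry) into device
 * memory at the current tail, enforcing the TX burst budget when the device
 * advertises one, and advance the SQ tail.
 */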
static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
					      u8 *bounce_buffer)
{
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u16 dst_tail_mask;
	u32 dst_offset;

	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;

	if (is_llq_max_tx_burst_exists(io_sq)) {
		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
			pr_err("Error: trying to send more packets than tx burst allows\n");
			return -ENOSPC;
		}

		io_sq->entries_in_tx_burst_left--;
		pr_debug("decreasing entries_in_tx_burst_left of queue %d to %d\n",
			 io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

	/* Make sure everything was written into the bounce buffer before
	 * writing the bounce buffer to the device
	 */
	wmb();

	/* The line is completed. Copy it to dev */
	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
			 bounce_buffer, (llq_info->desc_list_entry_size) / 8);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

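/* For LLQ SQs, copy the packet header into the current bounce buffer right
 * after the descriptors that precede it. No-op for host-memory queues.
 */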
static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
					  u8 *header_src, u16 header_len)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
	u16 header_offset;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	header_offset =
		llq_info->descs_num_before_header * io_sq->desc_entry_size;

	if (unlikely((header_offset + header_len) >
		     llq_info->desc_list_entry_size)) {
		pr_err("trying to write header larger than llq entry can accommodate\n");
		return -EFAULT;
	}

	if (unlikely(!bounce_buffer)) {
		pr_err("bounce buffer is NULL\n");
		return -EFAULT;
	}

	memcpy(bounce_buffer + header_offset, header_src, header_len);

	return 0;
}

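/* Hand out the next descriptor slot from the current bounce buffer. */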
static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	u8 *bounce_buffer;
	void *sq_desc;

	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		pr_err("bounce buffer is NULL\n");
		return NULL;
	}

	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
	pkt_ctrl->idx++;
	pkt_ctrl->descs_left_in_line--;

	return sq_desc;
}

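/* Flush a partially filled bounce buffer to the device and start a fresh
 * one, resetting the per-packet descriptor accounting.
 */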
static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
		return 0;

	/* bounce buffer was used, so write it and get a new one */
	if (pkt_ctrl->idx) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);
	}

	pkt_ctrl->idx = 0;
	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
	return 0;
}

static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return get_sq_desc_llq(io_sq);

	return get_sq_desc_regular_queue(io_sq);
}

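/* Advance the LLQ tail: once the current descriptor line is exhausted,
 * write it to the device and prepare the next bounce buffer.
 */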
static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
	int rc;

	if (!pkt_ctrl->descs_left_in_line) {
		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
							pkt_ctrl->curr_bounce_buf);
		if (unlikely(rc))
			return rc;

		pkt_ctrl->curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, llq_info->desc_list_entry_size);

		pkt_ctrl->idx = 0;
		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
			pkt_ctrl->descs_left_in_line = 1;
		else
			pkt_ctrl->descs_left_in_line =
				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
	}

	return 0;
}

static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
		return ena_com_sq_update_llq_tail(io_sq);

	io_sq->tail++;

	/* Switch phase bit in case of wrap around */
	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
		io_sq->phase ^= 1;

	return 0;
}

static struct ena_eth_io_rx_cdesc_base *
	ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
	idx &= (io_cq->q_depth - 1);
	return (struct ena_eth_io_rx_cdesc_base *)
		((uintptr_t)io_cq->cdesc_addr.virt_addr +
		 idx * io_cq->cdesc_entry_size_in_bytes);
}

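/* Walk the CQ from the current head, consuming completion descriptors until
 * one with the LAST bit is seen. Returns the number of descriptors that make
 * up the completed packet (0 if the packet is not fully received yet) and,
 * on completion, the index of its first descriptor.
 */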
static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
				    u16 *first_cdesc_idx)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;
	u16 count = 0, head_masked;
	u32 last = 0;

	do {
		cdesc = ena_com_get_next_rx_cdesc(io_cq);
		if (!cdesc)
			break;

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

	if (last) {
		*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
		count += io_cq->cur_rx_pkt_cdesc_count;

		head_masked = io_cq->head & (io_cq->q_depth - 1);

		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		pr_debug("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
			 io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
		count = 0;
	}

	return count;
}

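/* Build a TX meta descriptor (MSS, header lengths/offsets for offloads) at
 * the current SQ tail and cache it so later packets with identical meta
 * data can skip it.
 */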
static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
						 struct ena_com_tx_ctx *ena_tx_ctx)
{
	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;

	meta_desc = get_sq_desc(io_sq);
	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;

	/* bits 0-9 of the mss */
	meta_desc->word2 |= (ena_meta->mss <<
		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
	/* bits 10-13 of the mss */
	meta_desc->len_ctrl |= ((ena_meta->mss >> 10) <<
		ENA_ETH_IO_TX_META_DESC_MSS_HI_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_MSS_HI_MASK;

	/* Extended meta desc */
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
	meta_desc->len_ctrl |= (io_sq->phase <<
		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
	meta_desc->word2 |= ena_meta->l3_hdr_len &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;

	meta_desc->word2 |= (ena_meta->l4_hdr_len <<
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;

	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;

	/* Cache the meta desc */
	memcpy(&io_sq->cached_tx_meta, ena_meta,
	       sizeof(struct ena_com_tx_meta));

	return ena_com_sq_update_tail(io_sq);
}

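/* Decode the status word of the last completion descriptor into the RX
 * context (L3/L4 protocols, checksum results, hash, fragment flag).
 */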
static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
				 struct ena_eth_io_rx_cdesc_base *cdesc)
{
	ena_rx_ctx->l3_proto = cdesc->status &
		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
	ena_rx_ctx->l4_proto =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
	ena_rx_ctx->l3_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_err =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
	ena_rx_ctx->l4_csum_checked =
		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
	ena_rx_ctx->hash = cdesc->hash;
	ena_rx_ctx->frag =
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	pr_debug("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash: %d frag: %d cdesc_status: %x\n",
		 ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
		 ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
		 ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
}

/*****************************************************************************/
/*****************************     API      **********************************/
/*****************************************************************************/

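/* Queue a TX packet: optionally push the header and a meta descriptor, then
 * emit one TX descriptor per buffer. On success, *nb_hw_desc holds how many
 * HW descriptors were consumed. A minimal caller sketch (the locals below
 * are illustrative, not names from this file):
 *
 *	struct ena_com_tx_ctx tx_ctx = {};
 *	int nb_hw_desc, rc;
 *
 *	tx_ctx.ena_bufs = bufs;		// DMA-mapped fragments
 *	tx_ctx.num_bufs = num_bufs;
 *	tx_ctx.req_id = req_id;
 *	rc = ena_com_prepare_tx(io_sq, &tx_ctx, &nb_hw_desc);
 */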
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
		       struct ena_com_tx_ctx *ena_tx_ctx,
		       int *nb_hw_desc)
{
	struct ena_eth_io_tx_desc *desc = NULL;
	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
	void *buffer_to_push = ena_tx_ctx->push_header;
	u16 header_len = ena_tx_ctx->header_len;
	u16 num_bufs = ena_tx_ctx->num_bufs;
	u16 start_tail = io_sq->tail;
	int i, rc;
	bool have_meta;
	u64 addr_hi;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");

	/* num_bufs +1 for potential meta desc */
	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
		pr_debug("Not enough space in the tx queue\n");
		return -ENOMEM;
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		pr_err("header size is too large %d max header: %d\n",
		       header_len, io_sq->tx_max_header_size);
		return -EINVAL;
	}

	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
		     !buffer_to_push))
		return -EINVAL;

	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
	if (unlikely(rc))
		return rc;

	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
			ena_tx_ctx);
	if (have_meta) {
		rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
		if (unlikely(rc))
			return rc;
	}

	/* If the caller doesn't want to send packets */
	if (unlikely(!num_bufs && !header_len)) {
		rc = ena_com_close_bounce_buffer(io_sq);
		*nb_hw_desc = io_sq->tail - start_tail;
		return rc;
	}

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;
	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

	/* Set first desc when we don't have meta descriptor */
	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;

	desc->buff_addr_hi_hdr_sz |= (header_len <<
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
	desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
		ENA_ETH_IO_TX_DESC_PHASE_MASK;

	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;

	/* Bits 0-9 of the req_id */
	desc->meta_ctrl |= (ena_tx_ctx->req_id <<
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;

	desc->meta_ctrl |= (ena_tx_ctx->df <<
		ENA_ETH_IO_TX_DESC_DF_SHIFT) &
		ENA_ETH_IO_TX_DESC_DF_MASK;

	/* Bits 10-15 of the req_id */
	desc->len_ctrl |= ((ena_tx_ctx->req_id >> 10) <<
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT) &
		ENA_ETH_IO_TX_DESC_REQ_ID_HI_MASK;

	if (ena_tx_ctx->meta_valid) {
		desc->meta_ctrl |= (ena_tx_ctx->tso_enable <<
			ENA_ETH_IO_TX_DESC_TSO_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_TSO_EN_MASK;
		desc->meta_ctrl |= ena_tx_ctx->l3_proto &
			ENA_ETH_IO_TX_DESC_L3_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_proto <<
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_PROTO_IDX_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l3_csum_enable <<
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L3_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_enable <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_EN_MASK;
		desc->meta_ctrl |= (ena_tx_ctx->l4_csum_partial <<
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT) &
			ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK;
	}

	for (i = 0; i < num_bufs; i++) {
		/* The first desc shares the same desc as the header */
		if (likely(i != 0)) {
			rc = ena_com_sq_update_tail(io_sq);
			if (unlikely(rc))
				return rc;

			desc = get_sq_desc(io_sq);
			if (unlikely(!desc))
				return -EFAULT;

			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));

			desc->len_ctrl |= (io_sq->phase <<
				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
				ENA_ETH_IO_TX_DESC_PHASE_MASK;
		}

		desc->len_ctrl |= ena_bufs->len &
			ENA_ETH_IO_TX_DESC_LENGTH_MASK;

		addr_hi = ((ena_bufs->paddr &
			GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

		desc->buff_addr_lo = (u32)ena_bufs->paddr;
		desc->buff_addr_hi_hdr_sz |= addr_hi &
			ENA_ETH_IO_TX_DESC_ADDR_HI_MASK;
		ena_bufs++;
	}

	/* set the last desc indicator */
	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;

	rc = ena_com_sq_update_tail(io_sq);
	if (unlikely(rc))
		return rc;

	rc = ena_com_close_bounce_buffer(io_sq);

	*nb_hw_desc = io_sq->tail - start_tail;
	return rc;
}

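/* Retrieve one received packet from the CQ: collect its completion
 * descriptors, report per-buffer lengths and req_ids through ena_rx_ctx,
 * and advance the matching RX SQ's next_to_comp.
 */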
int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		   struct ena_com_io_sq *io_sq,
		   struct ena_com_rx_ctx *ena_rx_ctx)
{
	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
	u16 cdesc_idx = 0;
	u16 nb_hw_desc;
	u16 i;

	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
	if (nb_hw_desc == 0) {
		ena_rx_ctx->descs = nb_hw_desc;
		return 0;
	}

	pr_debug("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
		 nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
		pr_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
		       ena_rx_ctx->max_bufs);
		return -ENOSPC;
	}

	for (i = 0; i < nb_hw_desc; i++) {
		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);

		ena_buf->len = cdesc->length;
		ena_buf->req_id = cdesc->req_id;
		ena_buf++;
	}

	/* Update SQ head ptr */
	io_sq->next_to_comp += nb_hw_desc;

	pr_debug("[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
		 io_sq->next_to_comp);

	/* Get rx flags from the last pkt */
	ena_com_rx_set_flags(ena_rx_ctx, cdesc);

	ena_rx_ctx->descs = nb_hw_desc;
	return 0;
}

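/* Post a single RX buffer to the SQ as a standalone (FIRST and LAST)
 * descriptor tagged with the caller's req_id.
 */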
int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
			       struct ena_com_buf *ena_buf,
			       u16 req_id)
{
	struct ena_eth_io_rx_desc *desc;

	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");

	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
		return -ENOSPC;

	desc = get_sq_desc(io_sq);
	if (unlikely(!desc))
		return -EFAULT;

	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));

	desc->length = ena_buf->len;

	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
	desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
	desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;

	desc->req_id = req_id;

	desc->buff_addr_lo = (u32)ena_buf->paddr;
	desc->buff_addr_hi =
		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);

	return ena_com_sq_update_tail(io_sq);
}

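/* A CQ is empty when no completion descriptor with the current phase is
 * pending at its head.
 */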
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
{
	struct ena_eth_io_rx_cdesc_base *cdesc;

	cdesc = ena_com_get_next_rx_cdesc(io_cq);

	return cdesc == NULL;
}