// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
 * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include "efa_com.h"
#include "efa_regs_defs.h"
#define ADMIN_CMD_TIMEOUT_US 30000000 /* usecs */

#define EFA_REG_READ_TIMEOUT_US 50000 /* usecs */
#define EFA_MMIO_READ_INVALID 0xffffffff

#define EFA_POLL_INTERVAL_MS 100 /* msecs */

#define EFA_ASYNC_QUEUE_DEPTH 16
#define EFA_ADMIN_QUEUE_DEPTH 32
#define MIN_EFA_VER \
	((EFA_ADMIN_API_VERSION_MAJOR << EFA_REGS_VERSION_MAJOR_VERSION_SHIFT) | \
	 (EFA_ADMIN_API_VERSION_MINOR & EFA_REGS_VERSION_MINOR_VERSION_MASK))
#define EFA_CTRL_MAJOR          0
#define EFA_CTRL_MINOR          0
#define EFA_CTRL_SUB_MINOR      1
#define MIN_EFA_CTRL_VER \
	(((EFA_CTRL_MAJOR) << \
	(EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((EFA_CTRL_MINOR) << \
	(EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(EFA_CTRL_SUB_MINOR))
#define EFA_DMA_ADDR_TO_UINT32_LOW(x)   ((u32)((u64)(x)))
#define EFA_DMA_ADDR_TO_UINT32_HIGH(x)  ((u32)(((u64)(x)) >> 32))

#define EFA_REGS_ADMIN_INTR_MASK 1
enum efa_cmd_status {
	EFA_CMD_SUBMITTED,
	EFA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	EFA_CMD_ABORTED,
};

struct efa_comp_ctx {
	struct completion wait_event;
	struct efa_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum efa_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	u8 occupied;
};
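/*
 * Note: each in-flight admin command owns exactly one efa_comp_ctx. A
 * context is taken at submission time, stays occupied until the matching
 * completion arrives, and is then pushed back onto the free-list below;
 * wait_event is what efa_com_cmd_exec() sleeps on in interrupt mode.
 */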
static const char *efa_com_cmd_str(u8 cmd)
{
#define EFA_CMD_STR_CASE(_cmd) case EFA_ADMIN_##_cmd: return #_cmd

	switch (cmd) {
	EFA_CMD_STR_CASE(CREATE_QP);
	EFA_CMD_STR_CASE(MODIFY_QP);
	EFA_CMD_STR_CASE(QUERY_QP);
	EFA_CMD_STR_CASE(DESTROY_QP);
	EFA_CMD_STR_CASE(CREATE_AH);
	EFA_CMD_STR_CASE(DESTROY_AH);
	EFA_CMD_STR_CASE(REG_MR);
	EFA_CMD_STR_CASE(DEREG_MR);
	EFA_CMD_STR_CASE(CREATE_CQ);
	EFA_CMD_STR_CASE(DESTROY_CQ);
	EFA_CMD_STR_CASE(GET_FEATURE);
	EFA_CMD_STR_CASE(SET_FEATURE);
	EFA_CMD_STR_CASE(GET_STATS);
	EFA_CMD_STR_CASE(ALLOC_PD);
	EFA_CMD_STR_CASE(DEALLOC_PD);
	EFA_CMD_STR_CASE(ALLOC_UAR);
	EFA_CMD_STR_CASE(DEALLOC_UAR);
	default: return "unknown command opcode";
	}
#undef EFA_CMD_STR_CASE
}
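/*
 * EFA uses a "readless" register read protocol: instead of a plain MMIO
 * load, the driver posts a read request to EFA_REGS_MMIO_REG_READ_OFF and
 * the device DMAs the register value into the preallocated read_resp
 * buffer. The request's sequence number is echoed back in
 * read_resp->req_id, which is how the polling loop below detects that the
 * device has finished writing the response.
 */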
static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
	struct efa_admin_mmio_req_read_less_resp *read_resp;
	unsigned long exp_time;
	u32 mmio_read_reg;
	u32 err;

	read_resp = mmio_read->read_resp;

	spin_lock(&mmio_read->lock);
	mmio_read->seq_num++;

	/* trash DMA req_id to identify when hardware is done */
	read_resp->req_id = mmio_read->seq_num + 0x9aL;
	mmio_read_reg = (offset << EFA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			EFA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			 EFA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	writel(mmio_read_reg, edev->reg_bar + EFA_REGS_MMIO_REG_READ_OFF);

	exp_time = jiffies + usecs_to_jiffies(mmio_read->mmio_read_timeout);
	do {
		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
			break;
		udelay(1);
	} while (time_is_after_jiffies(exp_time));

	if (read_resp->req_id != mmio_read->seq_num) {
		ibdev_err(edev->efa_dev,
			  "Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n",
			  mmio_read->seq_num, offset, read_resp->req_id,
			  read_resp->reg_off);
		err = EFA_MMIO_READ_INVALID;
		goto out;
	}

	if (read_resp->reg_off != offset) {
		ibdev_err(edev->efa_dev,
			  "Reading register failed: wrong offset provided\n");
		err = EFA_MMIO_READ_INVALID;
		goto out;
	}

	err = read_resp->reg_val;
out:
	spin_unlock(&mmio_read->lock);
	return err;
}
static int efa_com_admin_init_sq(struct efa_com_dev *edev)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_com_admin_sq *sq = &aq->sq;
	u16 size = aq->depth * sizeof(*sq->entries);
	u32 addr_low, addr_high, aq_caps;

	sq->entries =
		dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL);
	if (!sq->entries)
		return -ENOMEM;

	spin_lock_init(&sq->lock);

	sq->cc = 0;
	sq->pc = 0;
	sq->phase = 1;

	sq->db_addr = (u32 __iomem *)(edev->reg_bar + EFA_REGS_AQ_PROD_DB_OFF);

	addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(sq->dma_addr);
	addr_low = EFA_DMA_ADDR_TO_UINT32_LOW(sq->dma_addr);

	writel(addr_low, edev->reg_bar + EFA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_AQ_BASE_HI_OFF);

	aq_caps = aq->depth & EFA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct efa_admin_aq_entry) <<
		    EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		   EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	writel(aq_caps, edev->reg_bar + EFA_REGS_AQ_CAPS_OFF);

	return 0;
}
static int efa_com_admin_init_cq(struct efa_com_dev *edev)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_com_admin_cq *cq = &aq->cq;
	u16 size = aq->depth * sizeof(*cq->entries);
	u32 addr_low, addr_high, acq_caps;

	cq->entries =
		dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL);
	if (!cq->entries)
		return -ENOMEM;

	spin_lock_init(&cq->lock);

	cq->cc = 0;
	cq->phase = 1;

	addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(cq->dma_addr);
	addr_low = EFA_DMA_ADDR_TO_UINT32_LOW(cq->dma_addr);

	writel(addr_low, edev->reg_bar + EFA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_ACQ_BASE_HI_OFF);

	acq_caps = aq->depth & EFA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct efa_admin_acq_entry) <<
		     EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		    EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
	acq_caps |= (aq->msix_vector_idx <<
		     EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_SHIFT) &
		    EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_MASK;

	writel(acq_caps, edev->reg_bar + EFA_REGS_ACQ_CAPS_OFF);

	return 0;
}
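/*
 * Note: both the admin completion queue above and the AENQ below program
 * an MSI-X vector index into their capability registers so the device can
 * route their interrupts; the vector indices are presumably filled in by
 * the caller before these init routines run.
 */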
static int efa_com_admin_init_aenq(struct efa_com_dev *edev,
				   struct efa_aenq_handlers *aenq_handlers)
{
	struct efa_com_aenq *aenq = &edev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	if (!aenq_handlers) {
		ibdev_err(edev->efa_dev, "aenq handlers pointer is NULL\n");
		return -EINVAL;
	}

	size = EFA_ASYNC_QUEUE_DEPTH * sizeof(*aenq->entries);
	aenq->entries = dma_alloc_coherent(edev->dmadev, size, &aenq->dma_addr,
					   GFP_KERNEL);
	if (!aenq->entries)
		return -ENOMEM;

	aenq->aenq_handlers = aenq_handlers;
	aenq->depth = EFA_ASYNC_QUEUE_DEPTH;
	aenq->cc = 0;
	aenq->phase = 1;

	addr_low = EFA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	writel(addr_low, edev->reg_bar + EFA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = aenq->depth & EFA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct efa_admin_aenq_entry) <<
		      EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	aenq_caps |= (aenq->msix_vector_idx
		      << EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_SHIFT) &
		     EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_MASK;
	writel(aenq_caps, edev->reg_bar + EFA_REGS_AENQ_CAPS_OFF);

	/*
	 * Init cons_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(edev->aenq.cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);

	return 0;
}
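/*
 * comp_ctx_pool is a stack-style free-list of completion context IDs:
 * comp_ctx_pool_next points one past the last ID handed out, so alloc pops
 * an ID and dealloc pushes it back. The pool holds aq->depth IDs and
 * submitters are throttled by the avail_cmds semaphore, so the pop below
 * cannot underflow.
 */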
/* ID to be used with efa_com_get_comp_ctx */
static u16 efa_com_alloc_ctx_id(struct efa_com_admin_queue *aq)
{
	u16 ctx_id;

	spin_lock(&aq->comp_ctx_lock);
	ctx_id = aq->comp_ctx_pool[aq->comp_ctx_pool_next];
	aq->comp_ctx_pool_next++;
	spin_unlock(&aq->comp_ctx_lock);

	return ctx_id;
}
static void efa_com_dealloc_ctx_id(struct efa_com_admin_queue *aq,
				   u16 ctx_id)
{
	spin_lock(&aq->comp_ctx_lock);
	aq->comp_ctx_pool_next--;
	aq->comp_ctx_pool[aq->comp_ctx_pool_next] = ctx_id;
	spin_unlock(&aq->comp_ctx_lock);
}
static inline void efa_com_put_comp_ctx(struct efa_com_admin_queue *aq,
					struct efa_comp_ctx *comp_ctx)
{
	u16 cmd_id = comp_ctx->user_cqe->acq_common_descriptor.command &
		     EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
	u16 ctx_id = cmd_id & (aq->depth - 1);

	ibdev_dbg(aq->efa_dev, "Put completion command_id %#x\n", cmd_id);
	comp_ctx->occupied = 0;
	efa_com_dealloc_ctx_id(aq, ctx_id);
}
static struct efa_comp_ctx *efa_com_get_comp_ctx(struct efa_com_admin_queue *aq,
						 u16 cmd_id, bool capture)
{
	u16 ctx_id = cmd_id & (aq->depth - 1);

	if (aq->comp_ctx[ctx_id].occupied && capture) {
		ibdev_err(aq->efa_dev,
			  "Completion context for command_id %#x is occupied\n",
			  cmd_id);
		return NULL;
	}

	if (capture) {
		aq->comp_ctx[ctx_id].occupied = 1;
		ibdev_dbg(aq->efa_dev,
			  "Take completion ctxt for command_id %#x\n", cmd_id);
	}

	return &aq->comp_ctx[ctx_id];
}
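/*
 * The command_id sent to the device encodes two things: the low bits
 * (within queue_size_mask) are the completion context ID, and the
 * remaining bits are "entropy" taken from the submission queue producer
 * counter. A stale or corrupted completion therefore carries a cmd_id
 * that is very unlikely to match a currently occupied context.
 */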
static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
						       struct efa_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct efa_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct efa_comp_ctx *comp_ctx;
	u16 queue_size_mask;
	u16 cmd_id;
	u16 ctx_id;
	u16 pi;

	queue_size_mask = aq->depth - 1;
	pi = aq->sq.pc & queue_size_mask;

	ctx_id = efa_com_alloc_ctx_id(aq);

	/* cmd_id LSBs are the ctx_id and MSBs are entropy bits from pc */
	cmd_id = ctx_id & queue_size_mask;
	cmd_id |= aq->sq.pc & ~queue_size_mask;
	cmd_id &= EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	cmd->aq_common_descriptor.command_id = cmd_id;
	cmd->aq_common_descriptor.flags |= aq->sq.phase &
		EFA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, true);
	if (!comp_ctx) {
		efa_com_dealloc_ctx_id(aq, ctx_id);
		return ERR_PTR(-EINVAL);
	}

	comp_ctx->status = EFA_CMD_SUBMITTED;
	comp_ctx->comp_size = comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	reinit_completion(&comp_ctx->wait_event);

	memcpy(&aq->sq.entries[pi], cmd, cmd_size_in_bytes);

	aq->sq.pc++;
	atomic64_inc(&aq->stats.submitted_cmd);

	if ((aq->sq.pc & queue_size_mask) == 0)
		aq->sq.phase = !aq->sq.phase;

	/* barrier not needed in case of writel */
	writel(aq->sq.pc, aq->sq.db_addr);

	return comp_ctx;
}
static inline int efa_com_init_comp_ctxt(struct efa_com_admin_queue *aq)
{
	size_t pool_size = aq->depth * sizeof(*aq->comp_ctx_pool);
	size_t size = aq->depth * sizeof(struct efa_comp_ctx);
	struct efa_comp_ctx *comp_ctx;
	u16 i;

	aq->comp_ctx = devm_kzalloc(aq->dmadev, size, GFP_KERNEL);
	aq->comp_ctx_pool = devm_kzalloc(aq->dmadev, pool_size, GFP_KERNEL);
	if (!aq->comp_ctx || !aq->comp_ctx_pool) {
		devm_kfree(aq->dmadev, aq->comp_ctx_pool);
		devm_kfree(aq->dmadev, aq->comp_ctx);
		return -ENOMEM;
	}

	for (i = 0; i < aq->depth; i++) {
		comp_ctx = efa_com_get_comp_ctx(aq, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);

		aq->comp_ctx_pool[i] = i;
	}

	spin_lock_init(&aq->comp_ctx_lock);

	aq->comp_ctx_pool_next = 0;

	return 0;
}
static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
						     struct efa_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct efa_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	struct efa_comp_ctx *comp_ctx;

	spin_lock(&aq->sq.lock);
	if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) {
		ibdev_err(aq->efa_dev, "Admin queue is closed\n");
		spin_unlock(&aq->sq.lock);
		return ERR_PTR(-ENODEV);
	}

	comp_ctx = __efa_com_submit_admin_cmd(aq, cmd, cmd_size_in_bytes, comp,
					      comp_size_in_bytes);
	spin_unlock(&aq->sq.lock);
	if (IS_ERR(comp_ctx))
		clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);

	return comp_ctx;
}
static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
						   struct efa_admin_acq_entry *cqe)
{
	struct efa_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		 EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false);
	if (!comp_ctx) {
		ibdev_err(aq->efa_dev,
			  "comp_ctx is NULL. Changing the admin queue running state\n");
		clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
		return;
	}

	comp_ctx->status = EFA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;
	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, cqe, comp_ctx->comp_size);

	if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
		complete(&comp_ctx->wait_event);
}
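/*
 * Completions are consumed using the usual phase-bit convention: the
 * device toggles the phase bit on every pass over the ring, and the
 * driver flips its expected phase each time the consumer index wraps. An
 * entry whose phase bit matches the expected phase is new; the first
 * mismatch means the ring has been drained.
 */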
static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
{
	struct efa_admin_acq_entry *cqe;
	u16 queue_size_mask;
	u16 comp_num = 0;
	u8 phase;
	u16 ci;

	queue_size_mask = aq->depth - 1;

	ci = aq->cq.cc & queue_size_mask;
	phase = aq->cq.phase;

	cqe = &aq->cq.entries[ci];

	/* Go over all the completions */
	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
		EFA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/*
		 * Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();
		efa_com_handle_single_admin_completion(aq, cqe);

		ci++;
		comp_num++;
		if (ci == aq->depth) {
			ci = 0;
			phase = !phase;
		}

		cqe = &aq->cq.entries[ci];
	}

	aq->cq.cc += comp_num;
	aq->cq.phase = phase;
	aq->sq.cc += comp_num;
	atomic64_add(comp_num, &aq->stats.completed_cmd);
}
static int efa_com_comp_status_to_errno(u8 comp_status)
{
	switch (comp_status) {
	case EFA_ADMIN_SUCCESS:
		return 0;
	case EFA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case EFA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case EFA_ADMIN_BAD_OPCODE:
	case EFA_ADMIN_MALFORMED_REQUEST:
	case EFA_ADMIN_ILLEGAL_PARAMETER:
	case EFA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	default:
		return -EINVAL;
	}
}
static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_ctx,
						     struct efa_com_admin_queue *aq)
{
	unsigned long timeout;
	unsigned long flags;
	int err;

	timeout = jiffies + usecs_to_jiffies(aq->completion_timeout);

	while (1) {
		spin_lock_irqsave(&aq->cq.lock, flags);
		efa_com_handle_admin_completion(aq);
		spin_unlock_irqrestore(&aq->cq.lock, flags);

		if (comp_ctx->status != EFA_CMD_SUBMITTED)
			break;

		if (time_is_before_jiffies(timeout)) {
			ibdev_err(aq->efa_dev,
				  "Wait for completion (polling) timeout\n");
			/* EFA didn't have any completion */
			atomic64_inc(&aq->stats.no_completion);

			clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
			err = -ETIME;
			goto out;
		}

		msleep(aq->poll_interval);
	}

	err = efa_com_comp_status_to_errno(comp_ctx->comp_status);
out:
	efa_com_put_comp_ctx(aq, comp_ctx);
	return err;
}
static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *comp_ctx,
							struct efa_com_admin_queue *aq)
{
	unsigned long flags;
	int err;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(aq->completion_timeout));

	/*
	 * In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is completion but the device didn't get any msi-x interrupt.
	 */
	if (comp_ctx->status == EFA_CMD_SUBMITTED) {
		spin_lock_irqsave(&aq->cq.lock, flags);
		efa_com_handle_admin_completion(aq);
		spin_unlock_irqrestore(&aq->cq.lock, flags);

		atomic64_inc(&aq->stats.no_completion);

		if (comp_ctx->status == EFA_CMD_COMPLETED)
			ibdev_err(aq->efa_dev,
				  "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
				  efa_com_cmd_str(comp_ctx->cmd_opcode),
				  comp_ctx->cmd_opcode, comp_ctx->status,
				  comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
		else
			ibdev_err(aq->efa_dev,
				  "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
				  efa_com_cmd_str(comp_ctx->cmd_opcode),
				  comp_ctx->cmd_opcode, comp_ctx->status,
				  comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);

		clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
		err = -ETIME;
		goto out;
	}

	err = efa_com_comp_status_to_errno(comp_ctx->comp_status);
out:
	efa_com_put_comp_ctx(aq, comp_ctx);
	return err;
}
/*
 * There are two types to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called efa_com_handle_admin_completion
 * to mark the completions.
 */
static int efa_com_wait_and_process_admin_cq(struct efa_comp_ctx *comp_ctx,
					     struct efa_com_admin_queue *aq)
{
	if (test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
		return efa_com_wait_and_process_admin_cq_polling(comp_ctx, aq);

	return efa_com_wait_and_process_admin_cq_interrupts(comp_ctx, aq);
}
/**
 * efa_com_cmd_exec - Execute admin command
 * @aq: admin queue.
 * @cmd: the admin command to execute.
 * @cmd_size: the command size.
 * @comp: command completion return entry.
 * @comp_size: command completion size.
 * Submit an admin command and then wait until the device will return a
 * completion.
 * The completion will be copied into comp.
 *
 * @return - 0 on success, negative value on failure.
 */
int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
		     struct efa_admin_aq_entry *cmd,
		     size_t cmd_size,
		     struct efa_admin_acq_entry *comp,
		     size_t comp_size)
{
	struct efa_comp_ctx *comp_ctx;
	int err;

	might_sleep();

	/* In case of queue FULL */
	down(&aq->avail_cmds);

	ibdev_dbg(aq->efa_dev, "%s (opcode %d)\n",
		  efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
		  cmd->aq_common_descriptor.opcode);
	comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		ibdev_err(aq->efa_dev,
			  "Failed to submit command %s (opcode %u) err %ld\n",
			  efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
			  cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));

		up(&aq->avail_cmds);
		return PTR_ERR(comp_ctx);
	}

	err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
	if (err)
		ibdev_err(aq->efa_dev,
			  "Failed to process command %s (opcode %u) comp_status %d err %d\n",
			  efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
			  cmd->aq_common_descriptor.opcode,
			  comp_ctx->comp_status, err);

	up(&aq->avail_cmds);

	return err;
}
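/*
 * Illustrative usage (a sketch, not code from this file): callers such as
 * the command wrappers in efa_com_cmd.c build a specific admin command and
 * execute it synchronously by casting to the generic descriptors, e.g.:
 *
 *	struct efa_admin_get_feature_cmd cmd = {};
 *	struct efa_admin_get_feature_resp resp;
 *	int err;
 *
 *	cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_FEATURE;
 *	err = efa_com_cmd_exec(aq,
 *			       (struct efa_admin_aq_entry *)&cmd,
 *			       sizeof(cmd),
 *			       (struct efa_admin_acq_entry *)&resp,
 *			       sizeof(resp));
 */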
/**
 * efa_com_admin_destroy - Destroy the admin and the async events queues.
 * @edev: EFA communication layer struct
 */
void efa_com_admin_destroy(struct efa_com_dev *edev)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_com_aenq *aenq = &edev->aenq;
	struct efa_com_admin_cq *cq = &aq->cq;
	struct efa_com_admin_sq *sq = &aq->sq;
	u16 size;

	clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);

	devm_kfree(edev->dmadev, aq->comp_ctx_pool);
	devm_kfree(edev->dmadev, aq->comp_ctx);

	size = aq->depth * sizeof(*sq->entries);
	dma_free_coherent(edev->dmadev, size, sq->entries, sq->dma_addr);

	size = aq->depth * sizeof(*cq->entries);
	dma_free_coherent(edev->dmadev, size, cq->entries, cq->dma_addr);

	size = aenq->depth * sizeof(*aenq->entries);
	dma_free_coherent(edev->dmadev, size, aenq->entries, aenq->dma_addr);
}
/**
 * efa_com_set_admin_polling_mode - Set the admin completion queue polling mode
 * @edev: EFA communication layer struct
 * @polling: Enable/Disable polling mode
 *
 * Set the admin completion mode.
 */
void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = EFA_REGS_ADMIN_INTR_MASK;

	writel(mask_value, edev->reg_bar + EFA_REGS_INTR_MASK_OFF);
	if (polling)
		set_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
	else
		clear_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
}
static void efa_com_stats_init(struct efa_com_dev *edev)
{
	atomic64_t *s = (atomic64_t *)&edev->aq.stats;
	int i;

	for (i = 0; i < sizeof(edev->aq.stats) / sizeof(*s); i++, s++)
		atomic64_set(s, 0);
}
/**
 * efa_com_admin_init - Init the admin and the async queues
 * @edev: EFA communication layer struct
 * @aenq_handlers: Those handlers to be called upon event.
 *
 * Initialize the admin submission and completion queues.
 * Initialize the asynchronous events notification queues.
 *
 * @return - 0 on success, negative value on failure.
 */
int efa_com_admin_init(struct efa_com_dev *edev,
		       struct efa_aenq_handlers *aenq_handlers)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	u32 timeout, dev_sts, cap;
	int err;

	dev_sts = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
	if (!(dev_sts & EFA_REGS_DEV_STS_READY_MASK)) {
		ibdev_err(edev->efa_dev,
			  "Device isn't ready, abort com init %#x\n", dev_sts);
		return -ENODEV;
	}

	aq->depth = EFA_ADMIN_QUEUE_DEPTH;

	aq->dmadev = edev->dmadev;
	aq->efa_dev = edev->efa_dev;
	set_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state);

	sema_init(&aq->avail_cmds, aq->depth);

	efa_com_stats_init(edev);

	err = efa_com_init_comp_ctxt(aq);
	if (err)
		return err;

	err = efa_com_admin_init_sq(edev);
	if (err)
		goto err_destroy_comp_ctxt;

	err = efa_com_admin_init_cq(edev);
	if (err)
		goto err_destroy_sq;

	efa_com_set_admin_polling_mode(edev, false);

	err = efa_com_admin_init_aenq(edev, aenq_handlers);
	if (err)
		goto err_destroy_cq;

	cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
	timeout = (cap & EFA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  EFA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		aq->completion_timeout = timeout * 100000;
	else
		aq->completion_timeout = ADMIN_CMD_TIMEOUT_US;

	aq->poll_interval = EFA_POLL_INTERVAL_MS;

	set_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);

	return 0;

err_destroy_cq:
	dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->cq.entries),
			  aq->cq.entries, aq->cq.dma_addr);
err_destroy_sq:
	dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->sq.entries),
			  aq->sq.entries, aq->sq.dma_addr);
err_destroy_comp_ctxt:
	devm_kfree(edev->dmadev, aq->comp_ctx);

	return err;
}
/**
 * efa_com_admin_q_comp_intr_handler - admin queue interrupt handler
 * @edev: EFA communication layer struct
 *
 * This method goes over the admin completion queue and wakes up
 * all the pending threads that wait on the commands wait event.
 *
 * @note: Should be called after MSI-X interrupt.
 */
void efa_com_admin_q_comp_intr_handler(struct efa_com_dev *edev)
{
	unsigned long flags;

	spin_lock_irqsave(&edev->aq.cq.lock, flags);
	efa_com_handle_admin_completion(&edev->aq);
	spin_unlock_irqrestore(&edev->aq.cq.lock, flags);
}
/*
 * efa_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static efa_aenq_handler efa_com_get_specific_aenq_cb(struct efa_com_dev *edev,
						     u16 group)
{
	struct efa_aenq_handlers *aenq_handlers = edev->aenq.aenq_handlers;

	if (group < EFA_MAX_HANDLERS && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}
/**
 * efa_com_aenq_intr_handler - AENQ interrupt handler
 * @edev: EFA communication layer struct
 * @data: Data of interrupt handler.
 *
 * Go over the async event notification queue and call the proper aenq handler.
 */
void efa_com_aenq_intr_handler(struct efa_com_dev *edev, void *data)
{
	struct efa_admin_aenq_common_desc *aenq_common;
	struct efa_com_aenq *aenq = &edev->aenq;
	struct efa_admin_aenq_entry *aenq_e;
	efa_aenq_handler handler_cb;
	u32 processed = 0;
	u8 phase;
	u32 ci;

	ci = aenq->cc & (aenq->depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[ci]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE(aenq_common->flags) &
		EFA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/*
		 * Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();

		/* Handle specific event*/
		handler_cb = efa_com_get_specific_aenq_cb(edev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler*/

		/* Get next event entry */
		ci++;
		processed++;

		if (ci == aenq->depth) {
			ci = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[ci];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->cc += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* barrier not needed in case of writel */
	writel(aenq->cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);
}
static void efa_com_mmio_reg_read_resp_addr_init(struct efa_com_dev *edev)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
	u32 addr_high;
	u32 addr_low;

	/* dma_addr_bits is unknown at this point */
	addr_high = (mmio_read->read_resp_dma_addr >> 32) & GENMASK(31, 0);
	addr_low = mmio_read->read_resp_dma_addr & GENMASK(31, 0);

	writel(addr_high, edev->reg_bar + EFA_REGS_MMIO_RESP_HI_OFF);
	writel(addr_low, edev->reg_bar + EFA_REGS_MMIO_RESP_LO_OFF);
}
int efa_com_mmio_reg_read_init(struct efa_com_dev *edev)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_alloc_coherent(edev->dmadev, sizeof(*mmio_read->read_resp),
				   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (!mmio_read->read_resp)
		return -ENOMEM;

	efa_com_mmio_reg_read_resp_addr_init(edev);

	mmio_read->read_resp->req_id = 0;
	mmio_read->seq_num = 0;
	mmio_read->mmio_read_timeout = EFA_REG_READ_TIMEOUT_US;

	return 0;
}
void efa_com_mmio_reg_read_destroy(struct efa_com_dev *edev)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;

	dma_free_coherent(edev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);
}
int efa_com_validate_version(struct efa_com_dev *edev)
{
	u32 ctrl_ver_masked;
	u32 ctrl_ver;
	u32 ver;

	/*
	 * Make sure the EFA version and the controller version are at least
	 * as the driver expects
	 */
	ver = efa_com_reg_read32(edev, EFA_REGS_VERSION_OFF);
	ctrl_ver = efa_com_reg_read32(edev,
				      EFA_REGS_CONTROLLER_VERSION_OFF);

	ibdev_dbg(edev->efa_dev, "efa device version: %d.%d\n",
		  (ver & EFA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			  EFA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		  ver & EFA_REGS_VERSION_MINOR_VERSION_MASK);

	if (ver < MIN_EFA_VER) {
		ibdev_err(edev->efa_dev,
			  "EFA version is lower than the minimal version the driver supports\n");
		return -EOPNOTSUPP;
	}

	ibdev_dbg(edev->efa_dev,
		  "efa controller version: %d.%d.%d implementation version %d\n",
		  (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			  EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		  (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
			  EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		  (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		  (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
			  EFA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_EFA_CTRL_VER) {
		ibdev_err(edev->efa_dev,
			  "EFA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
/**
 * efa_com_get_dma_width - Retrieve physical dma address width the device
 * supports.
 * @edev: EFA communication layer struct
 *
 * Retrieve the maximum physical address bits the device can handle.
 *
 * @return: > 0 on Success and negative value otherwise.
 */
int efa_com_get_dma_width(struct efa_com_dev *edev)
{
	u32 caps = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
	int width;

	width = (caps & EFA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		EFA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	ibdev_dbg(edev->efa_dev, "DMA width: %d\n", width);

	if (width < 32 || width > 64) {
		ibdev_err(edev->efa_dev, "DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	edev->dma_addr_bits = width;

	return width;
}
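/*
 * This layer only validates and caches the width in edev->dma_addr_bits;
 * the probe path is presumably responsible for applying it, e.g. via
 * dma_set_mask_and_coherent(), before DMA mappings are created.
 */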
static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);

		if ((val & EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
		    exp_state)
			return 0;

		ibdev_dbg(edev->efa_dev, "Reset indication val %d\n", val);
		msleep(EFA_POLL_INTERVAL_MS);
	}

	return -ETIME;
}
/**
 * efa_com_dev_reset - Perform device FLR to the device.
 * @edev: EFA communication layer struct
 * @reset_reason: Specify what is the trigger for the reset in case of an error.
 *
 * @return - 0 on success, negative value on failure.
 */
int efa_com_dev_reset(struct efa_com_dev *edev,
		      enum efa_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int err;

	stat = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
	cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);

	if (!(stat & EFA_REGS_DEV_STS_READY_MASK)) {
		ibdev_err(edev->efa_dev,
			  "Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = (cap & EFA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  EFA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (!timeout) {
		ibdev_err(edev->efa_dev, "Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	reset_val = EFA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << EFA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     EFA_REGS_DEV_CTL_RESET_REASON_MASK;
	writel(reset_val, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);

	/* reset clears the mmio readless address, restore it */
	efa_com_mmio_reg_read_resp_addr_init(edev);

	err = wait_for_reset_state(edev, timeout,
				   EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (err) {
		ibdev_err(edev->efa_dev, "Reset indication didn't turn on\n");
		return err;
	}

	/* reset done */
	writel(0, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);
	err = wait_for_reset_state(edev, timeout, 0);
	if (err) {
		ibdev_err(edev->efa_dev, "Reset indication didn't turn off\n");
		return err;
	}

	timeout = (cap & EFA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  EFA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		edev->aq.completion_timeout = timeout * 100000;
	else
		edev->aq.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}
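/*
 * Note: a successful reset wipes device queue state; apart from the
 * readless-read response address restored inline above, the caller is
 * expected to re-run efa_com_admin_init() before issuing further admin
 * commands.
 */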