2 * Copyright (C) 2014 Freescale Semiconductor
4 * SPDX-License-Identifier: GPL-2.0+
14 #include <linux/compat.h>
16 #include "ldpaa_eth.h"
/*
 * init_phy() - PHY initialisation hook for this DPNI interface.
 * NOTE(review): only a TODO body is visible here; presumably a stub that
 * returns success — closing lines of the function are not visible in this
 * view, so confirm against the full file.
 */
19 static int init_phy(struct eth_device *dev)
21 /*TODO for external PHY */
/*
 * ldpaa_eth_rx() - process one received frame descriptor (FD).
 * Checks the frame annotation status word for Rx errors (when the FD's
 * FRC marks it valid), hands the payload to the U-Boot network stack,
 * then recycles the buffer into the QBMan buffer pool.
 * NOTE(review): declarations of fd_addr/fd_offset/fd_length/status/err
 * are not visible in this view — types assumed from usage.
 */
26 static void ldpaa_eth_rx(struct ldpaa_eth_priv *priv,
27 const struct dpaa_fd *fd)
32 struct ldpaa_fas *fas;
34 struct qbman_release_desc releasedesc;
35 struct qbman_swp *swp = dflt_dpio->sw_portal;
/* Decode buffer address, data offset and frame length from the FD */
37 fd_addr = ldpaa_fd_get_addr(fd);
38 fd_offset = ldpaa_fd_get_offset(fd);
39 fd_length = ldpaa_fd_get_len(fd);
41 debug("Rx frame:data addr=0x%p size=0x%x\n", (u64 *)fd_addr, fd_length);
/* Frame annotation status is only meaningful when FASV is set in FRC */
43 if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
44 /* Read the frame annotation status word and check for errors */
45 fas = (struct ldpaa_fas *)
46 ((uint8_t *)(fd_addr) +
47 priv->buf_layout.private_data_size);
48 status = le32_to_cpu(fas->status);
49 if (status & LDPAA_ETH_RX_ERR_MASK) {
50 printf("Rx frame error(s): 0x%08x\n",
51 status & LDPAA_ETH_RX_ERR_MASK);
53 } else if (status & LDPAA_ETH_RX_UNSUPP_MASK) {
54 printf("Unsupported feature in bitmask: 0x%08x\n",
55 status & LDPAA_ETH_RX_UNSUPP_MASK);
/* Deliver the payload (buffer start + data offset) to the net stack */
60 debug("Rx frame: To Upper layer\n");
61 net_process_received_packet((uint8_t *)(fd_addr) + fd_offset,
/* Flush the whole Rx buffer before recycling it into the pool */
65 flush_dcache_range(fd_addr, fd_addr + LDPAA_ETH_RX_BUFFER_SIZE);
66 qbman_release_desc_clear(&releasedesc);
67 qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
69 /* Release buffer into the QBMAN */
70 err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
/* Retry the release for as long as the software portal is busy */
71 } while (err == -EBUSY);
/*
 * ldpaa_eth_pull_dequeue_rx() - poll-mode receive entry point
 * (eth_device .recv). Issues a volatile dequeue (pull) on the default
 * Rx frame queue, spins on the DQRR for the response, validates the
 * dequeued entry, processes a valid frame via ldpaa_eth_rx() and
 * consumes the DQRR entry in either case.
 * NOTE(review): the retry loops around the pull and the DQRR poll use
 * counters i/loop, but the loop bodies are only partially visible here.
 */
75 static int ldpaa_eth_pull_dequeue_rx(struct eth_device *dev)
77 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)dev->priv;
78 const struct ldpaa_dq *dq;
79 const struct dpaa_fd *fd;
80 int i = 5, err = 0, status, loop = 20;
81 static struct qbman_pull_desc pulldesc;
82 struct qbman_swp *swp = dflt_dpio->sw_portal;
/* Build a single-frame pull (volatile dequeue) on the default Rx FQ */
85 qbman_pull_desc_clear(&pulldesc);
86 qbman_pull_desc_set_numframes(&pulldesc, 1);
87 qbman_pull_desc_set_fq(&pulldesc, priv->rx_dflt_fqid);
89 err = qbman_swp_pull(swp, &pulldesc);
91 printf("Dequeue frames error:0x%08x\n", err);
/* Poll the DQRR for the dequeue response */
97 dq = qbman_swp_dqrr_next(swp);
104 /* Check for valid frame. If not sent a consume
105 * confirmation to QBMAN otherwise give it to NADK
106 * application and then send consume confirmation to
109 status = (uint8_t)ldpaa_dq_flags(dq);
110 if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
111 debug("Dequeue RX frames:");
112 debug("No frame delivered\n");
/* No frame: consume the DQRR entry and fall through */
114 qbman_swp_dqrr_consume(swp, dq);
118 fd = ldpaa_dq_fd(dq);
120 /* Obtain FD and process it */
121 ldpaa_eth_rx(priv, fd);
122 qbman_swp_dqrr_consume(swp, dq);
/*
 * ldpaa_eth_tx_conf() - process one Tx-confirmation frame descriptor.
 * Checks the frame annotation status for Tx errors (when FASV is set),
 * then releases the transmit buffer back into the QBMan buffer pool.
 * NOTE(review): the fd_addr declaration is not visible in this view.
 */
130 static void ldpaa_eth_tx_conf(struct ldpaa_eth_priv *priv,
131 const struct dpaa_fd *fd)
134 struct ldpaa_fas *fas;
135 uint32_t status, err;
136 struct qbman_release_desc releasedesc;
137 struct qbman_swp *swp = dflt_dpio->sw_portal;
139 fd_addr = ldpaa_fd_get_addr(fd);
142 debug("TX Conf frame:data addr=0x%p\n", (u64 *)fd_addr);
144 /* Check the status from the Frame Annotation */
145 if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
146 fas = (struct ldpaa_fas *)
147 ((uint8_t *)(fd_addr) +
148 priv->buf_layout.private_data_size);
149 status = le32_to_cpu(fas->status);
150 if (status & LDPAA_ETH_TXCONF_ERR_MASK) {
151 printf("TxConf frame error(s): 0x%08x\n",
152 status & LDPAA_ETH_TXCONF_ERR_MASK);
/* Recycle the Tx buffer into the pool, retrying while portal is busy */
156 qbman_release_desc_clear(&releasedesc);
157 qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
159 /* Release buffer into the QBMAN */
160 err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
161 } while (err == -EBUSY);
/*
 * ldpaa_eth_pull_dequeue_tx_conf() - pull one entry from the Tx
 * confirmation frame queue. Mirrors ldpaa_eth_pull_dequeue_rx(): issue
 * a single-frame pull on tx_conf_fqid, poll the DQRR, and either
 * consume an invalid entry or process the FD via ldpaa_eth_tx_conf()
 * before consuming it.
 * NOTE(review): the err declaration and parts of the retry loops are
 * not visible in this view.
 */
164 static int ldpaa_eth_pull_dequeue_tx_conf(struct ldpaa_eth_priv *priv)
166 const struct ldpaa_dq *dq;
167 const struct dpaa_fd *fd;
169 int i = 5, status, loop = 20;
170 static struct qbman_pull_desc pulldesc;
171 struct qbman_swp *swp = dflt_dpio->sw_portal;
/* Single-frame volatile dequeue on the Tx-confirmation FQ */
174 qbman_pull_desc_clear(&pulldesc);
175 qbman_pull_desc_set_numframes(&pulldesc, 1);
176 qbman_pull_desc_set_fq(&pulldesc, priv->tx_conf_fqid);
178 err = qbman_swp_pull(swp, &pulldesc);
180 printf("Dequeue TX conf frames error:0x%08x\n", err);
/* Poll the DQRR for the dequeue response */
186 dq = qbman_swp_dqrr_next(swp);
193 /* Check for valid frame. If not sent a consume
194 * confirmation to QBMAN otherwise give it to NADK
195 * application and then send consume confirmation to
198 status = (uint8_t)ldpaa_dq_flags(dq);
199 if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
200 debug("Dequeue TX conf frames:");
201 debug("No frame is delivered\n");
/* No frame: just consume the DQRR entry */
203 qbman_swp_dqrr_consume(swp, dq);
206 fd = ldpaa_dq_fd(dq);
208 ldpaa_eth_tx_conf(priv, fd);
209 qbman_swp_dqrr_consume(swp, dq);
/*
 * ldpaa_eth_tx() - transmit one packet (eth_device .send).
 * Acquires a buffer from the DPBP pool, copies the packet in at the
 * configured Tx data offset, builds a frame descriptor, enqueues it
 * through the queuing destination (QDID/flow), then polls the Tx
 * confirmation queue to reclaim the buffer.
 * NOTE(review): declarations of fd and buffer_start are not visible in
 * this view; buffer_start is presumably filled by qbman_swp_acquire().
 */
217 static int ldpaa_eth_tx(struct eth_device *net_dev, void *buf, int len)
219 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
222 int data_offset, err;
223 struct qbman_swp *swp = dflt_dpio->sw_portal;
224 struct qbman_eq_desc ed;
226 /* Setup the FD fields */
227 memset(&fd, 0, sizeof(fd));
229 data_offset = priv->tx_data_offset;
/* Acquire a free buffer from the pool, retrying while portal is busy */
232 err = qbman_swp_acquire(dflt_dpio->sw_portal,
233 dflt_dpbp->dpbp_attr.bpid,
235 } while (err == -EBUSY);
238 printf("qbman_swp_acquire() failed\n");
242 debug("TX data: malloc buffer start=0x%p\n", (u64 *)buffer_start);
/* Copy the packet after the headroom reserved by tx_data_offset */
244 memcpy(((uint8_t *)(buffer_start) + data_offset), buf, len);
246 flush_dcache_range(buffer_start, buffer_start +
247 LDPAA_ETH_RX_BUFFER_SIZE);
/* Populate the frame descriptor: address, offset, pool id, length */
249 ldpaa_fd_set_addr(&fd, (u64)buffer_start);
250 ldpaa_fd_set_offset(&fd, (uint16_t)(data_offset));
251 ldpaa_fd_set_bpid(&fd, dflt_dpbp->dpbp_attr.bpid);
252 ldpaa_fd_set_len(&fd, len);
254 fd.simple.ctrl = LDPAA_FD_CTRL_ASAL | LDPAA_FD_CTRL_PTA |
/* Enqueue via the queuing destination (no order restoration) */
257 qbman_eq_desc_clear(&ed);
258 qbman_eq_desc_set_no_orp(&ed, 0);
259 qbman_eq_desc_set_qd(&ed, priv->tx_qdid, priv->tx_flow_id, 0);
260 err = qbman_swp_enqueue(swp, &ed, (const struct qbman_fd *)(&fd));
262 printf("error enqueueing Tx frame\n");
/* Reap the Tx confirmation so the buffer returns to the pool */
266 err = ldpaa_eth_pull_dequeue_tx_conf(priv);
268 printf("error Tx Conf frame\n");
/*
 * ldpaa_eth_open() - bring the interface up (eth_device .init).
 * Sequence: DPNI setup -> DPBP setup -> bind DPNI to DPBP -> fetch and
 * validate the primary MAC address -> PHY startup (currently forced to
 * 1G/full/link-up) -> dpni_enable -> cache the default Rx FQID, Tx
 * QDID and Tx-confirmation FQID. Returns 0 when the link is up, -1
 * otherwise. The tail lines visible at 365/369 are error-unwind paths
 * (dpni_disable / dpni_close).
 * NOTE(review): mac_addr and err declarations, plus most goto labels,
 * are not visible in this view.
 */
273 static int ldpaa_eth_open(struct eth_device *net_dev, bd_t *bd)
275 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
276 struct dpni_queue_attr rx_queue_attr;
277 struct dpni_tx_flow_attr tx_flow_attr;
/* Already running? nothing to do */
281 if (net_dev->state == ETH_STATE_ACTIVE)
284 /* DPNI initialization */
285 err = ldpaa_dpni_setup(priv);
289 err = ldpaa_dpbp_setup();
293 /* DPNI binding DPBP */
294 err = ldpaa_dpni_bind(priv);
298 err = dpni_get_primary_mac_addr(dflt_mc_io, priv->dpni_handle,
301 printf("dpni_get_primary_mac_addr() failed\n");
305 memcpy(net_dev->enetaddr, mac_addr, 0x6);
307 /* setup the MAC address */
/* Reject multicast MAC (bit 0 of first octet set) */
308 if (net_dev->enetaddr[0] & 0x01) {
309 printf("%s: MacAddress is multcast address\n", __func__);
314 /* TODO Check this path */
315 err = phy_startup(priv->phydev);
317 printf("%s: Could not initialize\n", priv->phydev->dev->name);
/* Link parameters are hard-coded for now: 1Gbps full duplex, link up */
321 priv->phydev->speed = SPEED_1000;
322 priv->phydev->link = 1;
323 priv->phydev->duplex = DUPLEX_FULL;
326 err = dpni_enable(dflt_mc_io, priv->dpni_handle);
328 printf("dpni_enable() failed\n");
332 /* TODO: support multiple Rx flows */
333 err = dpni_get_rx_flow(dflt_mc_io, priv->dpni_handle, 0, 0,
336 printf("dpni_get_rx_flow() failed\n");
340 priv->rx_dflt_fqid = rx_queue_attr.fqid;
342 err = dpni_get_qdid(dflt_mc_io, priv->dpni_handle, &priv->tx_qdid);
344 printf("dpni_get_qdid() failed\n");
348 err = dpni_get_tx_flow(dflt_mc_io, priv->dpni_handle, priv->tx_flow_id,
351 printf("dpni_get_tx_flow() failed\n");
355 priv->tx_conf_fqid = tx_flow_attr.conf_err_attr.queue_attr.fqid;
357 if (!priv->phydev->link)
358 printf("%s: No link.\n", priv->phydev->dev->name);
360 return priv->phydev->link ? 0 : -1;
/* Error-unwind paths (labels not visible in this view) */
365 dpni_disable(dflt_mc_io, priv->dpni_handle);
369 dpni_close(dflt_mc_io, priv->dpni_handle);
/*
 * ldpaa_eth_stop() - bring the interface down (eth_device .halt).
 * Skips work if the device never started, then disables Tx/Rx traffic
 * on the DPNI, shuts down the PHY, and resets/closes the DPNI object.
 */
374 static void ldpaa_eth_stop(struct eth_device *net_dev)
376 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
/* Nothing to stop if the device was never activated */
379 if ((net_dev->state == ETH_STATE_PASSIVE) ||
380 (net_dev->state == ETH_STATE_INIT))
382 /* Stop Tx and Rx traffic */
383 err = dpni_disable(dflt_mc_io, priv->dpni_handle);
385 printf("dpni_disable() failed\n");
388 phy_shutdown(priv->phydev);
/* Reset and release the DPNI object */
392 dpni_reset(dflt_mc_io, priv->dpni_handle);
393 dpni_close(dflt_mc_io, priv->dpni_handle);
/*
 * ldpaa_dpbp_drain_cnt() - acquire up to 'count' (max 7 per portal
 * acquire) buffers from the default DPBP and free them back to the
 * heap. Used when tearing down the buffer pool.
 * NOTE(review): ret/i/addr declarations and the free() call are not
 * visible in this view.
 */
396 static void ldpaa_dpbp_drain_cnt(int count)
398 uint64_t buf_array[7];
405 ret = qbman_swp_acquire(dflt_dpio->sw_portal,
406 dflt_dpbp->dpbp_attr.bpid,
409 printf("qbman_swp_acquire() failed\n");
/* Free each buffer address returned by the acquire */
412 for (i = 0; i < ret; i++) {
413 addr = (void *)buf_array[i];
414 debug("Free: buffer addr =0x%p\n", addr);
/*
 * ldpaa_dpbp_drain() - drain the whole buffer pool in chunks of 7,
 * the maximum number of buffers one qbman_swp_acquire() can return.
 */
420 static void ldpaa_dpbp_drain(void)
423 for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7)
424 ldpaa_dpbp_drain_cnt(7);
/*
 * ldpaa_bp_add_7() - allocate 7 Rx buffers, zero them, flush them from
 * the data cache, and release them into buffer pool 'bpid' in a single
 * QBMan release (7 is the per-release maximum). Retries the release
 * while the software portal is busy.
 * NOTE(review): return value handling after the release loop is not
 * visible in this view; presumably returns the number of buffers added.
 */
427 static int ldpaa_bp_add_7(uint16_t bpid)
429 uint64_t buf_array[7];
432 struct qbman_release_desc rd;
434 for (i = 0; i < 7; i++) {
435 addr = memalign(L1_CACHE_BYTES, LDPAA_ETH_RX_BUFFER_SIZE);
437 printf("addr allocation failed\n");
440 memset(addr, 0x00, LDPAA_ETH_RX_BUFFER_SIZE);
/* Make the zeroed buffer visible to hardware before release */
441 flush_dcache_range((u64)addr,
442 (u64)(addr + LDPAA_ETH_RX_BUFFER_SIZE));
444 buf_array[i] = (uint64_t)addr;
445 debug("Release: buffer addr =0x%p\n", addr);
449 /* In case the portal is busy, retry until successful.
450 * This function is guaranteed to succeed in a reasonable amount
456 qbman_release_desc_clear(&rd);
457 qbman_release_desc_set_bpid(&rd, bpid);
458 } while (qbman_swp_release(dflt_dpio->sw_portal, &rd, buf_array, i));
/*
 * ldpaa_dpbp_seed() - fill buffer pool 'bpid' with LDPAA_ETH_NUM_BUFS
 * buffers, added 7 at a time via ldpaa_bp_add_7(). Prints the running
 * count of seeded buffers.
 */
469 static int ldpaa_dpbp_seed(uint16_t bpid)
474 for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7) {
475 count = ldpaa_bp_add_7(bpid);
477 printf("Buffer Seed= %d\n", count);
/*
 * ldpaa_dpbp_setup() - initialise the default DPBP (buffer pool)
 * object: open it by id, enable it, read back its attributes (to learn
 * the hardware bpid), then seed it with Rx buffers. The tail lines at
 * 518/520 are the error-unwind path (disable/close).
 */
483 static int ldpaa_dpbp_setup(void)
487 err = dpbp_open(dflt_mc_io, dflt_dpbp->dpbp_attr.id,
488 &dflt_dpbp->dpbp_handle);
490 printf("dpbp_open() failed\n");
494 err = dpbp_enable(dflt_mc_io, dflt_dpbp->dpbp_handle);
496 printf("dpbp_enable() failed\n");
/* Read back attributes — the bpid here is used by all release/acquire */
500 err = dpbp_get_attributes(dflt_mc_io, dflt_dpbp->dpbp_handle,
501 &dflt_dpbp->dpbp_attr);
503 printf("dpbp_get_attributes() failed\n");
507 err = ldpaa_dpbp_seed(dflt_dpbp->dpbp_attr.bpid);
509 printf("Buffer seeding failed for DPBP %d (bpid=%d)\n",
510 dflt_dpbp->dpbp_attr.id, dflt_dpbp->dpbp_attr.bpid);
/* Error-unwind path (labels not visible in this view) */
518 dpbp_disable(dflt_mc_io, dflt_dpbp->dpbp_handle);
520 dpbp_close(dflt_mc_io, dflt_dpbp->dpbp_handle);
/*
 * ldpaa_dpbp_free() - tear down the default DPBP: disable, reset and
 * close the object. (Draining of outstanding buffers is handled by
 * ldpaa_dpbp_drain(); the call site is not visible in this view.)
 */
525 static void ldpaa_dpbp_free(void)
528 dpbp_disable(dflt_mc_io, dflt_dpbp->dpbp_handle);
529 dpbp_reset(dflt_mc_io, dflt_dpbp->dpbp_handle);
530 dpbp_close(dflt_mc_io, dflt_dpbp->dpbp_handle);
/*
 * ldpaa_dpni_setup() - open and configure the DPNI (network interface)
 * object. Opens by id, reads attributes, then programs the buffer
 * layouts for Rx, Tx and Tx-confirmation queues (progressively masking
 * off options not needed on the Tx side), and finally queries the
 * minimum Tx data offset, padding it by the software-annotation size.
 * The tail at 606 is the error-unwind close.
 */
533 static int ldpaa_dpni_setup(struct ldpaa_eth_priv *priv)
537 /* and get a handle for the DPNI this interface is associate with */
538 err = dpni_open(dflt_mc_io, priv->dpni_id, &priv->dpni_handle);
540 printf("dpni_open() failed\n");
544 err = dpni_get_attributes(dflt_mc_io, priv->dpni_handle,
547 printf("dpni_get_attributes() failed (err=%d)\n", err);
551 /* Configure our buffers' layout */
552 priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
553 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
554 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
555 priv->buf_layout.pass_parser_result = true;
556 priv->buf_layout.pass_frame_status = true;
557 priv->buf_layout.private_data_size = LDPAA_ETH_SWA_SIZE;
559 err = dpni_set_rx_buffer_layout(dflt_mc_io, priv->dpni_handle,
562 printf("dpni_set_rx_buffer_layout() failed");
/* Tx does not need the parser result */
567 priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PARSER_RESULT;
568 err = dpni_set_tx_buffer_layout(dflt_mc_io, priv->dpni_handle,
571 printf("dpni_set_tx_buffer_layout() failed");
575 /* ... tx-confirm. */
/* Tx-confirm additionally drops the private data area */
576 priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
577 err = dpni_set_tx_conf_buffer_layout(dflt_mc_io, priv->dpni_handle,
580 printf("dpni_set_tx_conf_buffer_layout() failed");
584 /* Now that we've set our tx buffer layout, retrieve the minimum
585 * required tx data offset.
587 err = dpni_get_tx_data_offset(dflt_mc_io, priv->dpni_handle,
588 &priv->tx_data_offset);
590 printf("dpni_get_tx_data_offset() failed\n");
591 goto err_data_offset;
594 /* Warn in case TX data offset is not multiple of 64 bytes. */
595 WARN_ON(priv->tx_data_offset % 64);
597 /* Accomodate SWA space. */
598 priv->tx_data_offset += LDPAA_ETH_SWA_SIZE;
599 debug("priv->tx_data_offset=%d\n", priv->tx_data_offset);
/* Error-unwind path (labels not visible in this view) */
606 dpni_close(dflt_mc_io, priv->dpni_handle);
/*
 * ldpaa_dpni_bind() - bind the DPNI to its buffer pool and create the
 * default Tx flow. Points the DPNI at the default DPBP (one pool, Rx
 * buffer size), then requests a new Tx flow id (DPNI_NEW_FLOW_ID asks
 * the MC to allocate one, returned via priv->tx_flow_id).
 */
611 static int ldpaa_dpni_bind(struct ldpaa_eth_priv *priv)
613 struct dpni_pools_cfg pools_params;
614 struct dpni_tx_flow_cfg dflt_tx_flow;
/* Single pool: the default DPBP, sized for Rx buffers */
617 pools_params.num_dpbp = 1;
618 pools_params.pools[0].dpbp_id = (uint16_t)dflt_dpbp->dpbp_attr.id;
619 pools_params.pools[0].buffer_size = LDPAA_ETH_RX_BUFFER_SIZE;
620 err = dpni_set_pools(dflt_mc_io, priv->dpni_handle, &pools_params);
622 printf("dpni_set_pools() failed\n");
/* Ask the MC to allocate a fresh Tx flow id with default config */
626 priv->tx_flow_id = DPNI_NEW_FLOW_ID;
627 memset(&dflt_tx_flow, 0, sizeof(dflt_tx_flow));
629 err = dpni_set_tx_flow(dflt_mc_io, priv->dpni_handle,
630 &priv->tx_flow_id, &dflt_tx_flow);
632 printf("dpni_set_tx_flow() failed\n");
/*
 * ldpaa_eth_netdev_init() - populate the U-Boot eth_device: name it
 * "DPNI<n>", wire up the init/halt/send/recv callbacks, run the (stub)
 * PHY init, and register the device with the network core.
 * NOTE(review): lines 652-655 appear to be commented-out TODO code for
 * MDIO/PHY info in the original file — comment delimiters are not
 * visible in this view.
 */
639 static int ldpaa_eth_netdev_init(struct eth_device *net_dev)
642 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
644 sprintf(net_dev->name, "DPNI%d", priv->dpni_id);
647 net_dev->init = ldpaa_eth_open;
648 net_dev->halt = ldpaa_eth_stop;
649 net_dev->send = ldpaa_eth_tx;
650 net_dev->recv = ldpaa_eth_pull_dequeue_rx;
652 TODO: PHY MDIO information
653 priv->bus = info->bus;
654 priv->phyaddr = info->phy_addr;
655 priv->enet_if = info->enet_if;
658 if (init_phy(net_dev))
661 err = eth_register(net_dev);
663 printf("eth_register() = %d\n", err);
/*
 * ldpaa_eth_init() - public probe entry for one DPNI object discovered
 * in the DPRC container. Allocates and zeroes the eth_device and the
 * driver private struct, links them together, records the DPNI id from
 * the object descriptor, and registers the net device.
 * NOTE(review): this function continues past the end of the visible
 * chunk (error-cleanup path is truncated at line 706) — confirm the
 * free/return handling against the full file.
 */
670 int ldpaa_eth_init(struct dprc_obj_desc obj_desc)
672 struct eth_device *net_dev = NULL;
673 struct ldpaa_eth_priv *priv = NULL;
678 net_dev = (struct eth_device *)malloc(sizeof(struct eth_device));
680 printf("eth_device malloc() failed\n");
683 memset(net_dev, 0, sizeof(struct eth_device));
685 /* alloc the ldpaa ethernet private struct */
686 priv = (struct ldpaa_eth_priv *)malloc(sizeof(struct ldpaa_eth_priv));
688 printf("ldpaa_eth_priv malloc() failed\n");
691 memset(priv, 0, sizeof(struct ldpaa_eth_priv));
/* Cross-link device and private data; record the DPNI id to open */
693 net_dev->priv = (void *)priv;
694 priv->net_dev = (struct eth_device *)net_dev;
695 priv->dpni_id = obj_desc.id;
697 err = ldpaa_eth_netdev_init(net_dev);
699 goto err_netdev_init;
701 debug("ldpaa ethernet: Probed interface %s\n", net_dev->name);
/* Error-cleanup path begins here (truncated in this view) */
706 net_dev->priv = NULL;