2 * Copyright (C) 2014 Freescale Semiconductor
4 * SPDX-License-Identifier: GPL-2.0+
14 #include <linux/compat.h>
16 #include "ldpaa_eth.h"
/* Stub PHY initialisation hook; external PHY bring-up is not implemented yet. */
19 static int init_phy(struct eth_device *dev)
21 /* TODO: add initialisation for an external PHY here */
/*
 * Consume one received frame descriptor: validate the frame annotation
 * status word (when present), hand the payload to the U-Boot network
 * stack, then recycle the buffer into the QBMAN buffer pool.
 * NOTE(review): this listing appears abridged; some local declarations
 * and the opening of the release-retry loop are not visible here.
 */
26 static void ldpaa_eth_rx(struct ldpaa_eth_priv *priv,
27 const struct dpaa_fd *fd)
32 struct ldpaa_fas *fas;
34 struct qbman_release_desc releasedesc;
35 struct qbman_swp *swp = dflt_dpio->sw_portal;
/* Decode buffer address, data offset and length from the FD. */
37 fd_addr = ldpaa_fd_get_addr(fd);
38 fd_offset = ldpaa_fd_get_offset(fd);
39 fd_length = ldpaa_fd_get_len(fd);
41 debug("Rx frame:data addr=0x%p size=0x%x\n", (u64 *)fd_addr, fd_length);
/* FASV set means the frame annotation status word is valid. */
43 if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
44 /* Read the frame annotation status word and check for errors */
45 fas = (struct ldpaa_fas *)
46 ((uint8_t *)(fd_addr) +
47 priv->buf_layout.private_data_size)
48 status = le32_to_cpu(fas->status);
49 if (status & LDPAA_ETH_RX_ERR_MASK) {
50 printf("Rx frame error(s): 0x%08x\n",
51 status & LDPAA_ETH_RX_ERR_MASK);
53 } else if (status & LDPAA_ETH_RX_UNSUPP_MASK) {
54 printf("Unsupported feature in bitmask: 0x%08x\n",
55 status & LDPAA_ETH_RX_UNSUPP_MASK);
/* Good frame: pass the payload (skipping the annotation area) upstream. */
60 debug("Rx frame: To Upper layer\n");
61 net_process_received_packet((uint8_t *)(fd_addr) + fd_offset,
/* Make the buffer cache-clean before handing it back to hardware. */
65 flush_dcache_range(fd_addr, fd_addr + LDPAA_ETH_RX_BUFFER_SIZE);
66 qbman_release_desc_clear(&releasedesc);
67 qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
69 /* Release buffer into the QBMAN */
70 err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
/* Retry the release while the software portal is busy. */
71 } while (err == -EBUSY);
/*
 * U-Boot .recv hook: issue a volatile pull-dequeue on the default Rx
 * frame queue, poll the DQRR for the response and, if a valid frame was
 * delivered, process it and consume the DQRR entry.
 * NOTE(review): abridged listing - the retry loops around the pull and
 * DQRR poll (bounded by 'i' and 'loop') are only partially visible.
 */
75 static int ldpaa_eth_pull_dequeue_rx(struct eth_device *dev)
77 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)dev->priv;
78 const struct ldpaa_dq *dq;
79 const struct dpaa_fd *fd;
/* 'i' and 'loop' appear to bound retry attempts - TODO confirm in full source */
80 int i = 5, err = 0, status, loop = 20;
81 static struct qbman_pull_desc pulldesc;
82 struct qbman_swp *swp = dflt_dpio->sw_portal;
/* Build a pull descriptor requesting one frame from the default Rx FQID. */
85 qbman_pull_desc_clear(&pulldesc);
86 qbman_pull_desc_set_numframes(&pulldesc, 1);
87 qbman_pull_desc_set_fq(&pulldesc, priv->rx_dflt_fqid);
89 err = qbman_swp_pull(swp, &pulldesc);
91 printf("Dequeue frames error:0x%08x\n", err);
/* Poll the dequeue response ring for the pull result. */
97 dq = qbman_swp_dqrr_next(swp);
104 /* Check for a valid frame. If none was delivered, only send a
105 * consume confirmation to QBMAN; otherwise hand the frame to the
106 * upper layer first and then send the consume confirmation to
109 status = (uint8_t)ldpaa_dq_flags(dq);
110 if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
111 debug("Dequeue RX frames:");
112 debug("No frame delivered\n");
114 qbman_swp_dqrr_consume(swp, dq);
118 fd = ldpaa_dq_fd(dq);
120 /* Obtain FD and process it */
121 ldpaa_eth_rx(priv, fd);
122 qbman_swp_dqrr_consume(swp, dq);
/*
 * Process one Tx-confirmation frame descriptor: check the frame
 * annotation status word for Tx errors, then recycle the transmit
 * buffer into the QBMAN buffer pool.
 * NOTE(review): abridged listing - the opening of the release-retry
 * loop is not visible here.
 */
130 static void ldpaa_eth_tx_conf(struct ldpaa_eth_priv *priv,
131 const struct dpaa_fd *fd)
134 struct ldpaa_fas *fas;
135 uint32_t status, err;
136 struct qbman_release_desc releasedesc;
137 struct qbman_swp *swp = dflt_dpio->sw_portal;
139 fd_addr = ldpaa_fd_get_addr(fd);
142 debug("TX Conf frame:data addr=0x%p\n", (u64 *)fd_addr);
144 /* Check the status from the Frame Annotation */
145 if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
146 fas = (struct ldpaa_fas *)
147 ((uint8_t *)(fd_addr) +
148 priv->buf_layout.private_data_size);
149 status = le32_to_cpu(fas->status);
150 if (status & LDPAA_ETH_TXCONF_ERR_MASK) {
151 printf("TxConf frame error(s): 0x%08x\n",
152 status & LDPAA_ETH_TXCONF_ERR_MASK);
/* Flush and release the buffer back into the pool; retry while busy. */
156 flush_dcache_range(fd_addr, fd_addr + LDPAA_ETH_RX_BUFFER_SIZE);
157 qbman_release_desc_clear(&releasedesc);
158 qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
160 /* Release buffer into the QBMAN */
161 err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
162 } while (err == -EBUSY);
/*
 * Pull-dequeue the Tx confirmation queue after a transmit: issue a
 * volatile pull on the Tx-conf FQID, poll the DQRR and, on a valid
 * frame, process the confirmation (reclaiming the Tx buffer) and
 * consume the DQRR entry.
 * NOTE(review): abridged listing - 'err' declaration and the retry
 * loops bounded by 'i'/'loop' are only partially visible.
 */
165 static int ldpaa_eth_pull_dequeue_tx_conf(struct ldpaa_eth_priv *priv)
167 const struct ldpaa_dq *dq;
168 const struct dpaa_fd *fd;
170 int i = 5, status, loop = 20;
171 static struct qbman_pull_desc pulldesc;
172 struct qbman_swp *swp = dflt_dpio->sw_portal;
/* Build a pull descriptor requesting one frame from the Tx-conf FQID. */
175 qbman_pull_desc_clear(&pulldesc);
176 qbman_pull_desc_set_numframes(&pulldesc, 1);
177 qbman_pull_desc_set_fq(&pulldesc, priv->tx_conf_fqid);
179 err = qbman_swp_pull(swp, &pulldesc);
181 printf("Dequeue TX conf frames error:0x%08x\n", err);
/* Poll the dequeue response ring for the pull result. */
187 dq = qbman_swp_dqrr_next(swp);
194 /* Check for a valid frame. If none was delivered, only send a
195 * consume confirmation to QBMAN; otherwise process the confirmation
196 * first and then send the consume confirmation to
199 status = (uint8_t)ldpaa_dq_flags(dq);
200 if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
201 debug("Dequeue TX conf frames:");
202 debug("No frame is delivered\n");
204 qbman_swp_dqrr_consume(swp, dq);
207 fd = ldpaa_dq_fd(dq);
209 ldpaa_eth_tx_conf(priv, fd);
210 qbman_swp_dqrr_consume(swp, dq);
/*
 * U-Boot .send hook: acquire a buffer from the DPBP pool, copy the frame
 * into it at the Tx data offset, build a frame descriptor, enqueue it via
 * the Tx queuing destination, then poll for the Tx confirmation.
 * NOTE(review): abridged listing - declarations of 'fd'/'buffer_start'
 * and the acquire-retry loop opening are not visible here.
 */
218 static int ldpaa_eth_tx(struct eth_device *net_dev, void *buf, int len)
220 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
223 int data_offset, err;
224 struct qbman_swp *swp = dflt_dpio->sw_portal;
225 struct qbman_eq_desc ed;
227 /* Setup the FD fields */
228 memset(&fd, 0, sizeof(fd));
230 data_offset = priv->tx_data_offset;
/* Acquire a free buffer from the pool, retrying while the portal is busy. */
233 err = qbman_swp_acquire(dflt_dpio->sw_portal,
234 dflt_dpbp->dpbp_attr.bpid,
236 } while (err == -EBUSY);
239 printf("qbman_swp_acquire() failed\n");
243 debug("TX data: malloc buffer start=0x%p\n", (u64 *)buffer_start);
/* Copy the payload past the annotation/SWA area and flush it to DRAM. */
245 memcpy(((uint8_t *)(buffer_start) + data_offset), buf, len);
247 flush_dcache_range(buffer_start, buffer_start +
248 LDPAA_ETH_RX_BUFFER_SIZE);
/* Populate the frame descriptor handed to the hardware. */
250 ldpaa_fd_set_addr(&fd, (u64)buffer_start);
251 ldpaa_fd_set_offset(&fd, (uint16_t)(data_offset));
252 ldpaa_fd_set_bpid(&fd, dflt_dpbp->dpbp_attr.bpid);
253 ldpaa_fd_set_len(&fd, len);
255 fd.simple.ctrl = LDPAA_FD_CTRL_ASAL | LDPAA_FD_CTRL_PTA |
/* Enqueue through the Tx QDID/flow with no order restoration point. */
258 qbman_eq_desc_clear(&ed);
259 qbman_eq_desc_set_no_orp(&ed, 0);
260 qbman_eq_desc_set_qd(&ed, priv->tx_qdid, priv->tx_flow_id, 0);
261 err = qbman_swp_enqueue(swp, &ed, (const struct qbman_fd *)(&fd));
263 printf("error enqueueing Tx frame\n");
/* Wait for and process the Tx confirmation so the buffer is reclaimed. */
267 err = ldpaa_eth_pull_dequeue_tx_conf(priv);
269 printf("error Tx Conf frame\n");
/*
 * U-Boot .init hook: bring the interface up. Sets up the DPNI and DPBP
 * objects, binds them, programs the MAC address, starts the PHY, enables
 * the DPNI and caches the Rx FQID, Tx QDID and Tx-conf FQID used by the
 * fast path. Returns 0 when the link is up, non-zero otherwise.
 * NOTE(review): abridged listing - error-unwind labels between the
 * dpni_disable()/dpni_close() cleanup calls are not visible here.
 */
274 static int ldpaa_eth_open(struct eth_device *net_dev, bd_t *bd)
276 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
277 struct dpni_queue_attr rx_queue_attr;
278 struct dpni_tx_flow_attr tx_flow_attr;
/* Nothing to do if the interface is already active. */
282 if (net_dev->state == ETH_STATE_ACTIVE)
285 /* DPNI initialization */
286 err = ldpaa_dpni_setup(priv);
290 err = ldpaa_dpbp_setup();
294 /* DPNI binding DPBP */
295 err = ldpaa_dpni_bind(priv);
/* Fetch the MAC address the MC firmware assigned to this DPNI. */
299 err = dpni_get_primary_mac_addr(dflt_mc_io, priv->dpni_handle,
302 printf("dpni_get_primary_mac_addr() failed\n");
306 memcpy(net_dev->enetaddr, mac_addr, 0x6);
308 /* setup the MAC address */
/* Reject multicast addresses (bit 0 of the first octet set). */
309 if (net_dev->enetaddr[0] & 0x01) {
310 printf("%s: MacAddress is multcast address\n", __func__);
315 /* TODO Check this path */
316 err = phy_startup(priv->phydev);
318 printf("%s: Could not initialize\n", priv->phydev->dev->name);
/* Link parameters are forced here rather than negotiated - TODO confirm */
322 priv->phydev->speed = SPEED_1000;
323 priv->phydev->link = 1;
324 priv->phydev->duplex = DUPLEX_FULL;
327 err = dpni_enable(dflt_mc_io, priv->dpni_handle);
329 printf("dpni_enable() failed\n");
333 /* TODO: support multiple Rx flows */
334 err = dpni_get_rx_flow(dflt_mc_io, priv->dpni_handle, 0, 0,
337 printf("dpni_get_rx_flow() failed\n");
341 priv->rx_dflt_fqid = rx_queue_attr.fqid;
/* Queuing destination ID used when enqueueing Tx frames. */
343 err = dpni_get_qdid(dflt_mc_io, priv->dpni_handle, &priv->tx_qdid);
345 printf("dpni_get_qdid() failed\n");
349 err = dpni_get_tx_flow(dflt_mc_io, priv->dpni_handle, priv->tx_flow_id,
352 printf("dpni_get_tx_flow() failed\n");
356 priv->tx_conf_fqid = tx_flow_attr.conf_err_attr.queue_attr.fqid;
358 if (!priv->phydev->link)
359 printf("%s: No link.\n", priv->phydev->dev->name);
361 return priv->phydev->link ? 0 : -1;
/* Error unwind: undo DPNI enable/open in reverse order. */
366 dpni_disable(dflt_mc_io, priv->dpni_handle);
370 dpni_close(dflt_mc_io, priv->dpni_handle);
/*
 * U-Boot .halt hook: stop Tx/Rx traffic, shut the PHY down, then reset
 * and close the DPNI. Does nothing if the device was never started
 * (still in PASSIVE or INIT state).
 */
375 static void ldpaa_eth_stop(struct eth_device *net_dev)
377 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
380 if ((net_dev->state == ETH_STATE_PASSIVE) ||
381 (net_dev->state == ETH_STATE_INIT))
383 /* Stop Tx and Rx traffic */
384 err = dpni_disable(dflt_mc_io, priv->dpni_handle);
386 printf("dpni_disable() failed\n");
389 phy_shutdown(priv->phydev);
/* Return the DPNI to a clean state for the next open. */
393 dpni_reset(dflt_mc_io, priv->dpni_handle);
394 dpni_close(dflt_mc_io, priv->dpni_handle);
/*
 * Acquire up to @count buffers from the default buffer pool (QBMAN
 * returns at most 7 per acquire, hence buf_array[7]) and return them to
 * the heap. NOTE(review): abridged listing - the free() of each buffer
 * and the use of @count in the acquire call are not visible here.
 */
397 static void ldpaa_dpbp_drain_cnt(int count)
399 uint64_t buf_array[7];
406 ret = qbman_swp_acquire(dflt_dpio->sw_portal,
407 dflt_dpbp->dpbp_attr.bpid,
410 printf("qbman_swp_acquire() failed\n");
/* 'ret' is the number of buffers actually acquired. */
413 for (i = 0; i < ret; i++) {
414 addr = (void *)buf_array[i];
415 debug("Free: buffer addr =0x%p\n", addr);
/* Drain every seeded buffer from the pool, seven (the per-acquire max) at a time. */
421 static void ldpaa_dpbp_drain(void)
424 for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7)
425 ldpaa_dpbp_drain_cnt(7);
/*
 * Allocate seven cache-aligned Rx buffers, zero and flush them, and
 * release them into buffer pool @bpid with a single QBMAN release.
 * Returns the number of buffers added (return statement not visible in
 * this abridged listing).
 */
428 static int ldpaa_bp_add_7(uint16_t bpid)
430 uint64_t buf_array[7];
433 struct qbman_release_desc rd;
435 for (i = 0; i < 7; i++) {
436 addr = memalign(L1_CACHE_BYTES, LDPAA_ETH_RX_BUFFER_SIZE);
438 printf("addr allocation failed\n");
/* Zero the buffer and push it out of cache before hardware sees it. */
441 memset(addr, 0x00, LDPAA_ETH_RX_BUFFER_SIZE);
442 flush_dcache_range((u64)addr,
443 (u64)(addr + LDPAA_ETH_RX_BUFFER_SIZE));
445 buf_array[i] = (uint64_t)addr;
446 debug("Release: buffer addr =0x%p\n", addr);
450 /* In case the portal is busy, retry until successful.
451 * This function is guaranteed to succeed in a reasonable amount
457 qbman_release_desc_clear(&rd);
458 qbman_release_desc_set_bpid(&rd, bpid);
459 } while (qbman_swp_release(dflt_dpio->sw_portal, &rd, buf_array, i));
/*
 * Seed buffer pool @bpid with LDPAA_ETH_NUM_BUFS Rx buffers, added in
 * batches of seven via ldpaa_bp_add_7().
 */
470 static int ldpaa_dpbp_seed(uint16_t bpid)
475 for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7) {
476 count = ldpaa_bp_add_7(bpid);
478 printf("Buffer Seed= %d\n", count);
/*
 * Open and enable the default DPBP object, read back its attributes
 * (notably the hardware bpid) and seed it with Rx buffers. On failure
 * the error paths disable and close the DPBP again.
 */
484 static int ldpaa_dpbp_setup(void)
488 err = dpbp_open(dflt_mc_io, dflt_dpbp->dpbp_attr.id,
489 &dflt_dpbp->dpbp_handle);
491 printf("dpbp_open() failed\n");
495 err = dpbp_enable(dflt_mc_io, dflt_dpbp->dpbp_handle);
497 printf("dpbp_enable() failed\n");
/* Attributes provide the bpid used by all acquire/release calls. */
501 err = dpbp_get_attributes(dflt_mc_io, dflt_dpbp->dpbp_handle,
502 &dflt_dpbp->dpbp_attr);
504 printf("dpbp_get_attributes() failed\n");
508 err = ldpaa_dpbp_seed(dflt_dpbp->dpbp_attr.bpid);
510 printf("Buffer seeding failed for DPBP %d (bpid=%d)\n",
511 dflt_dpbp->dpbp_attr.id, dflt_dpbp->dpbp_attr.bpid);
/* Error unwind: disable then close the DPBP. */
519 dpbp_disable(dflt_mc_io, dflt_dpbp->dpbp_handle);
521 dpbp_close(dflt_mc_io, dflt_dpbp->dpbp_handle);
/* Tear down the default DPBP object: disable, reset and close it. */
526 static void ldpaa_dpbp_free(void)
529 dpbp_disable(dflt_mc_io, dflt_dpbp->dpbp_handle);
530 dpbp_reset(dflt_mc_io, dflt_dpbp->dpbp_handle);
531 dpbp_close(dflt_mc_io, dflt_dpbp->dpbp_handle);
/*
 * Open the DPNI this interface is bound to, read its attributes and
 * program the Rx / Tx / Tx-confirmation buffer layouts, then retrieve
 * the minimum Tx data offset and grow it by the software annotation
 * (SWA) area. NOTE(review): abridged listing - intermediate error-path
 * labels before the closing dpni_close() are not visible here.
 */
534 static int ldpaa_dpni_setup(struct ldpaa_eth_priv *priv)
538 /* and get a handle for the DPNI this interface is associate with */
539 err = dpni_open(dflt_mc_io, priv->dpni_id, &priv->dpni_handle);
541 printf("dpni_open() failed\n");
545 err = dpni_get_attributes(dflt_mc_io, priv->dpni_handle,
548 printf("dpni_get_attributes() failed (err=%d)\n", err);
552 /* Configure our buffers' layout */
553 priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
554 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
555 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
556 priv->buf_layout.pass_parser_result = true;
557 priv->buf_layout.pass_frame_status = true;
558 priv->buf_layout.private_data_size = LDPAA_ETH_SWA_SIZE;
/* Rx gets the full layout: parser result, frame status, private data. */
560 err = dpni_set_rx_buffer_layout(dflt_mc_io, priv->dpni_handle,
563 printf("dpni_set_rx_buffer_layout() failed");
/* Tx does not need the parser result. */
568 priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PARSER_RESULT;
569 err = dpni_set_tx_buffer_layout(dflt_mc_io, priv->dpni_handle,
572 printf("dpni_set_tx_buffer_layout() failed");
576 /* ... tx-confirm. */
/* Tx-conf also drops the private-data (SWA) area. */
577 priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
578 err = dpni_set_tx_conf_buffer_layout(dflt_mc_io, priv->dpni_handle,
581 printf("dpni_set_tx_conf_buffer_layout() failed");
585 /* Now that we've set our tx buffer layout, retrieve the minimum
586 * required tx data offset.
588 err = dpni_get_tx_data_offset(dflt_mc_io, priv->dpni_handle,
589 &priv->tx_data_offset);
591 printf("dpni_get_tx_data_offset() failed\n");
592 goto err_data_offset;
595 /* Warn in case TX data offset is not multiple of 64 bytes. */
596 WARN_ON(priv->tx_data_offset % 64);
598 /* Accomodate SWA space. */
599 priv->tx_data_offset += LDPAA_ETH_SWA_SIZE;
600 debug("priv->tx_data_offset=%d\n", priv->tx_data_offset);
/* Error unwind: close the DPNI handle. */
607 dpni_close(dflt_mc_io, priv->dpni_handle);
/*
 * Bind the DPNI to its resources: attach the default DPBP as the single
 * Rx buffer pool and create the default Tx flow (MC assigns the flow id
 * because DPNI_NEW_FLOW_ID is passed in).
 */
612 static int ldpaa_dpni_bind(struct ldpaa_eth_priv *priv)
614 struct dpni_pools_cfg pools_params;
615 struct dpni_tx_flow_cfg dflt_tx_flow;
618 pools_params.num_dpbp = 1;
619 pools_params.pools[0].dpbp_id = (uint16_t)dflt_dpbp->dpbp_attr.id;
620 pools_params.pools[0].buffer_size = LDPAA_ETH_RX_BUFFER_SIZE;
621 err = dpni_set_pools(dflt_mc_io, priv->dpni_handle, &pools_params);
623 printf("dpni_set_pools() failed\n");
/* Request a fresh flow id; dpni_set_tx_flow() writes the real id back. */
627 priv->tx_flow_id = DPNI_NEW_FLOW_ID;
628 memset(&dflt_tx_flow, 0, sizeof(dflt_tx_flow));
630 err = dpni_set_tx_flow(dflt_mc_io, priv->dpni_handle,
631 &priv->tx_flow_id, &dflt_tx_flow);
633 printf("dpni_set_tx_flow() failed\n");
/*
 * Populate the eth_device: name it after the DPNI id, wire up the U-Boot
 * driver callbacks (init/halt/send/recv), run the PHY stub and register
 * the device with the network core.
 * NOTE(review): the info->bus/phy_addr/enet_if assignments below follow a
 * "TODO: PHY MDIO information" marker and 'info' is not declared in this
 * view - presumably commented-out placeholder code; confirm in full source.
 */
640 static int ldpaa_eth_netdev_init(struct eth_device *net_dev)
643 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
645 sprintf(net_dev->name, "DPNI%d", priv->dpni_id);
648 net_dev->init = ldpaa_eth_open;
649 net_dev->halt = ldpaa_eth_stop;
650 net_dev->send = ldpaa_eth_tx;
651 net_dev->recv = ldpaa_eth_pull_dequeue_rx;
653 TODO: PHY MDIO information
654 priv->bus = info->bus;
655 priv->phyaddr = info->phy_addr;
656 priv->enet_if = info->enet_if;
659 if (init_phy(net_dev))
662 err = eth_register(net_dev);
664 printf("eth_register() = %d\n", err);
671 int ldpaa_eth_init(struct dprc_obj_desc obj_desc)
673 struct eth_device *net_dev = NULL;
674 struct ldpaa_eth_priv *priv = NULL;
679 net_dev = (struct eth_device *)malloc(sizeof(struct eth_device));
681 printf("eth_device malloc() failed\n");
684 memset(net_dev, 0, sizeof(struct eth_device));
686 /* alloc the ldpaa ethernet private struct */
687 priv = (struct ldpaa_eth_priv *)malloc(sizeof(struct ldpaa_eth_priv));
689 printf("ldpaa_eth_priv malloc() failed\n");
692 memset(priv, 0, sizeof(struct ldpaa_eth_priv));
694 net_dev->priv = (void *)priv;
695 priv->net_dev = (struct eth_device *)net_dev;
696 priv->dpni_id = obj_desc.id;
698 err = ldpaa_eth_netdev_init(net_dev);
700 goto err_netdev_init;
702 debug("ldpaa ethernet: Probed interface %s\n", net_dev->name);
707 net_dev->priv = NULL;