2 * (C) Copyright 2005-2006
3 * Stefan Roese, DENX Software Engineering, sr@denx.de.
5 * SPDX-License-Identifier: GPL-2.0+
9 #define DEBUG /* define for debug output */
17 #include <asm/processor.h>
18 #include <asm/arch-ixp/ixp425.h>
27 #include <IxFeatureCtrl.h>
/* QMgr dispatcher entry point; fetched by npe_csr_load() and invoked from npe_poll(). */
31 static IxQMgrDispatcherFuncPtr qDispatcherFunc = NULL;
/* Per-port bookkeeping: which NPE Ethernet ports exist on this silicon / are used by U-Boot. */
32 static int npe_exists[NPE_NUM_PORTS];
33 static int npe_used[NPE_NUM_PORTS];
35 /* A little extra so we can align to cacheline. */
36 static u8 npe_alloc_pool[NPE_MEM_POOL_SIZE + CONFIG_SYS_CACHELINE_SIZE - 1];
/* Bump-allocator cursors into npe_alloc_pool; managed exclusively by npe_alloc()
 * (and re-initialized in npe_init()/npe_initialize()). */
37 static u8 *npe_alloc_end;
38 static u8 *npe_alloc_free;
/*
 * npe_alloc() - carve a cacheline-aligned chunk out of the static pool.
 * The request is rounded up to a whole number of cachelines; there is no
 * corresponding free. On pool exhaustion a diagnostic is printed (the
 * failure return value is elided in this extract — presumably NULL,
 * TODO confirm against the full source).
 */
40 static void *npe_alloc(int size)
/* round the request up to a multiple of CONFIG_SYS_CACHELINE_SIZE */
45 size = (size + (CONFIG_SYS_CACHELINE_SIZE-1)) & ~(CONFIG_SYS_CACHELINE_SIZE-1);
/* only hand out memory while the pool still has room for the whole chunk */
48 if ((npe_alloc_free + size) < npe_alloc_end) {
50 npe_alloc_free += size;
52 printf("npe_alloc: failed (count=%d, size=%d)!\n", count, size);
57 /* Not interrupt safe! */
/*
 * mbuf_enqueue() - append 'new' at the tail of the singly-linked mbuf list
 * headed at *q, chained through the NEXT_PKT_IN_CHAIN pointer.
 */
58 static void mbuf_enqueue(IX_OSAL_MBUF **q, IX_OSAL_MBUF *new)
/* terminate the new element before linking it in */
62 IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(new) = NULL;
/* walk to the current tail and hang the new element off it
 * (the empty-queue branch is elided in this extract) */
65 while(IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m))
66 m = IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m);
67 IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m) = new;
72 /* Not interrupt safe! */
/*
 * mbuf_dequeue() - pop and return the head mbuf of the list at *q,
 * advancing the head to its chain successor (head fetch and return are
 * elided in this extract).
 */
73 static IX_OSAL_MBUF *mbuf_dequeue(IX_OSAL_MBUF **q)
77 *q = IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m);
/*
 * reset_tx_mbufs() - rebuild the free-TX queue: clear every TX mbuf header,
 * attach it to its slot in the tx_pkts packet area and enqueue it on
 * txQHead, from where npe_send() dequeues buffers.
 */
81 static void reset_tx_mbufs(struct npe* p_npe)
86 p_npe->txQHead = NULL;
88 for (i = 0; i < CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS; i++) {
89 m = &p_npe->tx_mbufs[i];
/* start from a clean header, then wire up the per-descriptor buffer */
91 memset(m, 0, sizeof(*m));
93 IX_OSAL_MBUF_MDATA(m) = (void *)&p_npe->tx_pkts[i * NPE_PKT_SIZE];
94 IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
95 mbuf_enqueue(&p_npe->txQHead, m);
/*
 * reset_rx_mbufs() - hand every RX mbuf to the NPE's RX-free queue.
 * The packet area is cache-invalidated first so the CPU does not later
 * read stale cache lines over data the NPE has DMAed in.
 */
99 static void reset_rx_mbufs(struct npe* p_npe)
104 p_npe->rxQHead = NULL;
106 HAL_DCACHE_INVALIDATE(p_npe->rx_pkts, NPE_PKT_SIZE *
107 CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
109 for (i = 0; i < CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS; i++) {
110 m = &p_npe->rx_mbufs[i];
/* clean header, buffer pointer and full buffer length for each descriptor */
112 memset(m, 0, sizeof(*m));
114 IX_OSAL_MBUF_MDATA(m) = (void *)&p_npe->rx_pkts[i * NPE_PKT_SIZE];
115 IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
/* give the buffer to the NPE so it can receive into it */
117 if(ixEthAccPortRxFreeReplenish(p_npe->eth_id, m) != IX_SUCCESS) {
118 printf("ixEthAccPortRxFreeReplenish failed for port %d\n", p_npe->eth_id);
/*
 * init_rx_mbufs() - one-time allocation of the RX packet area and the RX
 * mbuf-header array from the static pool, then delegate queue setup to
 * reset_rx_mbufs(). Error paths print a message (early return elided in
 * this extract).
 */
124 static void init_rx_mbufs(struct npe* p_npe)
126 p_npe->rxQHead = NULL;
128 p_npe->rx_pkts = npe_alloc(NPE_PKT_SIZE *
129 CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
130 if (p_npe->rx_pkts == NULL) {
131 printf("alloc of packets failed.\n");
135 p_npe->rx_mbufs = (IX_OSAL_MBUF *)
136 npe_alloc(sizeof(IX_OSAL_MBUF) *
137 CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
138 if (p_npe->rx_mbufs == NULL) {
139 printf("alloc of mbufs failed.\n");
143 reset_rx_mbufs(p_npe);
/*
 * init_tx_mbufs() - TX counterpart of init_rx_mbufs(): allocate the TX
 * packet area and mbuf headers from the static pool, then build the free
 * TX queue via reset_tx_mbufs(). Error paths print a message (early
 * return elided in this extract).
 */
146 static int init_tx_mbufs(struct npe* p_npe)
148 p_npe->tx_pkts = npe_alloc(NPE_PKT_SIZE *
149 CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS);
150 if (p_npe->tx_pkts == NULL) {
151 printf("alloc of packets failed.\n");
155 p_npe->tx_mbufs = (IX_OSAL_MBUF *)
156 npe_alloc(sizeof(IX_OSAL_MBUF) *
157 CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS);
158 if (p_npe->tx_mbufs == NULL) {
159 printf("alloc of mbufs failed.\n");
163 reset_tx_mbufs(p_npe);
166 /* Convert IX_ETH_PORT_n to IX_NPEMH_NPEID_NPEx */
/*
 * NOTE(review): the selection logic (presumably a switch on eth_id) is
 * elided in this extract; only the three return statements are visible.
 * Confirm the port->NPE mapping against the full source.
 */
167 static int __eth_to_npe(int eth_id)
171 return IX_NPEMH_NPEID_NPEB;
174 return IX_NPEMH_NPEID_NPEC;
177 return IX_NPEMH_NPEID_NPEA;
182 /* Poll the CSR machinery. */
/*
 * npe_poll() - drive the access-library state machine from polling context:
 * drain pending NPE mailbox messages for this port's NPE, then run the
 * QMgr dispatcher over the low-priority queue group (which fires the
 * RX/TX-done callbacks below).
 */
183 static void npe_poll(int eth_id)
/* only poll once npe_csr_load() has fetched the dispatcher */
185 if (qDispatcherFunc != NULL) {
186 ixNpeMhMessagesReceive(__eth_to_npe(eth_id));
187 (*qDispatcherFunc)(IX_QMGR_QUELOW_GROUP);
191 /* ethAcc RX callback */
/*
 * npe_rx_callback() - invoked by the ethAcc layer for every received frame.
 * cbTag carries the driver's private struct npe. A non-empty frame is
 * copied into the NetRxPackets[] ring at index rx_write (reader index is
 * rx_read, ring size PKTBUFSRX) for npe_rx() to hand to the net stack.
 * In all cases the mbuf is reset to full size and replenished back to the
 * NPE's RX-free queue.
 */
192 static void npe_rx_callback(u32 cbTag, IX_OSAL_MBUF *m, IxEthAccPortId portid)
194 struct npe* p_npe = (struct npe *)cbTag;
196 if (IX_OSAL_MBUF_MLEN(m) > 0) {
197 mbuf_enqueue(&p_npe->rxQHead, m);
/* ring-full check: writer about to catch up with reader */
199 if (p_npe->rx_write == ((p_npe->rx_read-1) & (PKTBUFSRX-1))) {
200 debug("Rx overflow: rx_write=%d rx_read=%d\n",
201 p_npe->rx_write, p_npe->rx_read);
203 debug("Received message #%d (len=%d)\n", p_npe->rx_write,
204 IX_OSAL_MBUF_MLEN(m));
/* stage the frame and its length for npe_rx() */
205 memcpy((void *)NetRxPackets[p_npe->rx_write], IX_OSAL_MBUF_MDATA(m),
206 IX_OSAL_MBUF_MLEN(m));
207 p_npe->rx_len[p_npe->rx_write] = IX_OSAL_MBUF_MLEN(m);
/* wrap the write index at the end of the ring (increment elided in this extract) */
209 if (p_npe->rx_write == PKTBUFSRX)
212 #ifdef CONFIG_PRINT_RX_FRAMES
/* optional hex dump of the first 60 bytes of the frame */
214 u8 *ptr = IX_OSAL_MBUF_MDATA(m);
217 for (i=0; i<60; i++) {
218 debug("%02x ", *ptr++);
225 m = mbuf_dequeue(&p_npe->rxQHead);
/* zero-length frame: nothing to deliver, just reclaim the mbuf */
227 debug("Received frame with length 0!!!\n");
228 m = mbuf_dequeue(&p_npe->rxQHead);
231 /* Now return mbuf to NPE */
232 IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
233 IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(m) = NULL;
234 IX_OSAL_MBUF_FLAGS(m) = 0;
236 if(ixEthAccPortRxFreeReplenish(p_npe->eth_id, m) != IX_SUCCESS) {
237 debug("npe_rx_callback: Error returning mbuf.\n");
241 /* ethAcc TX callback */
/*
 * npe_tx_callback() - TX-done callback: the NPE is finished with this mbuf,
 * so restore it to full buffer size, clear chain/flags and put it back on
 * the free TX queue for the next npe_send().
 */
242 static void npe_tx_callback(u32 cbTag, IX_OSAL_MBUF *m)
244 struct npe* p_npe = (struct npe *)cbTag;
246 debug("%s\n", __FUNCTION__);
248 IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
249 IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(m) = NULL;
250 IX_OSAL_MBUF_FLAGS(m) = 0;
252 mbuf_enqueue(&p_npe->txQHead, m);
/*
 * npe_set_mac_address() - program dev->enetaddr as the port's unicast MAC
 * address through the ethAcc API; prints the address on failure.
 * (Success/failure return values are elided in this extract.)
 */
256 static int npe_set_mac_address(struct eth_device *dev)
258 struct npe *p_npe = (struct npe *)dev->priv;
259 IxEthAccMacAddr npeMac;
261 debug("%s\n", __FUNCTION__);
263 /* Set MAC address */
264 memcpy(npeMac.macAddress, dev->enetaddr, 6);
266 if (ixEthAccPortUnicastMacAddressSet(p_npe->eth_id, &npeMac) != IX_ETH_ACC_SUCCESS) {
267 printf("Error setting unicast address! %02x:%02x:%02x:%02x:%02x:%02x\n",
268 npeMac.macAddress[0], npeMac.macAddress[1],
269 npeMac.macAddress[2], npeMac.macAddress[3],
270 npeMac.macAddress[4], npeMac.macAddress[5]);
277 /* Boot-time CSR library initialization. */
/*
 * npe_csr_load() - bring up the IXP400 access library in order:
 *  1. queue manager + dispatcher hook (stored in qDispatcherFunc),
 *  2. NPE message handler,
 *  3. per-port NPE firmware download (only for ports both used and present),
 *  4. disable MAC learning (not needed in U-Boot),
 *  5. ethAcc core init and per-port init with FIFO scheduling,
 *     RX-FCS stripping and TX-FCS appending.
 * Error-path returns are elided in this extract.
 */
278 static int npe_csr_load(void)
282 if (ixQMgrInit() != IX_SUCCESS) {
283 debug("Error initialising queue manager!\n");
287 ixQMgrDispatcherLoopGet(&qDispatcherFunc);
289 if(ixNpeMhInitialize(IX_NPEMH_NPEINTERRUPTS_YES) != IX_SUCCESS) {
290 printf("Error initialising NPE Message handler!\n");
/* firmware images are per-NPE: port 1 lives on NPE-B, port 2 on NPE-C */
294 if (npe_used[IX_ETH_PORT_1] && npe_exists[IX_ETH_PORT_1] &&
295 ixNpeDlNpeInitAndStart(IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS)
297 printf("Error downloading firmware to NPE-B!\n");
301 if (npe_used[IX_ETH_PORT_2] && npe_exists[IX_ETH_PORT_2] &&
302 ixNpeDlNpeInitAndStart(IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS)
304 printf("Error downloading firmware to NPE-C!\n");
308 /* don't need this for U-Boot */
309 ixFeatureCtrlSwConfigurationWrite(IX_FEATURECTRL_ETH_LEARNING, false);
311 if (ixEthAccInit() != IX_ETH_ACC_SUCCESS) {
312 printf("Error initialising Ethernet access driver!\n");
316 for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++) {
/* skip ports U-Boot does not use or the silicon does not provide */
317 if (!npe_used[i] || !npe_exists[i])
319 if (ixEthAccPortInit(i) != IX_ETH_ACC_SUCCESS) {
320 printf("Error initialising Ethernet port%d!\n", i);
322 if (ixEthAccTxSchedulingDisciplineSet(i, FIFO_NO_PRIORITY) != IX_ETH_ACC_SUCCESS) {
323 printf("Error setting scheduling discipline for port %d.\n", i);
325 if (ixEthAccPortRxFrameAppendFCSDisable(i) != IX_ETH_ACC_SUCCESS) {
326 printf("Error disabling RX FCS for port %d.\n", i);
328 if (ixEthAccPortTxFrameAppendFCSEnable(i) != IX_ETH_ACC_SUCCESS) {
329 printf("Error enabling TX FCS for port %d.\n", i);
/*
 * npe_init() - U-Boot eth init hook: resolve link speed/duplex (either from
 * the fixed-link config options or by waiting for PHY autonegotiation and
 * querying the PHY via miiphy), reset the allocation pool, build the RX/TX
 * mbuf pools, register the RX and TX-done callbacks, program the MAC
 * address and enable the port.
 *
 * Fix: "®_short" on the two miiphy_read() lines was mojibake for
 * "&reg_short" ("&reg" collapsed to the (R) sign by an HTML-entity pass);
 * without the fix the file does not compile and reg_short is never written.
 */
336 static int npe_init(struct eth_device *dev, bd_t * bis)
338 struct npe *p_npe = (struct npe *)dev->priv;
344 debug("%s: 1\n", __FUNCTION__);
/* fixed-link configuration bypasses PHY autonegotiation entirely */
346 #ifdef CONFIG_MII_NPE0_FIXEDLINK
347 if (0 == p_npe->eth_id) {
348 speed = CONFIG_MII_NPE0_SPEED;
349 duplex = CONFIG_MII_NPE0_FULLDUPLEX ? FULL : HALF;
352 #ifdef CONFIG_MII_NPE1_FIXEDLINK
353 if (1 == p_npe->eth_id) {
354 speed = CONFIG_MII_NPE1_SPEED;
355 duplex = CONFIG_MII_NPE1_FULLDUPLEX ? FULL : HALF;
/* read PHY status to see whether autonegotiation is done */
359 miiphy_read(dev->name, p_npe->phy_no, MII_BMSR, &reg_short);
362 * Wait if PHY is capable of autonegotiation and
363 * autonegotiation is not complete
365 if ((reg_short & BMSR_ANEGCAPABLE) &&
366 !(reg_short & BMSR_ANEGCOMPLETE)) {
367 puts("Waiting for PHY auto negotiation to complete");
369 while (!(reg_short & BMSR_ANEGCOMPLETE)) {
373 if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
374 puts(" TIMEOUT !\n");
/* re-read BMSR once per 1000 iterations while waiting */
378 if ((i++ % 1000) == 0) {
380 miiphy_read(dev->name, p_npe->phy_no,
381 MII_BMSR, &reg_short);
383 udelay(1000); /* 1 ms */
386 /* another 500 ms (results in faster booting) */
389 speed = miiphy_speed(dev->name, p_npe->phy_no);
390 duplex = miiphy_duplex(dev->name, p_npe->phy_no);
/* report the negotiated link once per boot */
393 if (p_npe->print_speed) {
394 p_npe->print_speed = 0;
395 printf ("ENET Speed is %d Mbps - %s duplex connection\n",
396 (int) speed, (duplex == HALF) ? "HALF" : "FULL");
/* reset the bump allocator before (re)building the mbuf pools */
399 npe_alloc_end = npe_alloc_pool + sizeof(npe_alloc_pool);
400 npe_alloc_free = (u8 *)(((unsigned)npe_alloc_pool +
401 CONFIG_SYS_CACHELINE_SIZE - 1) & ~(CONFIG_SYS_CACHELINE_SIZE - 1));
403 /* initialize mbuf pool */
404 init_rx_mbufs(p_npe);
405 init_tx_mbufs(p_npe);
407 if (ixEthAccPortRxCallbackRegister(p_npe->eth_id, npe_rx_callback,
408 (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
409 printf("can't register RX callback!\n");
413 if (ixEthAccPortTxDoneCallbackRegister(p_npe->eth_id, npe_tx_callback,
414 (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
415 printf("can't register TX callback!\n");
419 npe_set_mac_address(dev);
421 if (ixEthAccPortEnable(p_npe->eth_id) != IX_ETH_ACC_SUCCESS) {
422 printf("can't enable port!\n");
431 #if 0 /* test-only: probably have to deal with it when booting linux (for a clean state) */
432 /* Uninitialize CSR library. */
/* body of npe_csr_unload() is elided in this extract (and compiled out by #if 0) */
433 static void npe_csr_unload(void)
441 /* callback which is used by ethAcc to recover RX buffers when stopping */
/* no-op beyond the trace: recovered buffers are simply dropped on halt */
442 static void npe_rx_stop_callback(u32 cbTag, IX_OSAL_MBUF *m, IxEthAccPortId portid)
444 debug("%s\n", __FUNCTION__);
447 /* callback which is used by ethAcc to recover TX buffers when stopping */
448 static void npe_tx_stop_callback(u32 cbTag, IX_OSAL_MBUF *m)
450 debug("%s\n", __FUNCTION__);
/*
 * npe_halt() - U-Boot eth halt hook. Polls the CSR machinery for a while
 * so outstanding mbufs can be recovered via the TX/RX callbacks. The full
 * stop sequence (re-register stop callbacks, disable the port, reset the
 * NPEs) is compiled out below as test-only code.
 */
454 static void npe_halt(struct eth_device *dev)
456 struct npe *p_npe = (struct npe *)dev->priv;
459 debug("%s\n", __FUNCTION__);
461 /* Delay to give time for recovery of mbufs */
462 for (i = 0; i < 100; i++) {
463 npe_poll(p_npe->eth_id);
467 #if 0 /* test-only: probably have to deal with it when booting linux (for a clean state) */
468 if (ixEthAccPortRxCallbackRegister(p_npe->eth_id, npe_rx_stop_callback,
469 (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
470 debug("Error registering rx callback!\n");
473 if (ixEthAccPortTxDoneCallbackRegister(p_npe->eth_id, npe_tx_stop_callback,
474 (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
475 debug("Error registering tx callback!\n");
478 if (ixEthAccPortDisable(p_npe->eth_id) != IX_ETH_ACC_SUCCESS) {
479 debug("npe_stop: Error disabling NPEB!\n");
482 /* Delay to give time for recovery of mbufs */
483 for (i = 0; i < 100; i++) {
484 npe_poll(p_npe->eth_id);
489 * For U-Boot only, we are probably launching Linux or other OS that
490 * needs a clean slate for its NPE library.
492 #if 0 /* test-only */
493 for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++) {
494 if (npe_used[i] && npe_exists[i])
495 if (ixNpeDlNpeStopAndReset(__eth_to_npe(i)) != IX_SUCCESS)
496 printf("Failed to stop and reset NPE B.\n");
/*
 * npe_send() - U-Boot eth send hook: take a free TX mbuf off txQHead,
 * copy the frame into its buffer, submit it to the NPE at default
 * priority, and finish with a poll so TX-done callbacks can run.
 * On submit failure the mbuf is returned to the free queue.
 *
 * NOTE(review): the mbuf_dequeue() result is used without a NULL check —
 * if the free TX queue were ever empty this would dereference NULL;
 * confirm the descriptor count guarantees availability.
 */
505 static int npe_send(struct eth_device *dev, void *packet, int len)
507 struct npe *p_npe = (struct npe *)dev->priv;
512 debug("%s\n", __FUNCTION__);
513 m = mbuf_dequeue(&p_npe->txQHead);
514 dest = IX_OSAL_MBUF_MDATA(m);
/* shrink the mbuf to the actual frame length and make it a single-buffer packet */
515 IX_OSAL_MBUF_PKT_LEN(m) = IX_OSAL_MBUF_MLEN(m) = len;
516 IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m) = NULL;
518 memcpy(dest, (char *)packet, len);
520 if ((err = ixEthAccPortTxFrameSubmit(p_npe->eth_id, m, IX_ETH_ACC_TX_DEFAULT_PRIORITY))
521 != IX_ETH_ACC_SUCCESS) {
522 printf("npe_send: Can't submit frame. err[%d]\n", err);
/* give the buffer back so a later send can retry */
523 mbuf_enqueue(&p_npe->txQHead, m);
527 #ifdef DEBUG_PRINT_TX_FRAMES
/* optional hex dump of the outgoing frame */
529 u8 *ptr = IX_OSAL_MBUF_MDATA(m);
532 for (i=0; i<IX_OSAL_MBUF_MLEN(m); i++) {
533 printf("%02x ", *ptr++);
535 printf(" (tx-len=%d)\n", IX_OSAL_MBUF_MLEN(m));
/* let TX-done (and RX) callbacks fire */
539 npe_poll(p_npe->eth_id);
/*
 * npe_rx() - U-Boot eth recv hook: poll the CSR machinery (which fills the
 * ring via npe_rx_callback), then drain every staged frame between rx_read
 * and rx_write into NetReceive(). The rx_read increment is elided in this
 * extract; the index wraps at PKTBUFSRX.
 */
544 static int npe_rx(struct eth_device *dev)
546 struct npe *p_npe = (struct npe *)dev->priv;
548 debug("%s\n", __FUNCTION__);
549 npe_poll(p_npe->eth_id);
551 debug("%s: rx_write=%d rx_read=%d\n", __FUNCTION__, p_npe->rx_write, p_npe->rx_read);
552 while (p_npe->rx_write != p_npe->rx_read) {
553 debug("Reading message #%d\n", p_npe->rx_read);
554 NetReceive(NetRxPackets[p_npe->rx_read], p_npe->rx_len[p_npe->rx_read]);
556 if (p_npe->rx_read == PKTBUFSRX)
/*
 * npe_initialize() - driver registration entry point, called once at boot.
 * For each NPE port: read the MAC address from the environment (skipping
 * ports with no address set), allocate and populate an eth_device plus the
 * driver-private struct npe, wire up the init/halt/send hooks, then detect
 * which Ethernet coprocessors exist for this silicon (stepping-dependent
 * on IXP42X, feature-checked on IXP46X) and prime the allocation pool.
 * The function continues past the end of this extract (presumably CSR
 * load, eth_register and the return value — confirm in the full source).
 */
563 int npe_initialize(bd_t * bis)
/* 'virgin' guards the one-time hardware bring-up across multiple calls */
565 static int virgin = 0;
566 struct eth_device *dev;
568 struct npe *p_npe = NULL;
571 for (eth_num = 0; eth_num < CONFIG_SYS_NPE_NUMS; eth_num++) {
573 /* See if we can actually bring up the interface, otherwise, skip it */
574 #ifdef CONFIG_HAS_ETH1
576 if (!eth_getenv_enetaddr("eth1addr", enetaddr))
580 if (!eth_getenv_enetaddr("ethaddr", enetaddr))
583 /* Allocate device structure */
584 dev = (struct eth_device *)malloc(sizeof(*dev));
586 printf ("%s: Cannot allocate eth_device %d\n", __FUNCTION__, eth_num);
589 memset(dev, 0, sizeof(*dev));
591 /* Allocate our private use data */
592 p_npe = (struct npe *)malloc(sizeof(struct npe));
594 printf("%s: Cannot allocate private hw data for eth_device %d",
595 __FUNCTION__, eth_num);
599 memset(p_npe, 0, sizeof(struct npe));
601 p_npe->eth_id = eth_num;
602 memcpy(dev->enetaddr, enetaddr, 6);
/* pick the PHY address for this port from the board configuration */
603 #ifdef CONFIG_HAS_ETH1
605 p_npe->phy_no = CONFIG_PHY1_ADDR;
608 p_npe->phy_no = CONFIG_PHY_ADDR;
/* register the standard U-Boot network driver hooks */
610 sprintf(dev->name, "NPE%d", eth_num);
611 dev->priv = (void *)p_npe;
612 dev->init = npe_init;
613 dev->halt = npe_halt;
614 dev->send = npe_send;
617 p_npe->print_speed = 1;
/* silicon feature detection: which Eth coprocessors are present */
622 if (ixFeatureCtrlDeviceRead() == IX_FEATURE_CTRL_DEVICE_TYPE_IXP42X) {
623 switch (ixFeatureCtrlProductIdRead() & IX_FEATURE_CTRL_SILICON_STEPPING_MASK) {
624 case IX_FEATURE_CTRL_SILICON_TYPE_B0:
625 default: /* newer than B0 */
627 * If it is B0 or newer Silicon, we
628 * only enable port when its
629 * corresponding Eth Coprocessor is
632 if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH0) ==
633 IX_FEATURE_CTRL_COMPONENT_ENABLED)
634 npe_exists[IX_ETH_PORT_1] = true;
636 if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH1) ==
637 IX_FEATURE_CTRL_COMPONENT_ENABLED)
638 npe_exists[IX_ETH_PORT_2] = true;
640 case IX_FEATURE_CTRL_SILICON_TYPE_A0:
642 * If it is A0 Silicon, we enable both as both Eth Coprocessors
645 npe_exists[IX_ETH_PORT_1] = true;
646 npe_exists[IX_ETH_PORT_2] = true;
649 } else if (ixFeatureCtrlDeviceRead() == IX_FEATURE_CTRL_DEVICE_TYPE_IXP46X) {
650 if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH0) ==
651 IX_FEATURE_CTRL_COMPONENT_ENABLED)
652 npe_exists[IX_ETH_PORT_1] = true;
654 if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH1) ==
655 IX_FEATURE_CTRL_COMPONENT_ENABLED)
656 npe_exists[IX_ETH_PORT_2] = true;
/* U-Boot tries to drive both ports whenever they exist */
659 npe_used[IX_ETH_PORT_1] = 1;
660 npe_used[IX_ETH_PORT_2] = 1;
/* prime the bump allocator (same cacheline alignment as in npe_init) */
662 npe_alloc_end = npe_alloc_pool + sizeof(npe_alloc_pool);
663 npe_alloc_free = (u8 *)(((unsigned)npe_alloc_pool +
664 CONFIG_SYS_CACHELINE_SIZE - 1)
665 & ~(CONFIG_SYS_CACHELINE_SIZE - 1));
673 #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
674 miiphy_register(dev->name, npe_miiphy_read, npe_miiphy_write);
677 } /* end for each supported device */