2 * JMicron JMC2x0 series PCIe Ethernet gPXE Device Driver
4 * Copyright 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 FILE_LICENCE ( GPL2_OR_LATER );
32 #include <ipxe/if_ether.h>
33 #include <ipxe/ethernet.h>
34 #include <ipxe/iobuf.h>
35 #include <ipxe/netdevice.h>
36 #include <ipxe/malloc.h>
/*
 * Read a PHY register via the SMI (MDIO) interface.
 *
 * Posts an SMI read request for (phy, reg), busy-waits until the
 * hardware clears SMI_OP_REQ, then returns the data field.
 * NOTE(review): this listing is elided; the use of "again" (a
 * BMSR re-read, presumably because BMSR bits are latched) is not
 * visible here — confirm against the full source.
 */
41 jme_mdio_read(struct net_device *netdev, int phy, int reg)
43 struct jme_adapter *jme = netdev->priv;
/* Flag a second read for MII_BMSR */
44 int i, val, again = (reg == MII_BMSR) ? 1 : 0;
47 jwrite32(jme, JME_SMI, SMI_OP_REQ |
/* Poll until the controller completes the SMI transaction */
51 for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
53 val = jread32(jme, JME_SMI);
54 if ((val & SMI_OP_REQ) == 0)
59 DBG("phy(%d) read timeout : %d\n", phy, reg);
/* Extract the read data from the SMI register */
66 return (val & SMI_DATA_MASK) >> SMI_DATA_SHIFT;
/*
 * Write a PHY register via the SMI (MDIO) interface.
 * Posts the write request and busy-waits until SMI_OP_REQ clears.
 */
70 jme_mdio_write(struct net_device *netdev,
71 int phy, int reg, int val)
73 struct jme_adapter *jme = netdev->priv;
76 jwrite32(jme, JME_SMI, SMI_OP_WRITE | SMI_OP_REQ |
77 ((val << SMI_DATA_SHIFT) & SMI_DATA_MASK) |
78 smi_phy_addr(phy) | smi_reg_addr(reg));
/* Poll for completion; log on timeout */
81 for (i = JME_PHY_TIMEOUT * 50 ; i > 0 ; --i) {
83 if ((jread32(jme, JME_SMI) & SMI_OP_REQ) == 0)
88 DBG("phy(%d) write timeout : %d\n", phy, reg);
/*
 * Reset the PHY: program autonegotiation advertisement (all
 * 10/100 modes plus pause), advertise gigabit on JMC250 only,
 * then set BMCR_RESET on top of the current BMCR value.
 */
94 jme_reset_phy_processor(struct jme_adapter *jme)
98 jme_mdio_write(jme->mii_if.dev,
100 MII_ADVERTISE, ADVERTISE_ALL |
101 ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
/* Only the JMC250 is gigabit-capable */
103 if (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250)
104 jme_mdio_write(jme->mii_if.dev,
107 ADVERTISE_1000FULL | ADVERTISE_1000HALF);
/* Read current BMCR, then self-clearing reset bit on top of it */
109 val = jme_mdio_read(jme->mii_if.dev,
113 jme_mdio_write(jme->mii_if.dev,
115 MII_BMCR, val | BMCR_RESET);
/*
 * Enable a vendor-specific PHY workaround: set bit 12 of PHY
 * register 26 (undocumented vendor register).
 */
121 jme_phy_init(struct jme_adapter *jme)
125 reg26 = jme_mdio_read(jme->mii_if.dev, jme->mii_if.phy_id, 26);
126 jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, 26, reg26 | 0x1000);
/* Set PHY FIFO mode A via vendor register 27 (buggy-chip patch) */
130 jme_set_phyfifoa(struct jme_adapter *jme)
132 jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, 27, 0x0004);
/* Set PHY FIFO mode B via vendor register 27 (buggy-chip patch) */
136 jme_set_phyfifob(struct jme_adapter *jme)
138 jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, 27, 0x0000);
/* Power down the PHY */
142 jme_phy_off(struct jme_adapter *jme)
144 jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
/* Enable and restart autonegotiation */
148 jme_restart_an(struct jme_adapter *jme)
152 bmcr = jme_mdio_read(jme->mii_if.dev, jme->mii_if.phy_id, MII_BMCR);
153 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
154 jme_mdio_write(jme->mii_if.dev, jme->mii_if.phy_id, MII_BMCR, bmcr);
/* Clear speed/duplex bits in the cached GHC value and write it back */
158 jme_reset_ghc_speed(struct jme_adapter *jme)
160 jme->reg_ghc &= ~(GHC_SPEED_1000M | GHC_DPX);
161 jwrite32(jme, JME_GHC, jme->reg_ghc);
/* Enable device interrupts (interrupt-enable-set register) */
165 jme_start_irq(struct jme_adapter *jme)
170 jwrite32(jme, JME_IENS, INTR_ENABLE);
/* Disable device interrupts (interrupt-enable-clear, flushed write) */
174 jme_stop_irq(struct jme_adapter *jme)
179 jwrite32f(jme, JME_IENC, INTR_ENABLE);
/*
 * Program wakeup frame number fnr: select the CRC slot and write
 * the CRC, then write each of the WAKEUP_FRAME_MASK_DWNR mask
 * dwords through the WFOI/WFODP register pair.
 */
183 jme_setup_wakeup_frame(struct jme_adapter *jme,
184 u32 *mask, u32 crc, int fnr)
191 jwrite32(jme, JME_WFOI, WFOI_CRC_SEL | (fnr & WFOI_FRAME_SEL));
193 jwrite32(jme, JME_WFODP, crc);
/* Write all mask dwords for this frame */
199 for (i = 0 ; i < WAKEUP_FRAME_MASK_DWNR ; ++i) {
200 jwrite32(jme, JME_WFOI,
201 ((i << WFOI_MASK_SHIFT) & WFOI_MASK_SEL) |
202 (fnr & WFOI_FRAME_SEL));
204 jwrite32(jme, JME_WFODP, mask[i]);
/*
 * Reset the MAC: pulse GHC_SWRST, clear all RX/TX descriptor
 * base/count/next-address registers, clear the multicast hash,
 * program dummy wakeup frames (zero masks, placeholder CRC), and
 * restore GPREG0/GPREG1 defaults.
 */
210 jme_reset_mac_processor(struct jme_adapter *jme)
212 u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
/* Placeholder CRC for the dummy wakeup frames */
213 u32 crc = 0xCDCDCDCD;
/* Assert, then deassert, software reset */
216 jwrite32(jme, JME_GHC, jme->reg_ghc | GHC_SWRST);
218 jwrite32(jme, JME_GHC, jme->reg_ghc);
/* Clear RX descriptor ring registers */
220 jwrite32(jme, JME_RXDBA_LO, 0x00000000);
221 jwrite32(jme, JME_RXDBA_HI, 0x00000000);
222 jwrite32(jme, JME_RXQDC, 0x00000000);
223 jwrite32(jme, JME_RXNDA, 0x00000000);
/* Clear TX descriptor ring registers */
224 jwrite32(jme, JME_TXDBA_LO, 0x00000000);
225 jwrite32(jme, JME_TXDBA_HI, 0x00000000);
226 jwrite32(jme, JME_TXQDC, 0x00000000);
227 jwrite32(jme, JME_TXNDA, 0x00000000);
/* Clear multicast hash table */
229 jwrite32(jme, JME_RXMCHT_LO, 0x00000000);
230 jwrite32(jme, JME_RXMCHT_HI, 0x00000000);
/* Neutralize all wakeup frame slots */
231 for (i = 0 ; i < WAKEUP_FRAME_NR ; ++i)
232 jme_setup_wakeup_frame(jme, mask, crc, i);
233 jwrite32(jme, JME_GPREG0, GPREG0_DEFAULT);
234 jwrite32(jme, JME_GPREG1, GPREG1_DEFAULT);
/*
 * Complete (with error) and release every outstanding TX buffer,
 * clearing its slot in the ring's bufinf array.
 */
238 jme_free_tx_buffers(struct jme_adapter *jme)
240 struct jme_ring *txring = &jme->txring;
241 struct io_buffer *txbi;
244 for (i = 0; i < jme->tx_ring_size; ++i) {
245 txbi = txring->bufinf[i];
/* Report the buffer back to the net stack as failed */
247 netdev_tx_complete_err(jme->mii_if.dev,
249 txring->bufinf[i] = NULL;
/*
 * Free the TX ring: zero and free the bufinf array, free the
 * DMA-coherent descriptor memory, and reset ring bookkeeping.
 */
255 jme_free_tx_resources(struct jme_adapter *jme)
257 struct jme_ring *txring = &jme->txring;
260 if (txring->bufinf) {
261 memset(txring->bufinf, 0,
262 sizeof(struct io_buffer *) * jme->tx_ring_size);
263 free(txring->bufinf);
265 free_dma(txring->desc, jme->tx_ring_size * TX_DESC_SIZE);
268 txring->bufinf = NULL;
270 txring->next_to_use = 0;
271 txring->next_to_clean = 0;
/*
 * Allocate the TX ring: DMA-coherent descriptor memory plus the
 * bufinf pointer array.  On any failure the partially allocated
 * resources are released via jme_free_tx_resources().
 */
276 jme_alloc_tx_resources(struct jme_adapter *jme)
278 struct jme_ring *txring = &jme->txring;
280 txring->desc = malloc_dma(jme->tx_ring_size * TX_DESC_SIZE,
283 DBG("Can not allocate transmit ring descriptors.\n");
/* Record the bus address the hardware will use */
290 txring->dma = virt_to_bus(txring->desc);
291 txring->bufinf = malloc(sizeof(struct io_buffer *) *
293 if (!(txring->bufinf)) {
294 DBG("Can not allocate transmit buffer info.\n");
299 * Initialize Transmit Buffer Pointers
301 memset(txring->bufinf, 0,
302 sizeof(struct io_buffer *) * jme->tx_ring_size);
/* Error path: release anything already allocated */
307 jme_free_tx_resources(jme);
/*
 * Reset TX ring state and zero the descriptors; any buffers still
 * attached are completed-with-error and released.
 */
312 jme_init_tx_ring(struct jme_adapter *jme)
314 struct jme_ring *txring = &jme->txring;
316 txring->next_to_clean = 0;
317 txring->next_to_use = 0;
318 txring->nr_free = jme->tx_ring_size;
321 * Initialize Transmit Descriptors
323 memset(txring->desc, 0, jme->tx_ring_size * TX_DESC_SIZE);
324 jme_free_tx_buffers(jme);
/*
 * Program the TX queue 0 DMA base address and descriptor count,
 * then enable the TX engine.
 */
328 jme_enable_tx_engine(struct jme_adapter *jme)
/* Select queue 0 before programming its registers */
333 jwrite32(jme, JME_TXCS, TXCS_DEFAULT | TXCS_SELECT_QUEUE0);
337 * Setup TX Queue 0 DMA Base Address
339 jwrite32(jme, JME_TXDBA_LO, (uint64_t)jme->txring.dma & 0xFFFFFFFFUL);
340 jwrite32(jme, JME_TXDBA_HI, (uint64_t)(jme->txring.dma) >> 32);
341 jwrite32(jme, JME_TXNDA, (uint64_t)jme->txring.dma & 0xFFFFFFFFUL);
344 * Setup TX Descriptor Count
346 jwrite32(jme, JME_TXQDC, jme->tx_ring_size);
/* Enable the engine using the cached TXCS configuration */
352 jwrite32(jme, JME_TXCS, jme->reg_txcs |
/*
 * Disable the TX engine and poll TXCS until the ENABLE bit
 * clears, logging on timeout.
 */
359 jme_disable_tx_engine(struct jme_adapter *jme)
367 jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0);
370 val = jread32(jme, JME_TXCS);
371 for (i = JME_TX_DISABLE_TIMEOUT ; (val & TXCS_ENABLE) && i > 0 ; --i) {
373 val = jread32(jme, JME_TXCS);
378 DBG("Disable TX engine timeout.\n");
/*
 * Rearm RX descriptor i: point it at its io_buffer's data, set the
 * buffer length, and hand ownership back to the hardware
 * (RXFLAG_OWN) with interrupt-on-completion (RXFLAG_INT).
 */
383 jme_set_clean_rxdesc(struct jme_adapter *jme, int i)
385 struct jme_ring *rxring = &jme->rxring;
386 register struct rxdesc *rxdesc = rxring->desc;
387 struct io_buffer *rxbi = rxring->bufinf[i];
/* Bus address of the receive buffer */
391 mapping = virt_to_bus(rxbi->data);
395 rxdesc->desc1.bufaddrh = cpu_to_le32(mapping >> 32);
396 rxdesc->desc1.bufaddrl = cpu_to_le32(mapping & 0xFFFFFFFFUL);
397 rxdesc->desc1.datalen = cpu_to_le16(RX_ALLOC_LEN);
/* Give the descriptor back to the NIC last */
399 rxdesc->desc1.flags |= RXFLAG_OWN | RXFLAG_INT;
/*
 * Allocate a fresh RX_ALLOC_LEN io_buffer and store it via rxbip.
 * Logs and (presumably) fails on allocation error — the return
 * path is elided in this listing.
 */
403 jme_make_new_rx_buf(struct io_buffer **rxbip)
405 struct io_buffer *inbuf;
410 inbuf = alloc_iob(RX_ALLOC_LEN);
412 DBG("Allocate receive iob error.\n");
/* Release the io_buffer attached to RX slot i and clear the slot */
421 jme_free_rx_buf(struct jme_adapter *jme, int i)
423 struct jme_ring *rxring = &jme->rxring;
424 struct io_buffer *rxbi = rxring->bufinf[i];
428 rxring->bufinf[i] = NULL;
/*
 * Free the RX ring: each attached buffer, the bufinf array, and
 * the DMA-coherent descriptor memory; then reset ring indices.
 */
433 jme_free_rx_resources(struct jme_adapter *jme)
436 struct jme_ring *rxring = &jme->rxring;
439 if (rxring->bufinf) {
440 for (i = 0 ; i < jme->rx_ring_size ; ++i)
441 jme_free_rx_buf(jme, i);
442 free(rxring->bufinf);
445 free_dma(rxring->desc, jme->rx_ring_size * RX_DESC_SIZE);
448 rxring->bufinf = NULL;
450 rxring->next_to_fill = 0;
451 rxring->next_to_clean = 0;
/*
 * Allocate the RX ring: DMA-coherent descriptors, the bufinf
 * pointer array, and one io_buffer per ring entry.  On any
 * failure everything is released via jme_free_rx_resources().
 */
455 jme_alloc_rx_resources(struct jme_adapter *jme)
458 struct jme_ring *rxring = &jme->rxring;
459 struct io_buffer **bufinf;
461 rxring->desc = malloc_dma(jme->rx_ring_size * RX_DESC_SIZE,
464 DBG("Can not allocate receive ring descriptors.\n");
/* Record the bus address the hardware will use */
471 rxring->dma = virt_to_bus(rxring->desc);
472 rxring->bufinf = malloc(sizeof(struct io_buffer *) *
474 if (!(rxring->bufinf)) {
475 DBG("Can not allocate receive buffer info.\n");
480 * Initialize Receive Buffer Pointers
482 bufinf = rxring->bufinf;
483 memset(bufinf, 0, sizeof(struct io_buffer *) * jme->rx_ring_size);
/* Pre-fill every slot with a receive buffer */
484 for (i = 0 ; i < jme->rx_ring_size ; ++i) {
485 if (jme_make_new_rx_buf(bufinf))
/* Error path: release anything already allocated */
493 jme_free_rx_resources(jme);
/* Rearm every RX descriptor and reset the ring indices */
498 jme_init_rx_ring(struct jme_adapter *jme)
501 struct jme_ring *rxring = &jme->rxring;
503 for (i = 0 ; i < jme->rx_ring_size ; ++i)
504 jme_set_clean_rxdesc(jme, i);
506 rxring->next_to_fill = 0;
507 rxring->next_to_clean = 0;
/*
 * Configure receive filtering: accept all frames (promiscuous,
 * broadcast, unicast) — no multicast hash filtering is set up.
 */
511 jme_set_multi(struct jme_adapter *jme)
514 * Just receive all kind of packet for now.
516 jme->reg_rxmcs |= RXMCS_ALLFRAME | RXMCS_BRDFRAME | RXMCS_UNIFRAME;
517 jwrite32(jme, JME_RXMCS, jme->reg_rxmcs);
/*
 * Program the RX DMA base address, descriptor count and unicast
 * filter, then enable the RX engine.
 */
521 jme_enable_rx_engine(struct jme_adapter *jme)
526 jwrite32(jme, JME_RXCS, jme->reg_rxcs |
531 * Setup RX DMA Base Address
533 jwrite32(jme, JME_RXDBA_LO, (uint64_t)(jme->rxring.dma) & 0xFFFFFFFFUL);
534 jwrite32(jme, JME_RXDBA_HI, (uint64_t)(jme->rxring.dma) >> 32);
535 jwrite32(jme, JME_RXNDA, (uint64_t)(jme->rxring.dma) & 0xFFFFFFFFUL);
538 * Setup RX Descriptor Count
540 jwrite32(jme, JME_RXQDC, jme->rx_ring_size);
543 * Setup Unicast Filter
/* Enable the engine using the cached RXCS configuration */
551 jwrite32(jme, JME_RXCS, jme->reg_rxcs |
/* Re-enable the RX engine (used after an RX-empty condition) */
558 jme_restart_rx_engine(struct jme_adapter *jme)
563 jwrite32(jme, JME_RXCS, jme->reg_rxcs |
/*
 * Disable the RX engine and poll RXCS until the ENABLE bit
 * clears, logging on timeout.
 */
570 jme_disable_rx_engine(struct jme_adapter *jme)
578 jwrite32(jme, JME_RXCS, jme->reg_rxcs);
581 val = jread32(jme, JME_RXCS);
582 for (i = JME_RX_DISABLE_TIMEOUT ; (val & RXCS_ENABLE) && i > 0 ; --i) {
584 val = jread32(jme, JME_RXCS);
589 DBG("Disable RX engine timeout.\n");
/*
 * Refill empty RX ring slots starting at next_to_fill, allocating
 * a new buffer and rearming the descriptor for each; stops early
 * if allocation fails.  curhole is the slot just consumed —
 * NOTE(review): its use in the loop bound is elided here.
 */
594 jme_refill_rx_ring(struct jme_adapter *jme, int curhole)
596 struct jme_ring *rxring = &jme->rxring;
597 int i = rxring->next_to_fill;
598 struct io_buffer **bufinf = rxring->bufinf;
/* Ring size is a power of two; mask implements the wrap-around */
599 int mask = jme->rx_ring_mask;
600 int limit = jme->rx_ring_size;
/* Stop refilling as soon as an allocation fails */
604 if (jme_make_new_rx_buf(bufinf + i))
606 jme_set_clean_rxdesc(jme, i);
612 rxring->next_to_fill = i;
/*
 * Hand the completed packet in slot idx up to the network stack:
 * set its length from the writeback descriptor, pass it to
 * netdev_rx(), detach it from the ring and refill.
 */
616 jme_alloc_and_feed_iob(struct jme_adapter *jme, int idx)
618 struct jme_ring *rxring = &jme->rxring;
619 struct rxdesc *rxdesc = rxring->desc;
620 struct io_buffer *rxbi = rxring->bufinf[idx];
621 struct net_device *netdev = jme->mii_if.dev;
/* Frame length reported by the hardware writeback */
626 framesize = le16_to_cpu(rxdesc->descwb.framesize);
627 iob_put(rxbi, framesize);
628 netdev_rx(netdev, rxbi);
/* Ownership transferred to the stack; clear our reference */
630 rxring->bufinf[idx] = NULL;
631 jme_refill_rx_ring(jme, idx);
/*
 * Process completed RX descriptors starting at next_to_clean.
 * Multi-descriptor or errored packets are dropped (descriptors
 * rearmed, netdev_rx_err reported); good single-descriptor
 * packets are fed to the stack.
 */
635 jme_process_receive(struct jme_adapter *jme)
637 struct jme_ring *rxring = &jme->rxring;
638 struct rxdesc *rxdesc = rxring->desc;
639 struct net_device *netdev = jme->mii_if.dev;
640 int i, j, ccnt, desccnt, mask = jme->rx_ring_mask;
641 unsigned int limit = jme->rx_ring_size;
643 i = rxring->next_to_clean;
/* Stop at an unfilled slot, a HW-owned descriptor, or an
 * incomplete writeback */
645 while (rxring->bufinf[i] &&
646 !(rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) &&
647 (rxdesc->descwb.desccnt & RXWBDCNT_WBCPL) &&
651 desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
652 DBG2("Cleaning rx desc=%d, cnt=%d\n", i, desccnt);
/* Drop packets that span descriptors or carry error status */
654 if (desccnt > 1 || rxdesc->descwb.errstat & RXWBERR_ALLERR) {
655 for (j = i, ccnt = desccnt ; ccnt-- ; ) {
656 jme_set_clean_rxdesc(jme, j);
657 j = (j + 1) & (mask);
659 DBG("Dropped packet due to ");
661 DBG("long packet.(%d descriptors)\n", desccnt);
663 DBG("Packet error.\n");
664 netdev_rx_err(netdev, NULL, -EINVAL);
666 jme_alloc_and_feed_iob(jme, i);
/* Advance past all descriptors of this packet */
669 i = (i + desccnt) & (mask);
670 rxdesc = rxring->desc;
673 rxring->next_to_clean = i;
/*
 * Program the unicast MAC filter registers from netdev->ll_addr:
 * bytes 0-3 into RXUMA_LO, bytes 4-5 into RXUMA_HI.
 */
679 jme_set_custom_macaddr(struct net_device *netdev)
681 struct jme_adapter *jme = netdev->priv;
682 uint8_t *addr = netdev->ll_addr;
/* Pack the first four address bytes, little-endian */
685 val = (addr[3] & 0xff) << 24 |
686 (addr[2] & 0xff) << 16 |
687 (addr[1] & 0xff) << 8 |
689 jwrite32(jme, JME_RXUMA_LO, val);
/* Pack the remaining two address bytes */
690 val = (addr[5] & 0xff) << 8 |
692 jwrite32(jme, JME_RXUMA_HI, val);
698 * @v netdev Net device
699 * @ret rc Return status code
/*
 * Open the network device: allocate RX then TX rings, program the
 * unicast MAC address, and reset the PHY.  Uses goto-based cleanup
 * so RX resources are freed if TX allocation fails.
 */
702 jme_open(struct net_device *netdev)
704 struct jme_adapter *jme = netdev->priv;
708 * Allocate receive resources
710 rc = jme_alloc_rx_resources(jme);
712 DBG("Allocate receive resources error.\n");
717 * Allocate transmit resources
719 rc = jme_alloc_tx_resources(jme);
721 DBG("Allocate transmit resources error.\n");
722 goto free_rx_resources_out;
725 jme_set_custom_macaddr(netdev);
726 jme_reset_phy_processor(jme);
/* Error path: undo the RX allocation */
731 free_rx_resources_out:
732 jme_free_rx_resources(jme);
740 * @v netdev Net device
/*
 * Close the network device: release both rings, reset the MAC,
 * and mark the link down.
 */
743 jme_close(struct net_device *netdev)
745 struct jme_adapter *jme = netdev->priv;
747 jme_free_tx_resources(jme);
748 jme_free_rx_resources(jme);
749 jme_reset_mac_processor(jme);
752 netdev_link_down(netdev);
/*
 * Reserve the next free TX descriptor slot, advancing next_to_use
 * with wrap-around; fails when the ring is full (nr_free < 1).
 */
756 jme_alloc_txdesc(struct jme_adapter *jme)
758 struct jme_ring *txring = &jme->txring;
761 idx = txring->next_to_use;
762 if (txring->nr_free < 1)
765 txring->next_to_use = (txring->next_to_use + 1) & jme->tx_ring_mask;
/*
 * Fill TX descriptor idx for the given io_buffer: length, packet
 * size and bus address, then hand ownership to the NIC.
 */
771 jme_fill_tx_desc(struct jme_adapter *jme, struct io_buffer *iob, int idx)
773 struct jme_ring *txring = &jme->txring;
774 struct txdesc *txdesc = txring->desc;
775 uint16_t len = iob_len(iob);
776 unsigned long int mapping;
/* Bus address of the packet data */
779 mapping = virt_to_bus(iob->data);
780 DBG2("TX buffer address: %p(%08lx+%x)\n",
781 iob->data, mapping, len);
786 txdesc->desc1.datalen = cpu_to_le16(len);
787 txdesc->desc1.pktsize = cpu_to_le16(len);
788 txdesc->desc1.bufaddr = cpu_to_le32(mapping);
790 * Set the OWN bit last, in case transmission is started
791 * before the descriptor is fully written: otherwise the NIC
792 * could try to send this descriptor before we tell
793 * it to start sending this TX queue.
794 * Other fields are already filled correctly.
797 txdesc->desc1.flags = TXFLAG_OWN | TXFLAG_INT;
799 * Record the tx buffer after telling the NIC to send,
800 * for better tx_clean timing.
803 txring->bufinf[idx] = iob;
/*
 * Transmit a packet
 */
809 * @v netdev Network device
810 * @v iobuf I/O buffer
811 * @ret rc Return status code
814 jme_transmit(struct net_device *netdev, struct io_buffer *iobuf)
816 struct jme_adapter *jme = netdev->priv;
/* Reserve a descriptor slot; fails when the ring is full */
819 idx = jme_alloc_txdesc(jme);
822 * Pause transmit queue somehow if possible.
824 DBG("TX ring full!\n");
828 jme_fill_tx_desc(jme, iobuf, idx);
/* Kick the TX engine to process the new descriptor */
830 jwrite32(jme, JME_TXCS, jme->reg_txcs |
834 DBG2("xmit: idx=%d\n", idx);
/*
 * Check PHY link state and, unless testonly, reconfigure the MAC
 * (GHC clocks/speed/duplex, TXMCS, GPREG1 chip-bug patches) to
 * match the resolved speed/duplex, then report link up/down to
 * the network stack.
 */
840 jme_check_link(struct net_device *netdev, int testonly)
842 struct jme_adapter *jme = netdev->priv;
843 u32 phylink, ghc, cnt = JME_SPDRSV_TIMEOUT, gpreg1;
846 phylink = jread32(jme, JME_PHY_LINK);
848 if (phylink & PHY_LINK_UP) {
850 * Keep polling for speed/duplex resolve complete
852 while (!(phylink & PHY_LINK_SPEEDDPU_RESOLVED) &&
856 phylink = jread32(jme, JME_PHY_LINK);
859 DBG("Waiting speed resolve timeout.\n");
/* No change since last check: nothing to reprogram */
861 if (jme->phylink == phylink) {
868 jme->phylink = phylink;
/* Rebuild GHC: clear speed/duplex/clock-source bits first */
870 ghc = jme->reg_ghc & ~(GHC_SPEED | GHC_DPX |
871 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE |
872 GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY);
873 switch (phylink & PHY_LINK_SPEED_MASK) {
874 case PHY_LINK_SPEED_10M:
875 ghc |= GHC_SPEED_10M |
876 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
878 case PHY_LINK_SPEED_100M:
879 ghc |= GHC_SPEED_100M |
880 GHC_TO_CLK_PCIE | GHC_TXMAC_CLK_PCIE;
/* Gigabit uses the GPHY clock source instead of PCIe */
882 case PHY_LINK_SPEED_1000M:
883 ghc |= GHC_SPEED_1000M |
884 GHC_TO_CLK_GPHY | GHC_TXMAC_CLK_GPHY;
890 if (phylink & PHY_LINK_DUPLEX) {
891 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT);
/* Half duplex: enable collision handling and TX pause/retry */
894 jwrite32(jme, JME_TXMCS, TXMCS_DEFAULT |
898 jwrite32(jme, JME_TXTRHD, TXTRHD_TXPEN |
899 ((0x2000 << TXTRHD_TXP_SHIFT) & TXTRHD_TXP) |
901 ((8 << TXTRHD_TXRL_SHIFT) & TXTRHD_TXRL));
/* Workarounds for buggy JMC250 revisions */
904 gpreg1 = GPREG1_DEFAULT;
905 if (is_buggy250(jme->pdev->device, jme->chiprev)) {
906 if (!(phylink & PHY_LINK_DUPLEX))
907 gpreg1 |= GPREG1_HALFMODEPATCH;
908 switch (phylink & PHY_LINK_SPEED_MASK) {
909 case PHY_LINK_SPEED_10M:
910 jme_set_phyfifoa(jme);
911 gpreg1 |= GPREG1_RSSPATCH;
913 case PHY_LINK_SPEED_100M:
914 jme_set_phyfifob(jme);
915 gpreg1 |= GPREG1_RSSPATCH;
917 case PHY_LINK_SPEED_1000M:
918 jme_set_phyfifoa(jme);
925 jwrite32(jme, JME_GPREG1, gpreg1);
926 jwrite32(jme, JME_GHC, ghc);
929 DBG("Link is up at %d Mbps, %s-Duplex, MDI%s.\n",
930 ((phylink & PHY_LINK_SPEED_MASK)
931 == PHY_LINK_SPEED_1000M) ? 1000 :
932 ((phylink & PHY_LINK_SPEED_MASK)
933 == PHY_LINK_SPEED_100M) ? 100 : 10,
934 (phylink & PHY_LINK_DUPLEX) ? "Full" : "Half",
935 (phylink & PHY_LINK_MDI_STAT) ? "-X" : "");
936 netdev_link_up(netdev);
941 DBG("Link is down.\n");
943 netdev_link_down(netdev);
/*
 * Handle a link change: if nothing changed, return early; on a
 * previously-up link, tear down both engines and reset the MAC;
 * then re-check the link and, if up, reinitialize and re-enable
 * the RX and TX engines.
 */
951 jme_link_change(struct net_device *netdev)
953 struct jme_adapter *jme = netdev->priv;
956 * Do nothing if the link status did not change.
958 if (jme_check_link(netdev, 1))
961 if (netdev_link_ok(netdev)) {
962 netdev_link_down(netdev);
963 jme_disable_rx_engine(jme);
964 jme_disable_tx_engine(jme);
965 jme_reset_ghc_speed(jme);
966 jme_reset_mac_processor(jme);
/* Reconfigure for the new link parameters */
969 jme_check_link(netdev, 0);
970 if (netdev_link_ok(netdev)) {
971 jme_init_rx_ring(jme);
972 jme_enable_rx_engine(jme);
973 jme_init_tx_ring(jme);
974 jme_enable_tx_engine(jme);
/*
 * Reclaim completed TX descriptors: walk from next_to_clean over
 * the in-flight entries, complete each buffer whose OWN bit the
 * hardware has cleared (with -EIO on error status), and return
 * the slots to the free pool.
 */
981 jme_tx_clean(struct jme_adapter *jme)
983 struct jme_ring *txring = &jme->txring;
984 struct txdesc *txdesc = txring->desc;
985 struct io_buffer *txbi;
986 struct net_device *netdev = jme->mii_if.dev;
987 int i, cnt = 0, max, err, mask;
/* Number of descriptors currently in flight */
989 max = jme->tx_ring_size - txring->nr_free;
990 mask = jme->tx_ring_mask;
992 for (i = txring->next_to_clean ; cnt < max ; ++cnt) {
994 txbi = txring->bufinf[i];
/* Only reclaim slots the hardware has finished with */
996 if (txbi && !(txdesc[i].descwb.flags & TXWBFLAG_OWN)) {
997 DBG2("TX clean address: %08lx(%08lx+%zx)\n",
998 (unsigned long)txbi->data,
999 virt_to_bus(txbi->data),
1001 err = txdesc[i].descwb.flags & TXWBFLAG_ALLERR;
1003 netdev_tx_complete_err(netdev, txbi, -EIO);
1005 netdev_tx_complete(netdev, txbi);
1006 txring->bufinf[i] = NULL;
1014 DBG2("txclean: next %d\n", i);
1015 txring->next_to_clean = i;
1016 txring->nr_free += cnt;
1019 * Poll for received packets
1021 * @v netdev Network device
/*
 * Polling entry point: read IEVE, handle link change, reclaim
 * completed transmits, process received packets (restarting the
 * RX engine on ring-empty), and acknowledge handled events.
 */
1024 jme_poll(struct net_device *netdev)
1026 struct jme_adapter *jme = netdev->priv;
1029 intrstat = jread32(jme, JME_IEVE);
1032 * Check if any actions needs to perform.
1034 if ((intrstat & INTR_ENABLE) == 0)
1038 * Check if the device still exist
/* All-ones read indicates the device has gone away */
1040 if (intrstat == ~((typeof(intrstat))0))
1043 DBG2("intrstat 0x%08x\n", intrstat);
1044 if (intrstat & (INTR_LINKCH | INTR_SWINTR)) {
1045 DBG2("Link changed\n");
1046 jme_link_change(netdev);
1049 * Clear all interrupt status
1051 jwrite32(jme, JME_IEVE, intrstat);
1054 * Link change event is critical
1055 * all other events are ignored
1061 * Process transmission complete first to free more memory.
1063 if (intrstat & INTR_TX0) {
1064 DBG2("Packet transmit complete\n");
1066 jwrite32(jme, JME_IEVE, intrstat & INTR_TX0);
1069 if (intrstat & (INTR_RX0 | INTR_RX0EMP)) {
1070 DBG2("Packet received\n");
1071 jme_process_receive(jme);
1072 jwrite32(jme, JME_IEVE,
1073 intrstat & (INTR_RX0 | INTR_RX0EMP));
/* RX ring ran dry: restart the engine after refilling */
1074 if (intrstat & INTR_RX0EMP)
1075 jme_restart_rx_engine(jme);
1079 * Clean all other interrupt status
1081 jwrite32(jme, JME_IEVE,
1082 intrstat & ~(INTR_RX0 | INTR_RX0EMP | INTR_TX0));
1086 * Enable/disable interrupts
1088 * @v netdev Network device
1089 * @v enable Interrupts should be enabled
1092 jme_irq(struct net_device *netdev, int enable)
1094 struct jme_adapter *jme = netdev->priv;
1096 DBG("jme interrupts %s\n", (enable ? "enabled" : "disabled"))
1103 /** JME net device operations */
1104 static struct net_device_operations jme_operations = {
1107 .transmit = jme_transmit,
/*
 * Read the chip mode register and cache the FPGA version and
 * chip revision for later workaround checks.
 */
1113 jme_check_hw_ver(struct jme_adapter *jme)
1117 chipmode = jread32(jme, JME_CHIPMODE);
1119 jme->fpgaver = (chipmode & CM_FPGAVER_MASK) >> CM_FPGAVER_SHIFT;
1120 jme->chiprev = (chipmode & CM_CHIPREV_MASK) >> CM_CHIPREV_SHIFT;
/*
 * Trigger an EEPROM reload (if an EEPROM is present per
 * SMBCSR_EEPROMD) and poll until SMBCSR_RELOAD clears, logging
 * on timeout.
 */
1124 jme_reload_eeprom(struct jme_adapter *jme)
1129 val = jread32(jme, JME_SMBCSR);
1131 if (val & SMBCSR_EEPROMD) {
1132 val |= SMBCSR_CNACK;
1133 jwrite32(jme, JME_SMBCSR, val);
1134 val |= SMBCSR_RELOAD;
1135 jwrite32(jme, JME_SMBCSR, val);
/* Wait for the reload to complete */
1138 for (i = JME_EEPROM_RELOAD_TIMEOUT; i > 0; --i) {
1140 if ((jread32(jme, JME_SMBCSR) & SMBCSR_RELOAD) == 0)
1145 DBG("eeprom reload timeout\n");
/*
 * Read the station MAC address out of the RXUMA registers
 * (byte order little-endian) into netdev->hw_addr.
 */
1154 jme_load_macaddr(struct net_device *netdev)
1156 struct jme_adapter *jme = netdev_priv(netdev);
1157 unsigned char macaddr[6];
1160 val = jread32(jme, JME_RXUMA_LO);
1161 macaddr[0] = (val >> 0) & 0xFF;
1162 macaddr[1] = (val >> 8) & 0xFF;
1163 macaddr[2] = (val >> 16) & 0xFF;
1164 macaddr[3] = (val >> 24) & 0xFF;
1165 val = jread32(jme, JME_RXUMA_HI);
1166 macaddr[4] = (val >> 0) & 0xFF;
1167 macaddr[5] = (val >> 8) & 0xFF;
1168 memcpy(netdev->hw_addr, macaddr, 6);
1176 * @ret rc Return status code
/*
 * PCI probe: allocate and initialize the net device, map the
 * register BAR, set default register values and ring sizes,
 * adjust the PCI device, pick TX DMA size from the PCIe Max Read
 * Request Size, detect hardware version, reset the MAC, reload
 * the EEPROM, read the MAC address, and register the netdev.
 */
1179 jme_probe(struct pci_device *pci)
1181 struct net_device *netdev;
1182 struct jme_adapter *jme;
1186 /* Allocate net device */
1187 netdev = alloc_etherdev(sizeof(*jme));
1190 netdev_init(netdev, &jme_operations);
1192 pci_set_drvdata(pci, netdev);
1193 netdev->dev = &pci->dev;
/* Map the memory-mapped register window */
1194 jme->regs = ioremap(pci->membase, JME_REGS_SIZE);
1196 DBG("Mapping PCI resource region error.\n");
/* Cache default RX control/multicast settings */
1201 jme->reg_rxcs = RXCS_DEFAULT;
1202 jme->reg_rxmcs = RXMCS_DEFAULT;
/* MII interface wiring: PHY address 1, our MDIO accessors */
1205 jme->mii_if.dev = netdev;
1206 jme->mii_if.phy_id = 1;
1207 jme->mii_if.mdio_read = jme_mdio_read;
1208 jme->mii_if.mdio_write = jme_mdio_write;
/* 16-entry rings; masks assume power-of-two sizes */
1209 jme->rx_ring_size = 1 << 4;
1210 jme->rx_ring_mask = jme->rx_ring_size - 1;
1211 jme->tx_ring_size = 1 << 4;
1212 jme->tx_ring_mask = jme->tx_ring_size - 1;
1214 /* Fix up PCI device */
1215 adjust_pci_device(pci);
1218 * Get Max Read Req Size from PCI Config Space
1220 pci_read_config_byte(pci, PCI_DCSR_MRRS, &mrrs);
1221 mrrs &= PCI_DCSR_MRRS_MASK;
/* Choose TX DMA burst size to match the MRRS setting */
1224 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_128B;
1227 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_256B;
1230 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
1235 * Get basic hardware info.
1237 jme_check_hw_ver(jme);
/* Only the JMC250 supports GMII (gigabit) */
1238 if (pci->device == PCI_DEVICE_ID_JMICRON_JMC250)
1239 jme->mii_if.supports_gmii = 1;
1241 jme->mii_if.supports_gmii = 0;
1246 jme_set_phyfifoa(jme);
1250 * Bring down phy before interface is opened.
1255 * Reset MAC processor and reload EEPROM for MAC Address
1257 jme_reset_mac_processor(jme);
1258 rc = jme_reload_eeprom(jme);
1260 DBG("Reload eeprom for reading MAC Address error.\n");
1263 jme_load_macaddr(netdev);
1265 /* Register network device */
1266 if ((rc = register_netdev(netdev)) != 0) {
1267 DBG("Register net_device error.\n");
/* Error path: tear down the partially initialized netdev */
1276 netdev_nullify(netdev);
/*
 * PCI remove: unregister and tear down the net device.
 */
1287 jme_remove(struct pci_device *pci)
1289 struct net_device *netdev = pci_get_drvdata(pci);
1290 struct jme_adapter *jme = netdev->priv;
1293 unregister_netdev(netdev);
1294 netdev_nullify(netdev);
/* Supported PCI IDs: JMC250 gigabit and JMC260 fast ethernet */
1298 static struct pci_device_id jm_nics[] = {
1299 PCI_ROM(0x197b, 0x0250, "jme", "JMicron Gigabit Ethernet", 0),
1300 PCI_ROM(0x197b, 0x0260, "jmfe", "JMicron Fast Ethernet", 0),
/* PCI driver registration */
1303 struct pci_driver jme_driver __pci_driver = {
1305 .id_count = ( sizeof ( jm_nics ) / sizeof ( jm_nics[0] ) ),
1307 .remove = jme_remove,