FILE_LICENCE ( GPL2_ONLY );

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <byteswap.h>
#include <ipxe/io.h>
#include <ipxe/pci.h>
#include <ipxe/iobuf.h>
#include <ipxe/timer.h>
#include <ipxe/malloc.h>
#include <ipxe/if_ether.h>
#include <ipxe/ethernet.h>
#include <ipxe/netdevice.h>

#include "tg3.h"
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0

static void tg3_refill_prod_ring(struct tg3 *tp);
/* Do not place this n-ring-entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
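/* Illustrative sketch (not part of the driver): for any power-of-two
 * ring size, the two expressions below are equivalent, which is the
 * optimisation the comment above relies on.  EXAMPLE_RING_SIZE is an
 * assumed value for demonstration, not a tg3 constant.
 */
#define EXAMPLE_RING_SIZE 512	/* must be a power of two */
static inline unsigned int __unused example_ring_advance(unsigned int idx)
{
	/* equivalent to (idx + 1) % EXAMPLE_RING_SIZE, but compiles
	 * to a single mask instead of a divide */
	return (idx + 1) & (EXAMPLE_RING_SIZE - 1);
}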
/* FIXME: does TG3_RX_RET_MAX_SIZE_5705 work for all cards? */
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (TG3_RX_RET_MAX_SIZE_5705))

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_MAX_SIZE_5700)
void tg3_rx_prodring_fini(struct tg3_rx_prodring_set *tpr)
{ DBGP("%s\n", __func__);

	if (tpr->rx_std) {
		free_dma(tpr->rx_std, TG3_RX_STD_RING_BYTES(tp));
		tpr->rx_std = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	if (tp->tx_ring) {
		free_dma(tp->tx_ring, TG3_TX_RING_BYTES);
		tp->tx_ring = NULL;
	}

	free(tp->tx_buffers);
	tp->tx_buffers = NULL;

	if (tp->rx_rcb) {
		free_dma(tp->rx_rcb, TG3_RX_RCB_RING_BYTES(tp));
		tp->rx_rcb_mapping = 0;
		tp->rx_rcb = NULL;
	}

	tg3_rx_prodring_fini(&tp->prodring);

	if (tp->hw_status) {
		free_dma(tp->hw_status, TG3_HW_STATUS_SIZE);
		tp->status_mapping = 0;
		tp->hw_status = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down. Can sleep.
 */
int tg3_alloc_consistent(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	struct tg3_hw_status *sblk;
	struct tg3_rx_prodring_set *tpr = &tp->prodring;
	tp->hw_status = malloc_dma(TG3_HW_STATUS_SIZE, TG3_DMA_ALIGNMENT);
	if (!tp->hw_status) {
		DBGC(tp->dev, "hw_status alloc failed\n");
		goto err_out;
	}
	tp->status_mapping = virt_to_bus(tp->hw_status);

	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	tpr->rx_std = malloc_dma(TG3_RX_STD_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
	if (!tpr->rx_std) {
		DBGC(tp->dev, "rx prodring alloc failed\n");
		goto err_out;
	}
	tpr->rx_std_mapping = virt_to_bus(tpr->rx_std);
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	tp->tx_buffers = zalloc(sizeof(struct ring_info) * TG3_TX_RING_SIZE);
	if (!tp->tx_buffers)
		goto err_out;

	tp->tx_ring = malloc_dma(TG3_TX_RING_BYTES, TG3_DMA_ALIGNMENT);
	if (!tp->tx_ring)
		goto err_out;
	tp->tx_desc_mapping = virt_to_bus(tp->tx_ring);
	/* When RSS is enabled, the status block format changes
	 * slightly. The "rx_jumbo_consumer", "reserved",
	 * and "rx_mini_consumer" members get mapped to the
	 * other three rx return ring producer indexes.
	 */
	sblk = tp->hw_status;
	tp->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
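	/* For orientation, a sketch of the status block layout assumed
	 * above (the authoritative definition lives in tg3.h; the field
	 * names follow the comment above):
	 *
	 *	struct tg3_hw_status {
	 *		u32 status;		// SD_STATUS_UPDATED, ...
	 *		u32 status_tag;
	 *		u16 rx_jumbo_consumer;
	 *		u16 rx_consumer;
	 *		u16 rx_mini_consumer;
	 *		u16 reserved;
	 *		struct {
	 *			u16 rx_producer; // polled by tg3_rx_complete()
	 *			u16 tx_consumer; // read by tg3_tx_complete()
	 *		} idx[...];
	 *	};
	 */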
	tp->rx_rcb = malloc_dma(TG3_RX_RCB_RING_BYTES(tp), TG3_DMA_ALIGNMENT);
	if (!tp->rx_rcb)
		goto err_out;
	tp->rx_rcb_mapping = virt_to_bus(tp->rx_rcb);

	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
#define TG3_RX_STD_BUFF_RING_BYTES(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_MAX_SIZE_5700)
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static int tg3_rx_prodring_alloc(struct tg3 __unused *tp,
				 struct tg3_rx_prodring_set *tpr)
{ DBGP("%s\n", __func__);

	u32 i;
	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;

	/* Initialize invariants of the rings; we only set this
	 * stuff once. This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	/* FIXME: does TG3_RX_STD_MAX_SIZE_5700 work on all cards? */
	for (i = 0; i < TG3_RX_STD_MAX_SIZE_5700; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = (TG3_RX_STD_DMA_SZ - 64 - 2) << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	return 0;
}
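/* Sketch of the opaque-cookie round trip set up above: the ring tag
 * and slot index packed into rxd->opaque travel through the chip
 * unchanged and come back in the return-ring descriptor, so the
 * completion path can recover the posted slot with
 *
 *	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
 *
 * (see tg3_rx_complete() below).
 */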
static void tg3_rx_iob_free(struct io_buffer *iobs[], int i)
{ DBGP("%s\n", __func__);

	if (iobs[i] == NULL)
		return;

	free_iob(iobs[i]);
	iobs[i] = NULL;
}
static void tg3_rx_prodring_free(struct tg3_rx_prodring_set *tpr)
{ DBGP("%s\n", __func__);

	int i;

	for (i = 0; i < TG3_DEF_RX_RING_PENDING; i++)
		tg3_rx_iob_free(tpr->rx_iobufs, i);
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
int tg3_init_rings(struct tg3 *tp)
{ DBGP("%s\n", __func__);
	/* Free up all the SKBs. */
///	tg3_free_rings(tp);

	tp->last_irq_tag = 0;
	tp->hw_status->status = 0;
	tp->hw_status->status_tag = 0;
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);

	tp->tx_prod = 0;
	tp->tx_cons = 0;
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	tp->rx_rcb_ptr = 0;
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

	if (tg3_rx_prodring_alloc(tp, &tp->prodring)) {
		DBGC(tp->dev, "tg3_rx_prodring_alloc() failed\n");
		tg3_rx_prodring_free(&tp->prodring);
		return -ENOMEM;
	}

	return 0;
}
static int tg3_open(struct net_device *dev)
{ DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);
	struct tg3_rx_prodring_set *tpr = &tp->prodring;
	int err;

	tg3_set_power_state_0(tp);

	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, 0);

	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	tpr->rx_std_iob_cnt = 0;

	err = tg3_init_hw(tp, 1);
	if (err != 0)
		DBGC(tp->dev, "tg3_init_hw failed: %s\n", strerror(err));
	else
		tg3_refill_prod_ring(tp);

	return err;
}
static inline u32 tg3_tx_avail(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	/* Tell compiler to fetch tx indices from memory. */
	barrier();

	return TG3_DEF_TX_RING_PENDING -
	       ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1));
}
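/* Worked example for tg3_tx_avail() (ring sizes assumed purely for
 * illustration): with TG3_TX_RING_SIZE 512 and TG3_DEF_TX_RING_PENDING
 * 100, tx_prod = 5 and tx_cons = 510 give (5 - 510) & 511 = 7
 * descriptors in flight, i.e. 93 still available.  The unsigned
 * subtraction plus power-of-two mask keeps the computation correct
 * across index wrap-around.
 */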
/*
 * Prints all registers that could cause a set ERR bit in hw_status->status
 */
static void tg3_dump_err_reg(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	printf("FLOW_ATTN: %#08x\n", tr32(HOSTCC_FLOW_ATTN));
	printf("MAC ATTN: %#08x\n", tr32(MAC_STATUS));
	printf("MSI STATUS: %#08x\n", tr32(MSGINT_STATUS));
	printf("DMA RD: %#08x\n", tr32(RDMAC_STATUS));
	printf("DMA WR: %#08x\n", tr32(WDMAC_STATUS));
	printf("TX CPU STATE: %#08x\n", tr32(TX_CPU_STATE));
	printf("RX CPU STATE: %#08x\n", tr32(RX_CPU_STATE));
}
static void __unused tw32_mailbox2(struct tg3 *tp, uint32_t reg, uint32_t val)
{ DBGP("%s\n", __func__);

	tw32_mailbox(reg, val);
	/* read back to flush the posted write */
	tr32_mailbox(reg);
}

#define NEXT_TX(N)	(((N) + 1) & (TG3_TX_RING_SIZE - 1))
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static int tg3_transmit(struct net_device *dev, struct io_buffer *iob)
{ DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry;
	dma_addr_t mapping;

	if (tg3_tx_avail(tp) < 1) {
		DBGC(dev, "Transmit ring full\n");
		return -ENOBUFS;
	}

	entry = tp->tx_prod;

	iob_pad(iob, ETH_ZLEN);
	mapping = virt_to_bus(iob->data);
	len = iob_len(iob);

	tp->tx_buffers[entry].iob = iob;

	tg3_set_txd(tp, entry, mapping, len, TXD_FLAG_END);

	entry = NEXT_TX(entry);

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tp->prodmbox, entry);

	tp->tx_prod = entry;

	return 0;
}
static void tg3_tx_complete(struct net_device *dev)
{ DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct io_buffer *iob = tp->tx_buffers[sw_idx].iob;

		DBGC2(dev, "Transmitted packet: %zd bytes\n", iob_len(iob));

		netdev_tx_complete(dev, iob);
		sw_idx = NEXT_TX(sw_idx);
	}

	tp->tx_cons = sw_idx;
}
/* Returns 0 or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses. For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_iob(struct tg3_rx_prodring_set *tpr, u32 dest_idx_unmasked)
{ DBGP("%s\n", __func__);
	struct tg3_rx_buffer_desc *desc;
	struct io_buffer *iob;
	dma_addr_t mapping;
	int dest_idx, iob_idx;

	dest_idx = dest_idx_unmasked & (TG3_RX_STD_MAX_SIZE_5700 - 1);
	desc = &tpr->rx_std[dest_idx];

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	iob = alloc_iob(TG3_RX_STD_DMA_SZ);
	if (iob == NULL)
		return -ENOMEM;

	iob_idx = dest_idx % TG3_DEF_RX_RING_PENDING;
	tpr->rx_iobufs[iob_idx] = iob;

	mapping = virt_to_bus(iob->data);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return 0;
}
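/* Worked example for the address split above (value assumed for
 * illustration): a bus address of 0x0000000123456780 would be posted
 * as addr_hi 0x00000001 and addr_lo 0x23456780.  On 32-bit builds
 * virt_to_bus() returns a 32-bit address, so addr_hi is always 0.
 */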
static void tg3_refill_prod_ring(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	struct tg3_rx_prodring_set *tpr = &tp->prodring;
	int idx = tpr->rx_std_prod_idx;

	DBGCP(tp->dev, "%s\n", __func__);

	while (tpr->rx_std_iob_cnt < TG3_DEF_RX_RING_PENDING) {
		if (tpr->rx_iobufs[idx % TG3_DEF_RX_RING_PENDING] == NULL) {
			if (tg3_alloc_rx_iob(tpr, idx) < 0) {
				DBGC(tp->dev, "alloc_iob() failed for descriptor %d\n", idx);
				break;
			}
			DBGC2(tp->dev, "allocated iob_buffer for descriptor %d\n", idx);
		}

		idx = (idx + 1) % TG3_RX_STD_MAX_SIZE_5700;
		tpr->rx_std_iob_cnt++;
	}

	if ((u32)idx != tpr->rx_std_prod_idx) {
		tpr->rx_std_prod_idx = idx;
		tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
	}
}
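/* The refill path above follows the usual producer-ring protocol:
 * software fills empty slots, advances a local producer index, and
 * only then publishes the new index through the
 * TG3_RX_STD_PROD_IDX_REG mailbox.  The chip never writes to this
 * posting ring (see tg3_rx_prodring_alloc() above); it only reads
 * descriptors up to the published index.
 */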
static void tg3_rx_complete(struct net_device *dev)
{ DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);

	u32 len, hw_idx;
	u32 sw_idx = tp->rx_rcb_ptr;

	struct tg3_rx_prodring_set *tpr = &tp->prodring;

	hw_idx = *(tp->rx_rcb_prod_idx);

	while (sw_idx != hw_idx) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		u32 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		int iob_idx = desc_idx % TG3_DEF_RX_RING_PENDING;
		struct io_buffer *iob = tpr->rx_iobufs[iob_idx];

		DBGC2(dev, "RX - desc_idx: %d sw_idx: %d hw_idx: %d\n",
		      desc_idx, sw_idx, hw_idx);

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
			DBGC(dev, "Corrupted packet received\n");
			netdev_rx_err(dev, iob, -EINVAL);
		} else {
			len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
			      4; /* strip the 4-byte frame check sequence */
			iob_put(iob, len);
			netdev_rx(dev, iob);

			DBGC2(dev, "Received packet: %d bytes %d %d\n", len, sw_idx, hw_idx);
		}

		sw_idx++;
		sw_idx &= TG3_RX_RET_MAX_SIZE_5705 - 1;

		tpr->rx_iobufs[iob_idx] = NULL;
		tpr->rx_std_iob_cnt--;
	}

	if (tp->rx_rcb_ptr != sw_idx) {
		tw32_rx_mbox(tp->consmbox, sw_idx);
		tp->rx_rcb_ptr = sw_idx;
	}

	tg3_refill_prod_ring(tp);
}
static void tg3_poll(struct net_device *dev)
{ DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);

	/* ACK interrupts */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00);

	tp->hw_status->status &= ~SD_STATUS_UPDATED;

	tg3_tx_complete(dev);
	tg3_rx_complete(dev);
}
static void tg3_close(struct net_device *dev)
{ DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);

	tg3_halt(tp);
	tg3_rx_prodring_free(&tp->prodring);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_free_consistent(tp);
}
static void tg3_irq(struct net_device *dev, int enable)
{ DBGP("%s\n", __func__);

	struct tg3 *tp = netdev_priv(dev);

	DBGP("%s: %d\n", __func__, enable);

	if (enable)
		tg3_enable_ints(tp);
	else
		tg3_disable_ints(tp);
}
static struct net_device_operations tg3_netdev_ops = {
	.open = tg3_open,
	.close = tg3_close,
	.poll = tg3_poll,
	.transmit = tg3_transmit,
	.irq = tg3_irq,
};
#define TEST_BUFFER_SIZE	0x2000

int tg3_do_test_dma(struct tg3 *tp, u32 __unused *buf, dma_addr_t buf_dma, int size, int to_device);
void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val);
static int tg3_test_dma(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	dma_addr_t buf_dma;
	u32 *buf;
	int ret = 0;

	buf = malloc_dma(TEST_BUFFER_SIZE, TG3_DMA_ALIGNMENT);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}
	buf_dma = virt_to_bus(buf);
	DBGC2(tp->dev, "dma test buffer, virt: %p phys: %#08lx\n",
	      buf, (unsigned long)buf_dma);
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->dma_rwctrl = DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on. This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning. In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Unneeded, already done by tg3_get_invariants. */
///	tg3_switch_clocks(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			DBGC(&tp->pdev->dev,
			     "%s: Buffer write failed. err = %d\n",
			     __func__, ret);
			break;
		}

		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				DBGC(&tp->pdev->dev,
				     "%s: Buffer corrupted on device! "
				     "(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			DBGC(&tp->pdev->dev, "%s: Buffer read failed. "
			     "err = %d\n", __func__, ret);
			break;
		}
		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				DBGC(&tp->pdev->dev,
				     "%s: Buffer corrupted on read back! "
				     "(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}
		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
		tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}
out:
	free_dma(buf, TEST_BUFFER_SIZE);
out_nofree:
	return ret;
}
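/* Summary of the DMA self test above (a sketch of the flow, not new
 * behaviour): fill a host buffer with a counting pattern, DMA it into
 * card SRAM at offset 0x2100, spot-check it through tg3_read_mem(),
 * DMA it back, and compare.  A mismatch on read back forces the
 * conservative 16-byte write boundary (DMA_RWCTRL_WRITE_BNDRY_16)
 * and retries; chips known to expose the write DMA bug get the same
 * boundary even when the test passes.
 */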
static int tg3_init_one(struct pci_device *pdev)
{ DBGP("%s\n", __func__);

	struct net_device *dev;
	struct tg3 *tp;
	int err = 0;
	unsigned long reg_base, reg_size;

	adjust_pci_device(pdev);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		DBGC(&pdev->dev, "Failed to allocate etherdev\n");
		err = -ENOMEM;
		goto err_out_disable_pdev;
	}
	netdev_init(dev, &tg3_netdev_ops);
	pci_set_drvdata(pdev, dev);

	dev->dev = &pdev->dev;

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	/* Subsystem IDs are required later */
	pci_read_config_word(tp->pdev, PCI_SUBSYSTEM_VENDOR_ID, &tp->subsystem_vendor);
	pci_read_config_word(tp->pdev, PCI_SUBSYSTEM_ID, &tp->subsystem_device);
	/* The word/byte swap controls here control register access byte
	 * swapping. DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#if __BYTE_ORDER == __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	/* FIXME: how can we detect errors here? */
	reg_base = pci_bar_start(pdev, PCI_BASE_ADDRESS_0);
	reg_size = pci_bar_size(pdev, PCI_BASE_ADDRESS_0);

	tp->regs = ioremap(reg_base, reg_size);
	if (!tp->regs) {
		DBGC(&pdev->dev, "Failed to remap device registers\n");
		err = -ENOENT;
		goto err_out_disable_pdev;
	}
	err = tg3_get_invariants(tp);
	if (err) {
		DBGC(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
		goto err_out_iounmap;
	}

	tg3_init_bufmgr_config(tp);

	err = tg3_get_device_address(tp);
	if (err) {
		DBGC(&pdev->dev, "Could not obtain valid ethernet address, aborting\n");
		goto err_out_iounmap;
	}
	/*
	 * Reset chip in case UNDI or EFI driver did not shut it down.
	 * The DMA self test will enable WDMAC, and we would otherwise
	 * see (spurious) pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp);
	}
	err = tg3_test_dma(tp);
	if (err) {
		DBGC(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_iounmap;
	}

	tp->int_mbox = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	tp->consmbox = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	tp->prodmbox = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;

	tp->coal_now = HOSTCC_MODE_NOW;
	err = register_netdev(dev);
	if (err) {
		DBGC(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_iounmap;
	}

	/* Call tg3_setup_phy() to start the autoneg process, which saves
	 * time over starting autoneg in tg3_open().
	 */
	err = tg3_setup_phy(tp, 0);
	if (err) {
		DBGC(tp->dev, "tg3_setup_phy() call failed in %s\n", __func__);
		goto err_out_iounmap;
	}

	return 0;

err_out_iounmap:
	iounmap(tp->regs);
	netdev_nullify(dev);
	netdev_put(dev);

err_out_disable_pdev:
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void tg3_remove_one(struct pci_device *pci)
{ DBGP("%s\n", __func__);

	struct net_device *netdev = pci_get_drvdata(pci);

	unregister_netdev(netdev);
	netdev_nullify(netdev);
	netdev_put(netdev);
}
static struct pci_device_id tg3_nics[] = {
	PCI_ROM(0x14e4, 0x1644, "14e4-1644", "14e4-1644", 0),
	PCI_ROM(0x14e4, 0x1645, "14e4-1645", "14e4-1645", 0),
	PCI_ROM(0x14e4, 0x1646, "14e4-1646", "14e4-1646", 0),
	PCI_ROM(0x14e4, 0x1647, "14e4-1647", "14e4-1647", 0),
	PCI_ROM(0x14e4, 0x1648, "14e4-1648", "14e4-1648", 0),
	PCI_ROM(0x14e4, 0x164d, "14e4-164d", "14e4-164d", 0),
	PCI_ROM(0x14e4, 0x1653, "14e4-1653", "14e4-1653", 0),
	PCI_ROM(0x14e4, 0x1654, "14e4-1654", "14e4-1654", 0),
	PCI_ROM(0x14e4, 0x165d, "14e4-165d", "14e4-165d", 0),
	PCI_ROM(0x14e4, 0x165e, "14e4-165e", "14e4-165e", 0),
	PCI_ROM(0x14e4, 0x16a6, "14e4-16a6", "14e4-16a6", 0),
	PCI_ROM(0x14e4, 0x16a7, "14e4-16a7", "14e4-16a7", 0),
	PCI_ROM(0x14e4, 0x16a8, "14e4-16a8", "14e4-16a8", 0),
	PCI_ROM(0x14e4, 0x16c6, "14e4-16c6", "14e4-16c6", 0),
	PCI_ROM(0x14e4, 0x16c7, "14e4-16c7", "14e4-16c7", 0),
	PCI_ROM(0x14e4, 0x1696, "14e4-1696", "14e4-1696", 0),
	PCI_ROM(0x14e4, 0x169c, "14e4-169c", "14e4-169c", 0),
	PCI_ROM(0x14e4, 0x169d, "14e4-169d", "14e4-169d", 0),
	PCI_ROM(0x14e4, 0x170d, "14e4-170d", "14e4-170d", 0),
	PCI_ROM(0x14e4, 0x170e, "14e4-170e", "14e4-170e", 0),
	PCI_ROM(0x14e4, 0x1649, "14e4-1649", "14e4-1649", 0),
	PCI_ROM(0x14e4, 0x166e, "14e4-166e", "14e4-166e", 0),
	PCI_ROM(0x14e4, 0x1659, "14e4-1659", "14e4-1659", 0),
	PCI_ROM(0x14e4, 0x165a, "14e4-165a", "14e4-165a", 0),
	PCI_ROM(0x14e4, 0x1677, "14e4-1677", "14e4-1677", 0),
	PCI_ROM(0x14e4, 0x167d, "14e4-167d", "14e4-167d", 0),
	PCI_ROM(0x14e4, 0x167e, "14e4-167e", "14e4-167e", 0),
	PCI_ROM(0x14e4, 0x1600, "14e4-1600", "14e4-1600", 0),
	PCI_ROM(0x14e4, 0x1601, "14e4-1601", "14e4-1601", 0),
	PCI_ROM(0x14e4, 0x16f7, "14e4-16f7", "14e4-16f7", 0),
	PCI_ROM(0x14e4, 0x16fd, "14e4-16fd", "14e4-16fd", 0),
	PCI_ROM(0x14e4, 0x16fe, "14e4-16fe", "14e4-16fe", 0),
	PCI_ROM(0x14e4, 0x167a, "14e4-167a", "14e4-167a", 0),
	PCI_ROM(0x14e4, 0x1672, "14e4-1672", "14e4-1672", 0),
	PCI_ROM(0x14e4, 0x167b, "14e4-167b", "14e4-167b", 0),
	PCI_ROM(0x14e4, 0x1673, "14e4-1673", "14e4-1673", 0),
	PCI_ROM(0x14e4, 0x1674, "14e4-1674", "14e4-1674", 0),
	PCI_ROM(0x14e4, 0x169a, "14e4-169a", "14e4-169a", 0),
	PCI_ROM(0x14e4, 0x169b, "14e4-169b", "14e4-169b", 0),
	PCI_ROM(0x14e4, 0x1693, "14e4-1693", "14e4-1693", 0),
	PCI_ROM(0x14e4, 0x167f, "14e4-167f", "14e4-167f", 0),
	PCI_ROM(0x14e4, 0x1668, "14e4-1668", "14e4-1668", 0),
	PCI_ROM(0x14e4, 0x1669, "14e4-1669", "14e4-1669", 0),
	PCI_ROM(0x14e4, 0x1678, "14e4-1678", "14e4-1678", 0),
	PCI_ROM(0x14e4, 0x1679, "14e4-1679", "14e4-1679", 0),
	PCI_ROM(0x14e4, 0x166a, "14e4-166a", "14e4-166a", 0),
	PCI_ROM(0x14e4, 0x166b, "14e4-166b", "14e4-166b", 0),
	PCI_ROM(0x14e4, 0x16dd, "14e4-16dd", "14e4-16dd", 0),
	PCI_ROM(0x14e4, 0x1712, "14e4-1712", "14e4-1712", 0),
	PCI_ROM(0x14e4, 0x1713, "14e4-1713", "14e4-1713", 0),
	PCI_ROM(0x14e4, 0x1698, "14e4-1698", "14e4-1698", 0),
	PCI_ROM(0x14e4, 0x1684, "14e4-1684", "14e4-1684", 0),
	PCI_ROM(0x14e4, 0x165b, "14e4-165b", "14e4-165b", 0),
	PCI_ROM(0x14e4, 0x1681, "14e4-1681", "14e4-1681", 0),
	PCI_ROM(0x14e4, 0x1682, "14e4-1682", "14e4-1682", 0),
	PCI_ROM(0x14e4, 0x1680, "14e4-1680", "14e4-1680", 0),
	PCI_ROM(0x14e4, 0x1688, "14e4-1688", "14e4-1688", 0),
	PCI_ROM(0x14e4, 0x1689, "14e4-1689", "14e4-1689", 0),
	PCI_ROM(0x14e4, 0x1699, "14e4-1699", "14e4-1699", 0),
	PCI_ROM(0x14e4, 0x16a0, "14e4-16a0", "14e4-16a0", 0),
	PCI_ROM(0x14e4, 0x1692, "14e4-1692", "14e4-1692", 0),
	PCI_ROM(0x14e4, 0x1690, "14e4-1690", "14e4-1690", 0),
	PCI_ROM(0x14e4, 0x1694, "14e4-1694", "14e4-1694", 0),
	PCI_ROM(0x14e4, 0x1691, "14e4-1691", "14e4-1691", 0),
	PCI_ROM(0x14e4, 0x1655, "14e4-1655", "14e4-1655", 0),
	PCI_ROM(0x14e4, 0x1656, "14e4-1656", "14e4-1656", 0),
	PCI_ROM(0x14e4, 0x16b1, "14e4-16b1", "14e4-16b1", 0),
	PCI_ROM(0x14e4, 0x16b5, "14e4-16b5", "14e4-16b5", 0),
	PCI_ROM(0x14e4, 0x16b0, "14e4-16b0", "14e4-16b0", 0),
	PCI_ROM(0x14e4, 0x16b4, "14e4-16b4", "14e4-16b4", 0),
	PCI_ROM(0x14e4, 0x16b2, "14e4-16b2", "14e4-16b2", 0),
	PCI_ROM(0x14e4, 0x16b6, "14e4-16b6", "14e4-16b6", 0),
	PCI_ROM(0x14e4, 0x1657, "14e4-1657", "14e4-1657", 0),
	PCI_ROM(0x14e4, 0x165f, "14e4-165f", "14e4-165f", 0),
	PCI_ROM(0x1148, 0x4400, "1148-4400", "1148-4400", 0),
	PCI_ROM(0x1148, 0x4500, "1148-4500", "1148-4500", 0),
	PCI_ROM(0x173b, 0x03e8, "173b-03e8", "173b-03e8", 0),
	PCI_ROM(0x173b, 0x03e9, "173b-03e9", "173b-03e9", 0),
	PCI_ROM(0x173b, 0x03eb, "173b-03eb", "173b-03eb", 0),
	PCI_ROM(0x173b, 0x03ea, "173b-03ea", "173b-03ea", 0),
	PCI_ROM(0x106b, 0x1645, "106b-1645", "106b-1645", 0),
};
struct pci_driver tg3_pci_driver __pci_driver = {
	.ids = tg3_nics,
	.id_count = ARRAY_SIZE(tg3_nics),
	.probe = tg3_init_one,
	.remove = tg3_remove_one,
};