2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2011 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
18 FILE_LICENCE ( GPL2_ONLY );
26 #include <ipxe/iobuf.h>
27 #include <ipxe/timer.h>
28 #include <ipxe/malloc.h>
29 #include <ipxe/if_ether.h>
30 #include <ipxe/ethernet.h>
31 #include <ipxe/netdevice.h>
35 #define RESET_KIND_SHUTDOWN 0
36 #define RESET_KIND_INIT 1
37 #define RESET_KIND_SUSPEND 2
39 #define TG3_DEF_MAC_MODE 0
41 void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
42 { DBGP("%s\n", __func__);
44 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
45 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
48 u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
49 { DBGP("%s\n", __func__);
53 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
54 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
58 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
59 { DBGP("%s\n", __func__);
61 return readl(tp->regs + off + GRCMBOX_BASE);
64 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
65 { DBGP("%s\n", __func__);
67 writel(val, tp->regs + off + GRCMBOX_BASE);
70 void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
71 { DBGP("%s\n", __func__);
73 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
74 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
75 TG3_64BIT_REG_LOW, val);
78 if (off == TG3_RX_STD_PROD_IDX_REG) {
79 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
80 TG3_64BIT_REG_LOW, val);
84 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
85 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
87 /* In indirect mode when disabling interrupts, we also need
88 * to clear the interrupt bit in the GRC local ctrl register.
90 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
92 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
93 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
97 u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
98 { DBGP("%s\n", __func__);
102 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
103 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
108 /* usec_wait specifies the wait time in usec when writing to certain registers
109 * where it is unsafe to read back the register without some delay.
110 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
111 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
113 void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
114 { DBGP("%s\n", __func__);
121 /* Wait again after the read for the posted method to guarantee that
122 * the wait time is met.
128 /* stolen from legacy etherboot tg3 driver */
129 void tg3_set_power_state_0(struct tg3 *tp)
130 { DBGP("%s\n", __func__);
132 uint16_t power_control;
135 /* Make sure register accesses (indirect or otherwise)
136 * will function correctly.
138 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
140 pci_read_config_word(tp->pdev, pm + PCI_PM_CTRL, &power_control);
142 power_control |= PCI_PM_CTRL_PME_STATUS;
143 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
145 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
147 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
152 void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
153 { DBGP("%s\n", __func__);
155 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
156 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
161 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
162 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
164 /* Always leave this as zero. */
165 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
168 #define PCI_VENDOR_ID_ARIMA 0x161f
/* Pull the hardware configuration that bootcode copied from EEPROM into
 * NIC SRAM, and derive the PHY id, LED mode and assorted feature flags.
 *
 * NOTE(review): this chunk appears to have lost lines (local variable
 * declarations such as val/pmcsr, the switch(led_cfg) header, break
 * statements, else branches and closing braces); the comments below
 * annotate only what is visible — confirm against the full file.
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0. */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed. Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it. */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Defaults used when no valid SRAM signature is found. */
	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			/* Recombine the SRAM encoding into MII PHY id layout */
			eeprom_phy_id = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;

		/* On 5750+ the LED mode lives in CFG_2; otherwise in CFG */
		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
					  SHASTA_EXT_LED_MODE_MASK);
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

		/* Vendor-specific LED quirks override the SRAM setting */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Certain Arima boards misreport write protect */
			if ((tp->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->subsystem_device == 0x205a ||
			     tp->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, ENABLE_ASF))
			tg3_flag_set(tp, ENABLE_APE);

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);

		/* RGMII bridge configuration from CFG_4 (5785 only) */
		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
359 static void tg3_switch_clocks(struct tg3 *tp)
360 { DBGP("%s\n", __func__);
365 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
368 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
370 orig_clock_ctrl = clock_ctrl;
371 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
372 CLOCK_CTRL_CLKRUN_OENABLE |
374 tp->pci_clock_ctrl = clock_ctrl;
376 if (tg3_flag(tp, 5705_PLUS)) {
377 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
378 tw32_wait_f(TG3PCI_CLOCK_CTRL,
379 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
381 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
382 tw32_wait_f(TG3PCI_CLOCK_CTRL,
384 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
386 tw32_wait_f(TG3PCI_CLOCK_CTRL,
387 clock_ctrl | (CLOCK_CTRL_ALTCLK),
390 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
/* Probe the chip revision and bus configuration and initialise the
 * per-chip feature flags, mailbox/register access methods, GRC local
 * control and PHY flags. Returns 0 on success or a negative error.
 *
 * NOTE(review): this chunk appears to have lost lines (several local
 * declarations, statement continuations such as "&misc_ctrl_reg);",
 * else branches and closing braces); the comments below annotate only
 * what is visible — confirm against the full file.
 */
int tg3_get_invariants(struct tg3 *tp)
{ DBGP("%s\n", __func__);

	u32 pci_state_reg, grc_misc_cfg;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver have this
	 * workaround but turns MWI off all the times so never uses
	 * it. This seems to suggest that the workaround is insufficient. */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
	 * has the register indirect write enable bit set before
	 * we try to access any of the MMIO registers. It is also
	 * critical that the PCI-X hw workaround situation is decided
	 * before that as well. */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
		u32 prod_id_asic_rev;

		/* Newer chips report their revision in a product id reg */
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN2_PRODID_ASICREV,
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			pci_read_config_dword(tp->pdev,
					      TG3PCI_GEN15_PRODID_ASICREV,
			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,

		tp->pci_chip_rev_id = prod_id_asic_rev;

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	/* Initialize misc host control in PCI block. */
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,

	/* Build up the chip-generation feature flags, newest first */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766 ||
	    tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);

	if (tg3_flag(tp, 5717_PLUS))
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,

	/* Classify the host bus: PCIe, PCI or PCI-X */
	tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
	if (tp->pcie_cap != 0) {

		tg3_flag_set(tp, PCI_EXPRESS);

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,

		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);

	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		     "Cannot find PCI-X capability, aborting\n");

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order. */

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug. */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

	/* If we are in PCI-X mode, enable register write workaround.
	 * The workaround is to use indirect register accesses
	 * for all chip writes not to mailbox registers. */
	if (tg3_flag(tp, PCIX_MODE)) {

		tg3_flag_set(tp, PCIX_TARGET_HWBUG);

		/* The chip can have it's power management PCI config
		 * space registers clobbered due to this bug.
		 * So explicitly force the chip into D0 here. */
		pci_read_config_dword(tp->pdev,
				      tp->pm_cap + PCI_PM_CTRL,
		pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
		pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
		pci_write_config_dword(tp->pdev,
				      tp->pm_cap + PCI_PM_CTRL,

		/* Also, force SERR#/PERR# in PCI command. */
		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);

	/* Default mailbox accessors: indirect (config-space) methods */
	tp->write32_mbox = tg3_write_indirect_reg32;
	tp->write32_rx_mbox = tg3_write_indirect_mbox;
	tp->write32_tx_mbox = tg3_write_indirect_mbox;
	tp->read32_mbox = tg3_read_indirect_mbox;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power. */
	tg3_get_eeprom_hw_cfg(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, CPMU_PRESENT);

	/* Set up tp->grc_local_ctrl before calling tg3_power_up().
	 * GPIO1 driven high will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs. */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;

	/* Force the chip into D0. */
	tg3_set_power_state_0(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Set these bits to enable statistics workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly. If the return
			 * value is bad, force enable the PCIX workaround. */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);

	/* FIXME: do we need nvram access? */
	/// tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,

	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
		tp->mac_mode = TG3_DEF_MAC_MODE;

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
		DBGC(&tp->pdev->dev, "phy probe failed, err: %s\n", strerror(err));
		/* ... but do not return immediately ... */

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
		tg3_flag_clear(tp, POLL_SERDES);

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;
849 void tg3_init_bufmgr_config(struct tg3 *tp)
850 { DBGP("%s\n", __func__);
852 if (tg3_flag(tp, 57765_PLUS)) {
853 tp->bufmgr_config.mbuf_read_dma_low_water =
854 DEFAULT_MB_RDMA_LOW_WATER_5705;
855 tp->bufmgr_config.mbuf_mac_rx_low_water =
856 DEFAULT_MB_MACRX_LOW_WATER_57765;
857 tp->bufmgr_config.mbuf_high_water =
858 DEFAULT_MB_HIGH_WATER_57765;
860 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
861 DEFAULT_MB_RDMA_LOW_WATER_5705;
862 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
863 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
864 tp->bufmgr_config.mbuf_high_water_jumbo =
865 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
866 } else if (tg3_flag(tp, 5705_PLUS)) {
867 tp->bufmgr_config.mbuf_read_dma_low_water =
868 DEFAULT_MB_RDMA_LOW_WATER_5705;
869 tp->bufmgr_config.mbuf_mac_rx_low_water =
870 DEFAULT_MB_MACRX_LOW_WATER_5705;
871 tp->bufmgr_config.mbuf_high_water =
872 DEFAULT_MB_HIGH_WATER_5705;
873 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
874 tp->bufmgr_config.mbuf_mac_rx_low_water =
875 DEFAULT_MB_MACRX_LOW_WATER_5906;
876 tp->bufmgr_config.mbuf_high_water =
877 DEFAULT_MB_HIGH_WATER_5906;
880 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
881 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
882 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
883 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
884 tp->bufmgr_config.mbuf_high_water_jumbo =
885 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
887 tp->bufmgr_config.mbuf_read_dma_low_water =
888 DEFAULT_MB_RDMA_LOW_WATER;
889 tp->bufmgr_config.mbuf_mac_rx_low_water =
890 DEFAULT_MB_MACRX_LOW_WATER;
891 tp->bufmgr_config.mbuf_high_water =
892 DEFAULT_MB_HIGH_WATER;
894 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
895 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
896 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
897 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
898 tp->bufmgr_config.mbuf_high_water_jumbo =
899 DEFAULT_MB_HIGH_WATER_JUMBO;
902 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
903 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
906 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
908 void tg3_wait_for_event_ack(struct tg3 *tp)
909 { DBGP("%s\n", __func__);
913 for (i = 0; i < TG3_FW_EVENT_TIMEOUT_USEC / 10; i++) {
914 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
921 void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
922 { DBGP("%s\n", __func__);
924 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
925 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
928 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
929 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
931 /* Always leave this as zero. */
932 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
935 static void tg3_stop_fw(struct tg3 *tp)
936 { DBGP("%s\n", __func__);
938 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
939 /* Wait for RX cpu to ACK the previous event. */
940 tg3_wait_for_event_ack(tp);
942 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
944 tg3_generate_fw_event(tp);
946 /* Wait for RX cpu to ACK this event. */
947 tg3_wait_for_event_ack(tp);
951 static void tg3_write_sig_pre_reset(struct tg3 *tp)
952 { DBGP("%s\n", __func__);
954 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
955 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
958 void tg3_disable_ints(struct tg3 *tp)
959 { DBGP("%s\n", __func__);
961 tw32(TG3PCI_MISC_HOST_CTRL,
962 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
964 tw32_mailbox_f(tp->int_mbox, 0x00000001);
967 void tg3_enable_ints(struct tg3 *tp)
968 { DBGP("%s\n", __func__);
970 tw32(TG3PCI_MISC_HOST_CTRL,
971 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
973 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
975 tw32_mailbox_f(tp->int_mbox, tp->last_tag << 24);
977 /* Force an initial interrupt */
978 if (!tg3_flag(tp, TAGGED_STATUS) &&
979 (tp->hw_status->status & SD_STATUS_UPDATED))
980 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
982 tw32(HOSTCC_MODE, tp->coal_now);
985 #define MAX_WAIT_CNT 1000
987 /* To stop a block, clear the enable bit and poll till it clears. */
988 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
989 { DBGP("%s\n", __func__);
994 if (tg3_flag(tp, 5705_PLUS)) {
1001 /* We can't enable/disable these bits of the
1002 * 5705/5750, just say success.
1015 for (i = 0; i < MAX_WAIT_CNT; i++) {
1018 if ((val & enable_bit) == 0)
1022 if (i == MAX_WAIT_CNT) {
1023 DBGC(&tp->pdev->dev,
1024 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
/* Quiesce the whole NIC: disable interrupts, then stop every receive,
 * send and DMA block in dependency order, reset the flow-through
 * queues, and finally clear the host status block.  Accumulates the
 * OR of all tg3_stop_block() results in 'err'. */
1032 static int tg3_abort_hw(struct tg3 *tp)
1033 { DBGP("%s\n", __func__);
1037 tg3_disable_ints(tp);
/* Stop MAC receive first so no new frames enter the RX pipeline. */
1039 tp->rx_mode &= ~RX_MODE_ENABLE;
1040 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Receive-side blocks. */
1043 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
1044 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
1045 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
1046 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
1047 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
1048 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
/* Send-side blocks and the DMA engines. */
1050 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
1051 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
1052 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
1053 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
1054 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
1055 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
1056 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
/* Disable the MAC transmit data engine, then MAC TX mode, and wait
 * for the TX enable bit to clear. */
1058 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
1059 tw32_f(MAC_MODE, tp->mac_mode);
1062 tp->tx_mode &= ~TX_MODE_ENABLE;
1063 tw32_f(MAC_TX_MODE, tp->tx_mode);
1065 for (i = 0; i < MAX_WAIT_CNT; i++) {
1067 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
1070 if (i >= MAX_WAIT_CNT) {
1071 DBGC(&tp->pdev->dev,
1072 "%s timed out, TX_MODE_ENABLE will not clear "
1073 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
/* Host coalescing, write DMA and mailbox-free blocks. */
1077 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
1078 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
1079 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
/* Pulse the flow-through queue reset to flush internal queues. */
1081 tw32(FTQ_RESET, 0xffffffff);
1082 tw32(FTQ_RESET, 0x00000000);
1084 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
1085 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
/* Wipe the DMA'd status block now that the hardware is stopped. */
1088 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
/* Program the device's MAC address (taken from tp->dev->ll_addr) into
 * the four MAC_ADDR_{0..3} high/low register pairs, and — on 5703/5704
 * — into the twelve extended address slots as well.  Also seeds the
 * transmit backoff generator from the address bytes.
 * skip_mac_1: when non-zero, slot 1 is left untouched. */
1093 void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
1094 { DBGP("%s\n", __func__);
1096 u32 addr_high, addr_low;
/* high = first 2 address bytes, low = remaining 4, big-endian packed. */
1099 addr_high = ((tp->dev->ll_addr[0] << 8) |
1100 tp->dev->ll_addr[1]);
1101 addr_low = ((tp->dev->ll_addr[2] << 24) |
1102 (tp->dev->ll_addr[3] << 16) |
1103 (tp->dev->ll_addr[4] << 8) |
1104 (tp->dev->ll_addr[5] << 0));
/* All four perfect-match slots get the same address. */
1105 for (i = 0; i < 4; i++) {
1106 if (i == 1 && skip_mac_1)
1108 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
1109 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
/* 5703/5704 have twelve additional extended match slots. */
1112 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1113 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1114 for (i = 0; i < 12; i++) {
1115 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
1116 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
/* Derive the TX backoff seed from the byte sum of the address, masked
 * to the seed field width. */
1120 addr_high = (tp->dev->ll_addr[0] +
1121 tp->dev->ll_addr[1] +
1122 tp->dev->ll_addr[2] +
1123 tp->dev->ll_addr[3] +
1124 tp->dev->ll_addr[4] +
1125 tp->dev->ll_addr[5]) &
1126 TX_BACKOFF_SEED_MASK;
1127 tw32(MAC_TX_BACKOFF_SEED, addr_high);
1130 /* Save PCI command register before chip reset */
1131 static void tg3_save_pci_state(struct tg3 *tp)
1132 { DBGP("%s\n", __func__);
/* Only the PCI COMMAND word is captured here; tg3_restore_pci_state()
 * writes it back after the GRC core-clock reset clobbers it. */
1134 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
1137 /* Restore PCI state after chip reset */
1138 static void tg3_restore_pci_state(struct tg3 *tp)
1139 { DBGP("%s\n", __func__);
1143 /* Re-enable indirect register accesses. */
1144 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
1145 tp->misc_host_ctrl);
1147 /* Set MAX PCI retry to zero. */
1148 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
1149 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
1150 tg3_flag(tp, PCIX_MODE))
/* 5704 A0 in PCI-X mode requires retries to stay on the same DMA. */
1151 val |= PCISTATE_RETRY_SAME_DMA;
1153 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
/* Put back the COMMAND word captured by tg3_save_pci_state(). */
1155 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
/* Restore cache line size / latency timer (not on the 5785). */
1157 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
1158 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
1159 tp->pci_cacheline_sz);
1160 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
1165 /* Make sure PCI-X relaxed ordering bit is clear. */
1166 if (tg3_flag(tp, PCIX_MODE)) {
1169 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
1171 pcix_cmd &= ~PCI_X_CMD_ERO;
1172 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
/* Wait for the on-chip bootcode firmware to finish initialising after a
 * reset.  The 5906 exposes completion through VCPU_STATUS; all other
 * chips are polled via the firmware mailbox in NIC SRAM.  Absence of
 * firmware is deliberately NOT treated as an error (some Sun onboard
 * parts ship without it) — it is merely logged once. */
1177 static int tg3_poll_fw(struct tg3 *tp)
1178 { DBGP("%s\n", __func__);
1183 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1184 /* Wait up to 20ms for init done. */
1185 for (i = 0; i < 200; i++) {
1186 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1193 /* Wait for firmware initialization to complete. */
1194 for (i = 0; i < 100000; i++) {
1195 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
/* Firmware writes back the bitwise complement of the driver magic
 * once its init is complete. */
1196 if (val == (u32)~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1201 /* Chip might not be fitted with firmware. Some Sun onboard
1202 * parts are configured like that. So don't signal the timeout
1203 * of the above loop as an error, but do report the lack of
1204 * running firmware once.
1206 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1207 tg3_flag_set(tp, NO_FWARE_REPORTED);
1209 DBGC(tp->dev, "No firmware running\n");
1212 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1213 /* The 57765 A0 needs a little more
1214 * time to do some important work.
/* Acquire the NVRAM hardware arbitration lock (software arbiter
 * request/grant bit 1).  A nesting count (tp->nvram_lock_cnt) lets the
 * lock be taken recursively; the arbiter is only touched when the
 * count transitions from zero. */
1222 static int tg3_nvram_lock(struct tg3 *tp)
1223 { DBGP("%s\n", __func__);
1225 if (tg3_flag(tp, NVRAM)) {
1228 if (tp->nvram_lock_cnt == 0) {
1229 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
/* Poll for the arbiter grant. */
1230 for (i = 0; i < 8000; i++) {
1231 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
/* No grant received: withdraw the request before failing.
 * NOTE(review): the error-return line is elided in this excerpt. */
1236 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
1240 tp->nvram_lock_cnt++;
/* Release one level of the NVRAM arbitration lock; the hardware
 * arbiter request is only cleared when the nesting count reaches 0. */
1245 static void tg3_nvram_unlock(struct tg3 *tp)
1246 { DBGP("%s\n", __func__);
1248 if (tg3_flag(tp, NVRAM)) {
1249 if (tp->nvram_lock_cnt > 0)
1250 tp->nvram_lock_cnt--;
1251 if (tp->nvram_lock_cnt == 0)
1252 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
/* Perform a full GRC core-clock reset of the chip and bring it back to
 * a usable state: save PCI config, issue the reset, restore PCI config,
 * re-enable the memory arbiter, reprogram GRC/clock/MAC modes and wait
 * for bootcode firmware via tg3_poll_fw().  The sequence is strictly
 * ordered; many workarounds below are chip-revision specific.
 * NOTE(review): this excerpt elides some lines (delays, a few closing
 * braces and intermediate reads); comments describe only visible code. */
1256 static int tg3_chip_reset(struct tg3 *tp)
1257 { DBGP("%s\n", __func__);
1265 /* No matching tg3_nvram_unlock() after this because
1266 * chip reset below will undo the nvram lock.
1268 tp->nvram_lock_cnt = 0;
1270 /* GRC_MISC_CFG core clock reset will clear the memory
1271 * enable bit in PCI register 4 and the MSI enable bit
1272 * on some chips, so we save relevant registers here.
1274 tg3_save_pci_state(tp);
1276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
1277 tg3_flag(tp, 5755_PLUS))
1278 tw32(GRC_FASTBOOT_PC, 0);
1282 * We must avoid the readl() that normally takes place.
1283 * It locks machines, causes machine checks, and other
1284 * fun things. So, temporarily disable the 5701
1285 * hardware workaround, while we do the reset.
1287 write_op = tp->write32;
1288 if (write_op == tg3_write_flush_reg32)
1289 tp->write32 = tg3_write32;
1292 /* Prevent the irq handler from reading or writing PCI registers
1293 * during chip reset when the memory enable bit in the PCI command
1294 * register may be cleared. The chip does not generate interrupt
1295 * at this time, but the irq handler may still be called due to irq
1296 * sharing or irqpoll.
1298 tg3_flag_set(tp, CHIP_RESETTING);
1300 if (tp->hw_status) {
1301 tp->hw_status->status = 0;
1302 tp->hw_status->status_tag = 0;
1305 tp->last_irq_tag = 0;
/* 57780: disable the L1 PLL power-down before resetting. */
1309 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
1310 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
1311 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
/* Build the reset command for GRC_MISC_CFG. */
1315 val = GRC_MISC_CFG_CORECLK_RESET;
1317 if (tg3_flag(tp, PCI_EXPRESS)) {
1318 /* Force PCIe 1.0a mode */
1319 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
1320 !tg3_flag(tp, 57765_PLUS) &&
1321 tr32(TG3_PCIE_PHY_TSTCTL) ==
1322 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
1323 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
1325 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
1326 tw32(GRC_MISC_CFG, (1 << 29));
/* 5906: signal a driver-initiated reset to the VCPU and un-halt it. */
1331 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1332 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
1333 tw32(GRC_VCPU_EXT_CTRL,
1334 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
1337 /* Manage gphy power for all CPMU absent PCIe devices. */
1338 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
1339 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
/* Issue the core-clock reset. */
1341 tw32(GRC_MISC_CFG, val);
1343 /* Unfortunately, we have to delay before the PCI read back.
1344 * Some 575X chips even will not respond to a PCI cfg access
1345 * when the reset command is given to the chip.
1347 * How do these hardware designers expect things to work
1348 * properly if the PCI write is posted for a long period
1349 * of time? It is always necessary to have some method by
1350 * which a register read back can occur to push the write
1351 * out which does the reset.
1353 * For most tg3 variants the trick below was working.
1358 /* Flush PCI posted writes. The normal MMIO registers
1359 * are inaccessible at this time so this is the only
1360 * way to make this reliably (actually, this is no longer
1361 * the case, see above). I tried to use indirect
1362 * register read/write but this upset some 5701 variants.
1364 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
/* Post-reset PCIe fix-ups. */
1368 if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
1371 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
1375 /* Wait for link training to complete. */
1376 for (i = 0; i < 5000; i++)
1379 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
1380 pci_write_config_dword(tp->pdev, 0xc4,
1381 cfg_val | (1 << 15));
1384 /* Clear the "no snoop" and "relaxed ordering" bits. */
1385 pci_read_config_word(tp->pdev,
1386 tp->pcie_cap + PCI_EXP_DEVCTL,
1388 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
1389 PCI_EXP_DEVCTL_NOSNOOP_EN);
1391 * Older PCIe devices only support the 128 byte
1392 * MPS setting. Enforce the restriction.
1394 if (!tg3_flag(tp, CPMU_PRESENT))
1395 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
1396 pci_write_config_word(tp->pdev,
1397 tp->pcie_cap + PCI_EXP_DEVCTL,
1400 /* Clear error status */
1401 pci_write_config_word(tp->pdev,
1402 tp->pcie_cap + PCI_EXP_DEVSTA,
1403 PCI_EXP_DEVSTA_CED |
1404 PCI_EXP_DEVSTA_NFED |
1405 PCI_EXP_DEVSTA_FED |
1406 PCI_EXP_DEVSTA_URD);
/* Restore the PCI config state saved before the reset. */
1409 tg3_restore_pci_state(tp);
1411 tg3_flag_clear(tp, CHIP_RESETTING);
1412 tg3_flag_clear(tp, ERROR_PROCESSED);
/* Re-enable the memory arbiter (5780-class reads current mode first). */
1415 if (tg3_flag(tp, 5780_CLASS))
1416 val = tr32(MEMARB_MODE);
1417 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
1419 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
1421 tw32(0x5000, 0x400);
1424 tw32(GRC_MODE, tp->grc_mode);
/* 5705 A0: set an undocumented bit at register 0xc4.
 * NOTE(review): the preceding read of 'val' is elided in this excerpt. */
1426 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
1429 tw32(0xc4, val | (1 << 15));
1432 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
1433 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1434 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
1435 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
1436 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
1437 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* Select MAC port mode from the PHY type (TBI for serdes PHYs,
 * GMII for MII serdes). */
1440 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
1441 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
1443 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
1444 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1449 tw32_f(MAC_MODE, val);
/* Wait for bootcode firmware before touching anything else. */
1452 err = tg3_poll_fw(tp);
1456 if (tg3_flag(tp, PCI_EXPRESS) &&
1457 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
1458 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
1459 !tg3_flag(tp, 57765_PLUS)) {
1462 tw32(0x7c00, val | (1 << 25));
/* 5720: drop the MAC clock override left over from reset. */
1465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
1466 val = tr32(TG3_CPMU_CLCK_ORIDE);
1467 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
1470 if (tg3_flag(tp, CPMU_PRESENT)) {
1471 tw32(TG3_CPMU_D0_CLCK_POLICY, 0);
1472 val = tr32(TG3_CPMU_CLCK_ORIDE_EN);
1473 tw32(TG3_CPMU_CLCK_ORIDE_EN,
1474 val | CPMU_CLCK_ORIDE_MAC_CLCK_ORIDE_EN);
/* Halt the device: signal the firmware that a reset is coming, perform
 * the chip reset, then reprogram the MAC address registers (the reset
 * clears them).  Returns the tg3_chip_reset() result.
 * NOTE(review): the return statement is elided in this excerpt. */
1480 int tg3_halt(struct tg3 *tp)
1481 { DBGP("%s\n", __func__);
1487 tg3_write_sig_pre_reset(tp);
1490 err = tg3_chip_reset(tp);
1492 __tg3_set_mac_addr(tp, 0);
/* Read one 32-bit word from a legacy serial EEPROM via the
 * GRC_EEPROM_ADDR/GRC_EEPROM_DATA register pair.  'offset' must be
 * 32-bit aligned and within the address mask.  The result is blindly
 * byteswapped so NVRAM data appears in its native (device) order. */
1500 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
1501 u32 offset, u32 *val)
1502 { DBGP("%s\n", __func__);
/* Reject unaligned or out-of-range offsets. */
1507 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
/* Preserve the non-address bits of GRC_EEPROM_ADDR, then start a read
 * at device id 0 / the requested offset. */
1510 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
1511 EEPROM_ADDR_DEVID_MASK |
1513 tw32(GRC_EEPROM_ADDR,
1515 (0 << EEPROM_ADDR_DEVID_SHIFT) |
1516 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
1517 EEPROM_ADDR_ADDR_MASK) |
1518 EEPROM_ADDR_READ | EEPROM_ADDR_START);
/* Poll for completion of the EEPROM transaction. */
1520 for (i = 0; i < 1000; i++) {
1521 tmp = tr32(GRC_EEPROM_ADDR);
1523 if (tmp & EEPROM_ADDR_COMPLETE)
1527 if (!(tmp & EEPROM_ADDR_COMPLETE))
1530 tmp = tr32(GRC_EEPROM_DATA);
1533 * The data will always be opposite the native endian
1534 * format. Perform a blind byteswap to compensate.
1536 *val = bswap_32(tmp);
/* Translate a linear NVRAM offset into the device's physical
 * addressing.  Atmel AT45DB0x1B buffered flash parts address by
 * (page number << page-position bits) + byte-within-page, so the
 * linear offset is split using the configured page size; all other
 * parts use the offset unchanged.
 * NOTE(review): the plain 'return addr' path is elided in this excerpt. */
1541 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
1542 { DBGP("%s\n", __func__);
1544 if (tg3_flag(tp, NVRAM) &&
1545 tg3_flag(tp, NVRAM_BUFFERED) &&
1546 tg3_flag(tp, FLASH) &&
1547 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
1548 (tp->nvram_jedecnum == JEDEC_ATMEL))
1550 addr = ((addr / tp->nvram_pagesize) <<
1551 ATMEL_AT45DB0X1B_PAGE_POS) +
1552 (addr % tp->nvram_pagesize);
/* Grant the host direct NVRAM access on 5750+ chips (no-op when the
 * NVRAM is firmware-protected). */
1557 static void tg3_enable_nvram_access(struct tg3 *tp)
1558 { DBGP("%s\n", __func__);
1560 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
1561 u32 nvaccess = tr32(NVRAM_ACCESS);
1563 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
/* Revoke direct host NVRAM access; mirror of tg3_enable_nvram_access(). */
1567 static void tg3_disable_nvram_access(struct tg3 *tp)
1568 { DBGP("%s\n", __func__);
1570 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
1571 u32 nvaccess = tr32(NVRAM_ACCESS);
1573 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
1577 #define NVRAM_CMD_TIMEOUT 10000
/* Issue a command to the NVRAM controller and poll NVRAM_CMD until the
 * DONE bit is set, up to NVRAM_CMD_TIMEOUT iterations.
 * NOTE(review): the success/timeout return statements are elided in
 * this excerpt. */
1579 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
1580 { DBGP("%s\n", __func__);
1584 tw32(NVRAM_CMD, nvram_cmd);
1585 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
1587 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
1593 if (i == NVRAM_CMD_TIMEOUT)
1599 /* NOTE: Data read in from NVRAM is byteswapped according to
1600 * the byteswapping settings for all other register accesses.
1601 * tg3 devices are BE devices, so on a BE machine, the data
1602 * returned will be exactly as it is seen in NVRAM. On a LE
1603 * machine, the 32-bit value will be byteswapped.
/* Read one 32-bit word at 'offset': falls back to the legacy EEPROM
 * path on parts without an NVRAM controller, otherwise translates the
 * offset, takes the arbitration lock, enables access, runs a single
 * read command and fetches NVRAM_RDDATA. */
1605 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
1606 { DBGP("%s\n", __func__);
1610 if (!tg3_flag(tp, NVRAM))
1611 return tg3_nvram_read_using_eeprom(tp, offset, val);
/* Convert linear offset to the device's physical addressing. */
1613 offset = tg3_nvram_phys_addr(tp, offset);
1615 if (offset > NVRAM_ADDR_MSK)
1618 ret = tg3_nvram_lock(tp);
1622 tg3_enable_nvram_access(tp);
1624 tw32(NVRAM_ADDR, offset);
1625 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
1626 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
/* NOTE(review): the success check guarding this read is elided in
 * this excerpt. */
1629 *val = tr32(NVRAM_RDDATA);
1631 tg3_disable_nvram_access(tp);
1633 tg3_nvram_unlock(tp);
1638 /* Ensures NVRAM data is in bytestream format. */
/* Wrapper around tg3_nvram_read() that stores the word big-endian, so
 * callers always see NVRAM bytes in device (bytestream) order. */
1639 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, u32 *val)
1640 { DBGP("%s\n", __func__);
1643 int res = tg3_nvram_read(tp, offset, &v);
1645 *val = cpu_to_be32(v);
/* Determine the permanent MAC address, trying three sources in order:
 * 1) the bootcode's SRAM MAC-address mailbox, 2) NVRAM at a per-chip
 * 'mac_offset', 3) the live MAC_ADDR_0 registers.  Writes the result
 * into dev->hw_addr and validates it before returning. */
1649 int tg3_get_device_address(struct tg3 *tp)
1650 { DBGP("%s\n", __func__);
1652 struct net_device *dev = tp->dev;
1653 u32 hi, lo, mac_offset;
/* Pick the NVRAM offset for this function/port.  Dual-MAC parts
 * (5704 / 5780-class) select by DUAL_MAC_CTRL_ID; 5717+ select by PCI
 * function number; the 5906 uses its own fixed offset. */
1657 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1658 tg3_flag(tp, 5780_CLASS)) {
1659 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
1661 if (tg3_nvram_lock(tp))
1662 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
1664 tg3_nvram_unlock(tp);
1665 } else if (tg3_flag(tp, 5717_PLUS)) {
1666 if (PCI_FUNC(tp->pdev->busdevfn) & 1)
1668 if (PCI_FUNC(tp->pdev->busdevfn) > 1)
1669 mac_offset += 0x18c;
1670 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
1673 /* First try to get it from MAC address mailbox. */
1674 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* 0x484b is the bootcode's "HK" signature marking a valid mailbox. */
1675 if ((hi >> 16) == 0x484b) {
1676 dev->hw_addr[0] = (hi >> 8) & 0xff;
1677 dev->hw_addr[1] = (hi >> 0) & 0xff;
1679 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
1680 dev->hw_addr[2] = (lo >> 24) & 0xff;
1681 dev->hw_addr[3] = (lo >> 16) & 0xff;
1682 dev->hw_addr[4] = (lo >> 8) & 0xff;
1683 dev->hw_addr[5] = (lo >> 0) & 0xff;
1685 /* Some old bootcode may report a 0 MAC address in SRAM */
1686 addr_ok = is_valid_ether_addr(&dev->hw_addr[0]);
1689 /* Next, try NVRAM. */
1690 if (!tg3_flag(tp, NO_NVRAM) &&
1691 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
1692 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
/* hi holds 2 address bytes in its low half; lo holds the other 4. */
1693 memcpy(&dev->hw_addr[0], ((char *)&hi) + 2, 2);
1694 memcpy(&dev->hw_addr[2], (char *)&lo, sizeof(lo));
1696 /* Finally just fetch it out of the MAC control regs. */
1698 hi = tr32(MAC_ADDR_0_HIGH);
1699 lo = tr32(MAC_ADDR_0_LOW);
1701 dev->hw_addr[5] = lo & 0xff;
1702 dev->hw_addr[4] = (lo >> 8) & 0xff;
1703 dev->hw_addr[3] = (lo >> 16) & 0xff;
1704 dev->hw_addr[2] = (lo >> 24) & 0xff;
1705 dev->hw_addr[1] = hi & 0xff;
1706 dev->hw_addr[0] = (hi >> 8) & 0xff;
/* All sources exhausted: fail if the address is still invalid. */
1710 if (!is_valid_ether_addr(&dev->hw_addr[0])) {
/* Program the receive filter: keep VLAN tags, accept all multicast
 * (all-ones hash filter), and write MAC_RX_MODE only when the computed
 * mode actually differs from the cached one. */
1717 static void __tg3_set_rx_mode(struct net_device *dev)
1718 { DBGP("%s\n", __func__);
1720 struct tg3 *tp = netdev_priv(dev);
1723 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
1724 RX_MODE_KEEP_VLAN_TAG);
1726 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
1728 /* Accept all multicast. */
1729 tw32(MAC_HASH_REG_0, 0xffffffff);
1730 tw32(MAC_HASH_REG_1, 0xffffffff);
1731 tw32(MAC_HASH_REG_2, 0xffffffff);
1732 tw32(MAC_HASH_REG_3, 0xffffffff);
1734 if (rx_mode != tp->rx_mode) {
1735 tp->rx_mode = rx_mode;
1736 tw32_f(MAC_RX_MODE, rx_mode);
/* Program the host coalescing engine with fixed low-latency values
 * suitable for iPXE (interrupt on every RX frame, no RX tick delay).
 * Pre-5705 chips additionally get coalescing tick interrupts and a
 * statistics tick that is only enabled while the link is up. */
1741 static void __tg3_set_coalesce(struct tg3 *tp)
1742 { DBGP("%s\n", __func__);
1745 tw32(HOSTCC_RXCOL_TICKS, 0);
1746 tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
1747 tw32(HOSTCC_RXMAX_FRAMES, 1);
1748 /* FIXME: mix between TXMAX and RXMAX taken from legacy driver */
1749 tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
1750 tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
1751 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
1753 if (!tg3_flag(tp, 5705_PLUS)) {
1754 u32 val = DEFAULT_STAT_COAL_TICKS;
1756 tw32(HOSTCC_RXCOAL_TICK_INT, DEFAULT_RXCOAL_TICK_INT);
1757 tw32(HOSTCC_TXCOAL_TICK_INT, DEFAULT_TXCOAL_TICK_INT);
/* Disable the statistics tick while the link is down. */
1759 if (!netdev_link_ok(tp->dev))
1762 tw32(HOSTCC_STAT_COAL_TICKS, val);
/* Write one TG3_BDINFO structure into NIC SRAM at 'bdinfo_addr': the
 * 64-bit host DMA address of the ring, its maxlen/flags word, and — on
 * pre-5705 chips only — the ring's NIC-SRAM descriptor address.
 * NOTE(review): the tg3_write_mem() call lines are partially elided in
 * this excerpt; only their argument lines are visible. */
1766 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
1767 dma_addr_t mapping, u32 maxlen_flags,
1769 { DBGP("%s\n", __func__);
1772 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
1773 ((u64) mapping >> 32));
1775 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
1776 ((u64) mapping & 0xffffffff));
1778 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
1781 if (!tg3_flag(tp, 5705_PLUS))
1783 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
/* Reset all descriptor ring state: disable every TX and RX-return ring
 * except the first (the per-chip ring count determines the SRAM span
 * to walk), clear the mailboxes, zero the status block, program its
 * DMA address, and re-point the first send and receive-return BDINFOs
 * at the driver's rings. */
1787 static void tg3_rings_reset(struct tg3 *tp)
1788 { DBGP("%s\n", __func__);
1791 u32 txrcb, rxrcb, limit;
1793 /* Disable all transmit rings but the first. */
1794 if (!tg3_flag(tp, 5705_PLUS))
1795 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
1796 else if (tg3_flag(tp, 5717_PLUS))
1797 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
1798 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
1799 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
1801 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
1803 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
1804 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
1805 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
1806 BDINFO_FLAGS_DISABLED);
1809 /* Disable all receive return rings but the first. */
1810 if (tg3_flag(tp, 5717_PLUS))
1811 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
1812 else if (!tg3_flag(tp, 5705_PLUS))
1813 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
1814 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
1815 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
1816 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
1818 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
1820 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
1821 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
1822 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
1823 BDINFO_FLAGS_DISABLED);
1825 /* Disable interrupts */
1826 tw32_mailbox_f(tp->int_mbox, 1);
/* Zero the TX producer and RX consumer mailboxes. */
1830 tw32_mailbox(tp->prodmbox, 0);
1831 tw32_rx_mbox(tp->consmbox, 0);
1833 /* Make sure the NIC-based send BD rings are disabled. */
1834 if (!tg3_flag(tp, 5705_PLUS)) {
1835 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
1836 for (i = 0; i < 16; i++)
1837 tw32_tx_mbox(mbox + i * 8, 0);
1840 txrcb = NIC_SRAM_SEND_RCB;
1841 rxrcb = NIC_SRAM_RCV_RET_RCB;
1843 /* Clear status block in ram. */
1844 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
1846 /* Set status block DMA address */
1847 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
1848 ((u64) tp->status_mapping >> 32));
1849 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
1850 ((u64) tp->status_mapping & 0xffffffff));
/* Point the first send ring BDINFO at the driver's TX descriptor ring. */
1853 tg3_set_bdinfo(tp, txrcb, tp->tx_desc_mapping,
1854 (TG3_TX_RING_SIZE <<
1855 BDINFO_FLAGS_MAXLEN_SHIFT),
1856 NIC_SRAM_TX_BUFFER_DESC);
1857 txrcb += TG3_BDINFO_SIZE;
1860 /* FIXME: will TG3_RX_RET_MAX_SIZE_5705 work on all cards? */
1862 tg3_set_bdinfo(tp, rxrcb, tp->rx_rcb_mapping,
1863 TG3_RX_RET_MAX_SIZE_5705 <<
1864 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
1865 rxrcb += TG3_BDINFO_SIZE;
/* Configure receive-BD replenish thresholds.  Chooses the per-chip
 * standard-ring BD cache size, then programs a fixed RCVBDI threshold
 * (iPXE uses only a handful of RX descriptors) and, on 57765+ parts,
 * the standard-ring replenish low-water mark. */
1869 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
1870 { DBGP("%s\n", __func__);
1872 u32 val, bdcache_maxcnt;
1874 if (!tg3_flag(tp, 5750_PLUS) ||
1875 tg3_flag(tp, 5780_CLASS) ||
1876 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
1877 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
1878 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
1879 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
1880 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
1881 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
1883 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
1886 /* NOTE: legacy driver uses RX_PENDING / 8, we only use 4 descriptors
1887 * for now, use / 4 so the result is > 0
1889 val = TG3_DEF_RX_RING_PENDING / 4;
1890 tw32(RCVBDI_STD_THRESH, val);
1892 if (tg3_flag(tp, 57765_PLUS))
1893 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
1896 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1897 { DBGP("%s\n", __func__);
1899 u32 val, rdmac_mode;
1901 struct tg3_rx_prodring_set *tpr = &tp->prodring;
1905 tg3_write_sig_pre_reset(tp);
1907 if (tg3_flag(tp, INIT_COMPLETE))
1913 err = tg3_chip_reset(tp);
1917 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
1918 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
1919 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
1920 PCIE_PWR_MGMT_L1_THRESH_4MS;
1921 tw32(PCIE_PWR_MGMT_THRESH, val);
1923 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
1924 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
1926 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
1928 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
1929 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
1932 if (tg3_flag(tp, L1PLLPD_EN)) {
1933 u32 grc_mode = tr32(GRC_MODE);
1935 /* Access the lower 1K of PL PCIE block registers. */
1936 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
1937 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
1939 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
1940 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
1941 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
1943 tw32(GRC_MODE, grc_mode);
1946 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
1947 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1948 u32 grc_mode = tr32(GRC_MODE);
1950 /* Access the lower 1K of PL PCIE block registers. */
1951 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
1952 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
1954 val = tr32(TG3_PCIE_TLDLPL_PORT +
1955 TG3_PCIE_PL_LO_PHYCTL5);
1956 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
1957 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
1959 tw32(GRC_MODE, grc_mode);
1962 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
1963 u32 grc_mode = tr32(GRC_MODE);
1965 /* Access the lower 1K of DL PCIE block registers. */
1966 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
1967 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
1969 val = tr32(TG3_PCIE_TLDLPL_PORT +
1970 TG3_PCIE_DL_LO_FTSMAX);
1971 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
1972 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
1973 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
1975 tw32(GRC_MODE, grc_mode);
1978 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
1979 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
1980 val |= CPMU_LSPD_10MB_MACCLK_6_25;
1981 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
1984 /* This works around an issue with Athlon chipsets on
1985 * B3 tigon3 silicon. This bit has no effect on any
1986 * other revision. But do not set this on PCI Express
1987 * chips and don't even touch the clocks if the CPMU is present.
1989 if (!tg3_flag(tp, CPMU_PRESENT)) {
1990 if (!tg3_flag(tp, PCI_EXPRESS))
1991 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
1992 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
1995 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
1996 tg3_flag(tp, PCIX_MODE)) {
1997 val = tr32(TG3PCI_PCISTATE);
1998 val |= PCISTATE_RETRY_SAME_DMA;
1999 tw32(TG3PCI_PCISTATE, val);
2002 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
2003 /* Enable some hw fixes. */
2004 val = tr32(TG3PCI_MSI_DATA);
2005 val |= (1 << 26) | (1 << 28) | (1 << 29);
2006 tw32(TG3PCI_MSI_DATA, val);
2009 /* Descriptor ring init may make accesses to the
2010 * NIC SRAM area to setup the TX descriptors, so we
2011 * can only do this after the hardware has been
2012 * successfully reset.
2014 err = tg3_init_rings(tp);
2018 if (tg3_flag(tp, 57765_PLUS)) {
2019 val = tr32(TG3PCI_DMA_RW_CTRL) &
2020 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
2021 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
2022 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
2023 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
2024 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
2025 val |= DMA_RWCTRL_TAGGED_STAT_WA;
2026 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
2027 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
2028 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
2029 /* This value is determined during the probe time DMA
2030 * engine test, tg3_test_dma.
2032 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
2035 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
2036 GRC_MODE_4X_NIC_SEND_RINGS |
2037 GRC_MODE_NO_TX_PHDR_CSUM |
2038 GRC_MODE_NO_RX_PHDR_CSUM);
2039 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
2040 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
2042 /* Pseudo-header checksum is done by hardware logic and not
2043 * the offload processers, so make the chip do the pseudo-
2044 * header checksums on receive. For transmit it is more
2045 * convenient to do the pseudo-header checksum in software
2046 * as Linux does that on transmit for us in all cases.
2048 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
2052 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
2054 /* Setup the timer prescalar register. Clock is always 66Mhz. */
2055 val = tr32(GRC_MISC_CFG);
2057 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
2058 tw32(GRC_MISC_CFG, val);
2060 /* Initialize MBUF/DESC pool. */
2061 if (tg3_flag(tp, 5750_PLUS)) {
2063 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
2064 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
2065 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
2066 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
2068 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
2069 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
2070 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
2073 tw32(BUFMGR_MB_RDMA_LOW_WATER,
2074 tp->bufmgr_config.mbuf_read_dma_low_water);
2075 tw32(BUFMGR_MB_MACRX_LOW_WATER,
2076 tp->bufmgr_config.mbuf_mac_rx_low_water);
2077 tw32(BUFMGR_MB_HIGH_WATER,
2078 tp->bufmgr_config.mbuf_high_water);
2080 tw32(BUFMGR_DMA_LOW_WATER,
2081 tp->bufmgr_config.dma_low_water);
2082 tw32(BUFMGR_DMA_HIGH_WATER,
2083 tp->bufmgr_config.dma_high_water);
2085 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
2086 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2087 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
2088 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2089 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
2090 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
2091 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
2092 tw32(BUFMGR_MODE, val);
2093 for (i = 0; i < 2000; i++) {
2094 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
2099 DBGC(tp->dev, "%s cannot enable BUFMGR\n", __func__);
2103 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
2104 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
2106 tg3_setup_rxbd_thresholds(tp);
2108 /* Initialize TG3_BDINFO's at:
2109 * RCVDBDI_STD_BD: standard eth size rx ring
2110 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
2111 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
2114 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
2115 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
2116 * ring attribute flags
2117 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
2119 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
2120 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
2122 * The size of each ring is fixed in the firmware, but the location is
2125 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
2126 ((u64) tpr->rx_std_mapping >> 32));
2127 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
2128 ((u64) tpr->rx_std_mapping & 0xffffffff));
2129 if (!tg3_flag(tp, 5717_PLUS))
2130 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
2131 NIC_SRAM_RX_BUFFER_DESC);
2133 /* Disable the mini ring */
2134 if (!tg3_flag(tp, 5705_PLUS))
2135 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
2136 BDINFO_FLAGS_DISABLED);
2138 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
2140 if (tg3_flag(tp, 57765_PLUS))
2141 val |= (RX_STD_MAX_SIZE << 2);
2143 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
2145 tpr->rx_std_prod_idx = 0;
2147 /* std prod index is updated by tg3_refill_prod_ring() */
2148 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 0);
2149 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 0);
2151 tg3_rings_reset(tp);
2153 __tg3_set_mac_addr(tp,0);
2155 #define TG3_MAX_MTU 1522
2156 /* MTU + ethernet header + FCS + optional VLAN tag */
2157 tw32(MAC_RX_MTU_SIZE, TG3_MAX_MTU);
2159 /* The slot time is changed by tg3_setup_phy if we
2160 * run at gigabit with half duplex.
2162 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2163 (6 << TX_LENGTHS_IPG_SHIFT) |
2164 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
2166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
2167 val |= tr32(MAC_TX_LENGTHS) &
2168 (TX_LENGTHS_JMB_FRM_LEN_MSK |
2169 TX_LENGTHS_CNT_DWN_VAL_MSK);
2171 tw32(MAC_TX_LENGTHS, val);
2173 /* Receive rules. */
2174 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
2175 tw32(RCVLPC_CONFIG, 0x0181);
2177 /* Calculate RDMAC_MODE setting early, we need it to determine
2178 * the RCVLPC_STATE_ENABLE mask.
2180 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
2181 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
2182 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
2183 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
2184 RDMAC_MODE_LNGREAD_ENAB);
2186 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
2187 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
2189 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
2190 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
2191 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
2192 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
2193 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
2194 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
2196 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
2197 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
2198 if (tg3_flag(tp, TSO_CAPABLE) &&
2199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2200 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
2201 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
2202 !tg3_flag(tp, IS_5788)) {
2203 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
2207 if (tg3_flag(tp, PCI_EXPRESS))
2208 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
2210 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
2211 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
2213 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
2214 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
2215 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
2216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
2217 tg3_flag(tp, 57765_PLUS)) {
2218 val = tr32(TG3_RDMA_RSRVCTRL_REG);
2219 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2221 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
2222 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2223 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
2224 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
2225 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2226 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
2228 tw32(TG3_RDMA_RSRVCTRL_REG,
2229 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2232 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2233 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2234 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
2235 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
2236 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
2237 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
2240 /* Receive/send statistics. */
2241 if (tg3_flag(tp, 5750_PLUS)) {
2242 val = tr32(RCVLPC_STATS_ENABLE);
2243 val &= ~RCVLPC_STATSENAB_DACK_FIX;
2244 tw32(RCVLPC_STATS_ENABLE, val);
2245 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
2246 tg3_flag(tp, TSO_CAPABLE)) {
2247 val = tr32(RCVLPC_STATS_ENABLE);
2248 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
2249 tw32(RCVLPC_STATS_ENABLE, val);
2251 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
2253 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
2254 tw32(SNDDATAI_STATSENAB, 0xffffff);
2255 tw32(SNDDATAI_STATSCTRL,
2256 (SNDDATAI_SCTRL_ENABLE |
2257 SNDDATAI_SCTRL_FASTUPD));
2259 /* Setup host coalescing engine. */
2260 tw32(HOSTCC_MODE, 0);
2261 for (i = 0; i < 2000; i++) {
2262 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
2267 __tg3_set_coalesce(tp);
2269 if (!tg3_flag(tp, 5705_PLUS)) {
2270 /* Status/statistics block address. See tg3_timer,
2271 * the tg3_periodic_fetch_stats call there, and
2272 * tg3_get_stats to see how this works for 5705/5750 chips.
2273 * NOTE: stats block removed for iPXE
2275 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
2277 /* Clear statistics and status block memory areas */
2278 for (i = NIC_SRAM_STATS_BLK;
2279 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
2281 tg3_write_mem(tp, i, 0);
2286 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
2288 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
2289 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
2290 if (!tg3_flag(tp, 5705_PLUS))
2291 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
2293 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
2294 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
2295 /* reset to prevent losing 1st rx packet intermittently */
2296 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
2300 if (tg3_flag(tp, ENABLE_APE))
2301 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
2304 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
2305 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
2306 if (!tg3_flag(tp, 5705_PLUS) &&
2307 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2308 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
2309 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2310 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
2313 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
2314 * If TG3_FLAG_IS_NIC is zero, we should read the
2315 * register to preserve the GPIO settings for LOMs. The GPIOs,
2316 * whether used as inputs or outputs, are set by boot code after
2319 if (!tg3_flag(tp, IS_NIC)) {
2322 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
2323 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
2324 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
2326 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
2327 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
2328 GRC_LCLCTRL_GPIO_OUTPUT3;
2330 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
2331 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
2333 tp->grc_local_ctrl &= ~gpio_mask;
2334 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
2336 /* GPIO1 must be driven high for eeprom write protect */
2337 if (tg3_flag(tp, EEPROM_WRITE_PROT))
2338 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
2339 GRC_LCLCTRL_GPIO_OUTPUT1);
2341 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
2344 if (!tg3_flag(tp, 5705_PLUS)) {
2345 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
2349 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
2350 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
2351 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
2352 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
2353 WDMAC_MODE_LNGREAD_ENAB);
2355 /* Enable host coalescing bug fix */
2356 if (tg3_flag(tp, 5755_PLUS))
2357 val |= WDMAC_MODE_STATUS_TAG_FIX;
2359 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
2360 val |= WDMAC_MODE_BURST_ALL_DATA;
2362 tw32_f(WDMAC_MODE, val);
2365 if (tg3_flag(tp, PCIX_MODE)) {
2368 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
2370 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
2371 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
2372 pcix_cmd |= PCI_X_CMD_READ_2K;
2373 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2374 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
2375 pcix_cmd |= PCI_X_CMD_READ_2K;
2377 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
2381 tw32_f(RDMAC_MODE, rdmac_mode);
2384 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
2385 if (!tg3_flag(tp, 5705_PLUS))
2386 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
2388 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
2390 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
2392 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
2394 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
2395 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
2396 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
2397 if (tg3_flag(tp, LRG_PROD_RING_CAP))
2398 val |= RCVDBDI_MODE_LRG_RING_SZ;
2399 tw32(RCVDBDI_MODE, val);
2400 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
2402 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
2403 if (tg3_flag(tp, ENABLE_TSS))
2404 val |= SNDBDI_MODE_MULTI_TXQ_EN;
2405 tw32(SNDBDI_MODE, val);
2406 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
2409 /* FIXME: 5701 firmware fix? */
2411 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
2412 err = tg3_load_5701_a0_firmware_fix(tp);
2418 tp->tx_mode = TX_MODE_ENABLE;
2420 if (tg3_flag(tp, 5755_PLUS) ||
2421 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
2422 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
2424 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2425 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
2426 tp->tx_mode &= ~val;
2427 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
2430 tw32_f(MAC_TX_MODE, tp->tx_mode);
2433 tp->rx_mode = RX_MODE_ENABLE;
2435 tw32_f(MAC_RX_MODE, tp->rx_mode);
2438 tw32(MAC_LED_CTRL, tp->led_ctrl);
2440 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2441 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2442 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
2445 tw32_f(MAC_RX_MODE, tp->rx_mode);
2448 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2449 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
2450 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
2451 /* Set drive transmission level to 1.2V */
2452 /* only if the signal pre-emphasis bit is not set */
2453 val = tr32(MAC_SERDES_CFG);
2456 tw32(MAC_SERDES_CFG, val);
2458 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
2459 tw32(MAC_SERDES_CFG, 0x616000);
2462 /* Prevent chip from dropping frames when flow control
2465 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2469 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
2471 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
2472 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2473 /* Use hardware link auto-negotiation */
2474 tg3_flag_set(tp, HW_AUTONEG);
2477 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
2478 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2481 tmp = tr32(SERDES_RX_CTRL);
2482 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
2483 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
2484 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
2485 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
2488 err = tg3_setup_phy(tp, 0);
2492 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2493 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2496 /* Clear CRC stats. */
2497 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
2498 tg3_writephy(tp, MII_TG3_TEST1,
2499 tmp | MII_TG3_TEST1_CRC_EN);
2500 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
2504 __tg3_set_rx_mode(tp->dev);
2506 /* Initialize receive rules. */
2507 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
2508 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
2509 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
2510 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
2512 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
2516 if (tg3_flag(tp, ENABLE_ASF))
2520 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
2522 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
2524 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
2526 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
2528 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
2530 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
2532 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
2534 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
2536 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
2538 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
2540 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
2542 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
2544 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
2546 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
2557 /* Called at device open time to get the chip ready for
2558 * packet processing. Invoked with tp->lock held.
2560 int tg3_init_hw(struct tg3 *tp, int reset_phy)
2561 { DBGP("%s\n", __func__);
2563 tg3_switch_clocks(tp);
2565 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
2567 return tg3_reset_hw(tp, reset_phy);
2570 void tg3_set_txd(struct tg3 *tp, int entry,
2571 dma_addr_t mapping, int len, u32 flags)
2572 { DBGP("%s\n", __func__);
2574 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
2576 txd->addr_hi = ((u64) mapping >> 32);
2577 txd->addr_lo = ((u64) mapping & 0xffffffff);
2578 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
2582 int tg3_do_test_dma(struct tg3 *tp, u32 __unused *buf, dma_addr_t buf_dma, int size, int to_device)
2583 { DBGP("%s\n", __func__);
2585 struct tg3_internal_buffer_desc test_desc;
2590 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
2592 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
2593 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
2594 tw32(RDMAC_STATUS, 0);
2595 tw32(WDMAC_STATUS, 0);
2597 tw32(BUFMGR_MODE, 0);
2600 test_desc.addr_hi = ((u64) buf_dma) >> 32;
2601 test_desc.addr_lo = buf_dma & 0xffffffff;
2602 test_desc.nic_mbuf = 0x00002100;
2603 test_desc.len = size;
2606 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
2607 * the *second* time the tg3 driver was getting loaded after an
2610 * Broadcom tells me:
2611 * ...the DMA engine is connected to the GRC block and a DMA
2612 * reset may affect the GRC block in some unpredictable way...
2613 * The behavior of resets to individual blocks has not been tested.
2615 * Broadcom noted the GRC reset will also reset all sub-components.
2618 test_desc.cqid_sqid = (13 << 8) | 2;
2620 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
2623 test_desc.cqid_sqid = (16 << 8) | 7;
2625 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
2628 test_desc.flags = 0x00000005;
2630 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
2633 val = *(((u32 *)&test_desc) + i);
2634 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
2635 sram_dma_descs + (i * sizeof(u32)));
2636 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
2638 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
2641 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
2643 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
2646 for (i = 0; i < 40; i++) {
2650 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
2652 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
2653 if ((val & 0xffff) == sram_dma_descs) {