2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2011 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
18 FILE_LICENCE ( GPL2_ONLY );
26 #include <ipxe/iobuf.h>
27 #include <ipxe/timer.h>
28 #include <ipxe/malloc.h>
29 #include <ipxe/if_ether.h>
30 #include <ipxe/ethernet.h>
31 #include <ipxe/netdevice.h>
35 #define RESET_KIND_SHUTDOWN 0
36 #define RESET_KIND_INIT 1
37 #define RESET_KIND_SUSPEND 2
39 #define TG3_DEF_MAC_MODE 0
41 void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
42 { DBGP("%s\n", __func__);
44 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
45 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
48 u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
49 { DBGP("%s\n", __func__);
53 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
54 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
58 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
59 { DBGP("%s\n", __func__);
61 return readl(tp->regs + off + GRCMBOX_BASE);
64 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
65 { DBGP("%s\n", __func__);
67 writel(val, tp->regs + off + GRCMBOX_BASE);
70 void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
71 { DBGP("%s\n", __func__);
73 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
74 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
75 TG3_64BIT_REG_LOW, val);
78 if (off == TG3_RX_STD_PROD_IDX_REG) {
79 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
80 TG3_64BIT_REG_LOW, val);
84 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
85 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
87 /* In indirect mode when disabling interrupts, we also need
88 * to clear the interrupt bit in the GRC local ctrl register.
90 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
92 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
93 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
97 u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
98 { DBGP("%s\n", __func__);
102 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
103 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
108 /* usec_wait specifies the wait time in usec when writing to certain registers
109 * where it is unsafe to read back the register without some delay.
110 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
111 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
113 void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
114 { DBGP("%s\n", __func__);
121 /* Wait again after the read for the posted method to guarantee that
122 * the wait time is met.
128 /* stolen from legacy etherboot tg3 driver */
129 void tg3_set_power_state_0(struct tg3 *tp)
130 { DBGP("%s\n", __func__);
132 uint16_t power_control;
135 /* Make sure register accesses (indirect or otherwise)
136 * will function correctly.
138 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
140 pci_read_config_word(tp->pdev, pm + PCI_PM_CTRL, &power_control);
142 power_control |= PCI_PM_CTRL_PME_STATUS;
143 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
145 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
147 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
152 void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
153 { DBGP("%s\n", __func__);
155 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
156 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
161 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
162 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
164 /* Always leave this as zero. */
165 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
168 #define PCI_VENDOR_ID_ARIMA 0x161f
170 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
171 { DBGP("%s\n", __func__);
176 /* On some early chips the SRAM cannot be accessed in D3hot state,
177 * so need make sure we're in D0.
179 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
180 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
181 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
184 /* Make sure register accesses (indirect or otherwise)
185 * will function correctly.
187 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
190 /* The memory arbiter has to be enabled in order for SRAM accesses
191 * to succeed. Normally on powerup the tg3 chip firmware will make
192 * sure it is enabled, but other entities such as system netboot
193 * code might disable it.
195 val = tr32(MEMARB_MODE);
196 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
198 tp->phy_id = TG3_PHY_ID_INVALID;
199 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
201 /* Assume an onboard device by default. */
202 tg3_flag_set(tp, EEPROM_WRITE_PROT);
204 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
205 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
206 u32 nic_cfg, led_cfg;
207 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
208 int eeprom_phy_serdes = 0;
210 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
211 tp->nic_sram_data_cfg = nic_cfg;
213 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
214 ver >>= NIC_SRAM_DATA_VER_SHIFT;
215 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
216 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
217 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
218 (ver > 0) && (ver < 0x100))
219 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
221 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
222 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
224 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
225 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
226 eeprom_phy_serdes = 1;
228 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
229 if (nic_phy_id != 0) {
230 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
231 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
233 eeprom_phy_id = (id1 >> 16) << 10;
234 eeprom_phy_id |= (id2 & 0xfc00) << 16;
235 eeprom_phy_id |= (id2 & 0x03ff) << 0;
239 tp->phy_id = eeprom_phy_id;
240 if (eeprom_phy_serdes) {
241 if (!tg3_flag(tp, 5705_PLUS))
242 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
244 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
247 if (tg3_flag(tp, 5750_PLUS))
248 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
249 SHASTA_EXT_LED_MODE_MASK);
251 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
255 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
256 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
259 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
260 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
263 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
264 tp->led_ctrl = LED_CTRL_MODE_MAC;
266 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
267 * read on some older 5700/5701 bootcode.
269 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
271 GET_ASIC_REV(tp->pci_chip_rev_id) ==
273 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
277 case SHASTA_EXT_LED_SHARED:
278 tp->led_ctrl = LED_CTRL_MODE_SHARED;
279 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
280 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
281 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
282 LED_CTRL_MODE_PHY_2);
285 case SHASTA_EXT_LED_MAC:
286 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
289 case SHASTA_EXT_LED_COMBO:
290 tp->led_ctrl = LED_CTRL_MODE_COMBO;
291 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
292 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
293 LED_CTRL_MODE_PHY_2);
298 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
299 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
300 tp->subsystem_vendor == PCI_VENDOR_ID_DELL)
301 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
303 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
304 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
306 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
307 tg3_flag_set(tp, EEPROM_WRITE_PROT);
308 if ((tp->subsystem_vendor ==
309 PCI_VENDOR_ID_ARIMA) &&
310 (tp->subsystem_device == 0x205a ||
311 tp->subsystem_device == 0x2063))
312 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
314 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
315 tg3_flag_set(tp, IS_NIC);
318 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
319 tg3_flag_set(tp, ENABLE_ASF);
320 if (tg3_flag(tp, 5750_PLUS))
321 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
324 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
325 tg3_flag(tp, ENABLE_ASF))
326 tg3_flag_set(tp, ENABLE_APE);
328 if (cfg2 & (1 << 17))
329 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
331 /* serdes signal pre-emphasis in register 0x590 set by */
332 /* bootcode if bit 18 is set */
333 if (cfg2 & (1 << 18))
334 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
336 if ((tg3_flag(tp, 57765_PLUS) ||
337 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
338 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
339 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
340 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
342 if (tg3_flag(tp, PCI_EXPRESS) &&
343 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
344 !tg3_flag(tp, 57765_PLUS)) {
347 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
350 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
351 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
352 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
353 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
354 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
355 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
359 static void tg3_switch_clocks(struct tg3 *tp)
360 { DBGP("%s\n", __func__);
365 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
368 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
370 orig_clock_ctrl = clock_ctrl;
371 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
372 CLOCK_CTRL_CLKRUN_OENABLE |
374 tp->pci_clock_ctrl = clock_ctrl;
376 if (tg3_flag(tp, 5705_PLUS)) {
377 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
378 tw32_wait_f(TG3PCI_CLOCK_CTRL,
379 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
381 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
382 tw32_wait_f(TG3PCI_CLOCK_CTRL,
384 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
386 tw32_wait_f(TG3PCI_CLOCK_CTRL,
387 clock_ctrl | (CLOCK_CTRL_ALTCLK),
390 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
393 int tg3_get_invariants(struct tg3 *tp)
394 { DBGP("%s\n", __func__);
397 u32 pci_state_reg, grc_misc_cfg;
402 /* Force memory write invalidate off. If we leave it on,
403 * then on 5700_BX chips we have to enable a workaround.
404 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
405 * to match the cacheline size. The Broadcom driver have this
406 * workaround but turns MWI off all the times so never uses
407 * it. This seems to suggest that the workaround is insufficient.
409 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
410 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
411 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
413 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
414 * has the register indirect write enable bit set before
415 * we try to access any of the MMIO registers. It is also
416 * critical that the PCI-X hw workaround situation is decided
417 * before that as well.
419 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
422 tp->pci_chip_rev_id = (misc_ctrl_reg >>
423 MISC_HOST_CTRL_CHIPREV_SHIFT);
424 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
425 u32 prod_id_asic_rev;
427 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
428 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
429 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
430 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
431 pci_read_config_dword(tp->pdev,
432 TG3PCI_GEN2_PRODID_ASICREV,
434 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
435 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
436 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
437 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
438 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
439 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
440 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
441 pci_read_config_dword(tp->pdev,
442 TG3PCI_GEN15_PRODID_ASICREV,
445 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
448 tp->pci_chip_rev_id = prod_id_asic_rev;
451 /* Wrong chip ID in 5752 A0. This code can be removed later
452 * as A0 is not in production.
454 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
455 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
457 /* Initialize misc host control in PCI block. */
458 tp->misc_host_ctrl |= (misc_ctrl_reg &
459 MISC_HOST_CTRL_CHIPREV);
460 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
463 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
464 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
465 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
466 tg3_flag_set(tp, 5717_PLUS);
468 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
469 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766 ||
470 tg3_flag(tp, 5717_PLUS))
471 tg3_flag_set(tp, 57765_PLUS);
473 /* Intentionally exclude ASIC_REV_5906 */
474 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
475 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
476 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
477 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
478 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
479 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
480 tg3_flag(tp, 57765_PLUS))
481 tg3_flag_set(tp, 5755_PLUS);
483 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
484 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
485 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
486 tg3_flag(tp, 5755_PLUS) ||
487 tg3_flag(tp, 5780_CLASS))
488 tg3_flag_set(tp, 5750_PLUS);
490 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
491 tg3_flag(tp, 5750_PLUS))
492 tg3_flag_set(tp, 5705_PLUS);
494 if (tg3_flag(tp, 5717_PLUS))
495 tg3_flag_set(tp, LRG_PROD_RING_CAP);
497 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
500 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
501 if (tp->pcie_cap != 0) {
504 tg3_flag_set(tp, PCI_EXPRESS);
506 pci_read_config_word(tp->pdev,
507 tp->pcie_cap + PCI_EXP_LNKCTL,
509 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
510 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
511 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
512 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
513 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
514 tg3_flag_set(tp, CLKREQ_BUG);
515 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
516 tg3_flag_set(tp, L1PLLPD_EN);
518 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
519 tg3_flag_set(tp, PCI_EXPRESS);
520 } else if (!tg3_flag(tp, 5705_PLUS) ||
521 tg3_flag(tp, 5780_CLASS)) {
522 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
525 "Cannot find PCI-X capability, aborting\n");
529 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
530 tg3_flag_set(tp, PCIX_MODE);
533 /* If we have an AMD 762 or VIA K8T800 chipset, write
534 * reordering to the mailbox registers done by the host
535 * controller can cause major troubles. We read back from
536 * every mailbox register write to force the writes to be
537 * posted to the chip in order.
540 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
541 &tp->pci_cacheline_sz);
542 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
544 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
545 tp->pci_lat_timer < 64) {
546 tp->pci_lat_timer = 64;
547 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
551 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
552 /* 5700 BX chips need to have their TX producer index
553 * mailboxes written twice to workaround a bug.
555 tg3_flag_set(tp, TXD_MBOX_HWBUG);
557 /* If we are in PCI-X mode, enable register write workaround.
559 * The workaround is to use indirect register accesses
560 * for all chip writes not to mailbox registers.
562 if (tg3_flag(tp, PCIX_MODE)) {
565 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
567 /* The chip can have it's power management PCI config
568 * space registers clobbered due to this bug.
569 * So explicitly force the chip into D0 here.
571 pci_read_config_dword(tp->pdev,
572 tp->pm_cap + PCI_PM_CTRL,
574 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
575 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
576 pci_write_config_dword(tp->pdev,
577 tp->pm_cap + PCI_PM_CTRL,
580 /* Also, force SERR#/PERR# in PCI command. */
581 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
582 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
583 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
587 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
588 tg3_flag_set(tp, PCI_HIGH_SPEED);
589 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
590 tg3_flag_set(tp, PCI_32BIT);
592 /* Chip-specific fixup from Broadcom driver */
593 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
594 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
595 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
596 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
599 tp->write32_mbox = tg3_write_indirect_reg32;
600 tp->write32_rx_mbox = tg3_write_indirect_mbox;
601 tp->write32_tx_mbox = tg3_write_indirect_mbox;
602 tp->read32_mbox = tg3_read_indirect_mbox;
604 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
605 tp->read32_mbox = tg3_read32_mbox_5906;
606 tp->write32_mbox = tg3_write32_mbox_5906;
607 tp->write32_tx_mbox = tg3_write32_mbox_5906;
608 tp->write32_rx_mbox = tg3_write32_mbox_5906;
611 /* Get eeprom hw config before calling tg3_set_power_state().
612 * In particular, the TG3_FLAG_IS_NIC flag must be
613 * determined before calling tg3_set_power_state() so that
614 * we know whether or not to switch out of Vaux power.
615 * When the flag is set, it means that GPIO1 is used for eeprom
616 * write protect and also implies that it is a LOM where GPIOs
617 * are not used to switch power.
619 tg3_get_eeprom_hw_cfg(tp);
621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
622 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
623 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
624 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
625 tg3_flag(tp, 57765_PLUS))
626 tg3_flag_set(tp, CPMU_PRESENT);
628 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
629 * GPIO1 driven high will bring 5700's external PHY out of reset.
630 * It is also used as eeprom write protect on LOMs.
632 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
633 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
634 tg3_flag(tp, EEPROM_WRITE_PROT))
635 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
636 GRC_LCLCTRL_GPIO_OUTPUT1);
637 /* Unused GPIO3 must be driven as output on 5752 because there
638 * are no pull-up resistors on unused GPIO pins.
640 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
641 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
643 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
644 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
645 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
646 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
648 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
649 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
650 /* Turn off the debug UART. */
651 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
652 if (tg3_flag(tp, IS_NIC))
653 /* Keep VMain power. */
654 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
655 GRC_LCLCTRL_GPIO_OUTPUT0;
658 /* Force the chip into D0. */
659 tg3_set_power_state_0(tp);
661 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
662 tp->phy_flags |= TG3_PHYFLG_IS_FET;
664 /* A few boards don't want Ethernet@WireSpeed phy feature */
665 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
666 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
667 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
668 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
669 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
670 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
671 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
673 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
674 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
675 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
676 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
677 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
679 if (tg3_flag(tp, 5705_PLUS) &&
680 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
681 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
682 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
683 !tg3_flag(tp, 57765_PLUS)) {
684 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
685 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
686 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
687 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
688 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
689 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
690 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
691 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
692 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
694 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
697 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
698 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
699 tp->phy_otp = tg3_read_otp_phycfg(tp);
700 if (tp->phy_otp == 0)
701 tp->phy_otp = TG3_OTP_DEFAULT;
704 if (tg3_flag(tp, CPMU_PRESENT))
705 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
707 tp->mi_mode = MAC_MI_MODE_BASE;
709 tp->coalesce_mode = 0;
710 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
711 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
712 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
714 /* Set these bits to enable statistics workaround. */
715 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
716 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
717 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
718 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
719 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
724 /* Initialize data/descriptor byte/word swapping. */
725 val = tr32(GRC_MODE);
726 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
727 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
728 GRC_MODE_WORD_SWAP_B2HRX_DATA |
729 GRC_MODE_B2HRX_ENABLE |
730 GRC_MODE_HTX2B_ENABLE |
731 GRC_MODE_HOST_STACKUP);
733 val &= GRC_MODE_HOST_STACKUP;
735 tw32(GRC_MODE, val | tp->grc_mode);
737 tg3_switch_clocks(tp);
739 /* Clear this out for sanity. */
740 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
742 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
744 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
745 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
746 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
748 if (chiprevid == CHIPREV_ID_5701_A0 ||
749 chiprevid == CHIPREV_ID_5701_B0 ||
750 chiprevid == CHIPREV_ID_5701_B2 ||
751 chiprevid == CHIPREV_ID_5701_B5) {
754 /* Write some dummy words into the SRAM status block
755 * area, see if it reads back correctly. If the return
756 * value is bad, force enable the PCIX workaround.
758 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
760 writel(0x00000000, sram_base);
761 writel(0x00000000, sram_base + 4);
762 writel(0xffffffff, sram_base + 4);
763 if (readl(sram_base) != 0x00000000)
764 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
769 /* FIXME: do we need nvram access? */
770 /// tg3_nvram_init(tp);
772 grc_misc_cfg = tr32(GRC_MISC_CFG);
773 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
775 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
776 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
777 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
778 tg3_flag_set(tp, IS_5788);
780 if (!tg3_flag(tp, IS_5788) &&
781 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
782 tg3_flag_set(tp, TAGGED_STATUS);
783 if (tg3_flag(tp, TAGGED_STATUS)) {
784 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
785 HOSTCC_MODE_CLRTICK_TXBD);
787 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
788 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
792 /* Preserve the APE MAC_MODE bits */
793 if (tg3_flag(tp, ENABLE_APE))
794 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
796 tp->mac_mode = TG3_DEF_MAC_MODE;
798 /* these are limited to 10/100 only */
799 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
800 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
801 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
802 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
803 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
804 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
805 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
806 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
807 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
808 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
809 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
810 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
811 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
812 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
813 (tp->phy_flags & TG3_PHYFLG_IS_FET))
814 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
816 err = tg3_phy_probe(tp);
818 DBGC(&tp->pdev->dev, "phy probe failed, err: %s\n", strerror(err));
819 /* ... but do not return immediately ... */
822 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
823 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
825 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
826 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
828 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
831 /* For all SERDES we poll the MAC status register. */
832 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
833 tg3_flag_set(tp, POLL_SERDES);
835 tg3_flag_clear(tp, POLL_SERDES);
837 /* Increment the rx prod index on the rx std ring by at most
838 * 8 for these chips to workaround hw errata.
840 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
841 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
842 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
843 tp->rx_std_max_post = 8;
848 void tg3_init_bufmgr_config(struct tg3 *tp)
849 { DBGP("%s\n", __func__);
851 if (tg3_flag(tp, 57765_PLUS)) {
852 tp->bufmgr_config.mbuf_read_dma_low_water =
853 DEFAULT_MB_RDMA_LOW_WATER_5705;
854 tp->bufmgr_config.mbuf_mac_rx_low_water =
855 DEFAULT_MB_MACRX_LOW_WATER_57765;
856 tp->bufmgr_config.mbuf_high_water =
857 DEFAULT_MB_HIGH_WATER_57765;
859 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
860 DEFAULT_MB_RDMA_LOW_WATER_5705;
861 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
862 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
863 tp->bufmgr_config.mbuf_high_water_jumbo =
864 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
865 } else if (tg3_flag(tp, 5705_PLUS)) {
866 tp->bufmgr_config.mbuf_read_dma_low_water =
867 DEFAULT_MB_RDMA_LOW_WATER_5705;
868 tp->bufmgr_config.mbuf_mac_rx_low_water =
869 DEFAULT_MB_MACRX_LOW_WATER_5705;
870 tp->bufmgr_config.mbuf_high_water =
871 DEFAULT_MB_HIGH_WATER_5705;
872 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
873 tp->bufmgr_config.mbuf_mac_rx_low_water =
874 DEFAULT_MB_MACRX_LOW_WATER_5906;
875 tp->bufmgr_config.mbuf_high_water =
876 DEFAULT_MB_HIGH_WATER_5906;
879 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
880 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
881 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
882 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
883 tp->bufmgr_config.mbuf_high_water_jumbo =
884 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
886 tp->bufmgr_config.mbuf_read_dma_low_water =
887 DEFAULT_MB_RDMA_LOW_WATER;
888 tp->bufmgr_config.mbuf_mac_rx_low_water =
889 DEFAULT_MB_MACRX_LOW_WATER;
890 tp->bufmgr_config.mbuf_high_water =
891 DEFAULT_MB_HIGH_WATER;
893 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
894 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
895 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
896 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
897 tp->bufmgr_config.mbuf_high_water_jumbo =
898 DEFAULT_MB_HIGH_WATER_JUMBO;
901 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
902 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
905 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
907 void tg3_wait_for_event_ack(struct tg3 *tp)
908 { DBGP("%s\n", __func__);
912 for (i = 0; i < TG3_FW_EVENT_TIMEOUT_USEC / 10; i++) {
913 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
920 void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
921 { DBGP("%s\n", __func__);
923 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
924 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
927 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
928 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
930 /* Always leave this as zero. */
931 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
934 static void tg3_stop_fw(struct tg3 *tp)
935 { DBGP("%s\n", __func__);
937 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
938 /* Wait for RX cpu to ACK the previous event. */
939 tg3_wait_for_event_ack(tp);
941 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
943 tg3_generate_fw_event(tp);
945 /* Wait for RX cpu to ACK this event. */
946 tg3_wait_for_event_ack(tp);
950 static void tg3_write_sig_pre_reset(struct tg3 *tp)
951 { DBGP("%s\n", __func__);
953 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
954 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
957 void tg3_disable_ints(struct tg3 *tp)
958 { DBGP("%s\n", __func__);
960 tw32(TG3PCI_MISC_HOST_CTRL,
961 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
963 tw32_mailbox_f(tp->int_mbox, 0x00000001);
966 void tg3_enable_ints(struct tg3 *tp)
967 { DBGP("%s\n", __func__);
969 tw32(TG3PCI_MISC_HOST_CTRL,
970 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
972 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
974 tw32_mailbox_f(tp->int_mbox, tp->last_tag << 24);
976 /* Force an initial interrupt */
977 if (!tg3_flag(tp, TAGGED_STATUS) &&
978 (tp->hw_status->status & SD_STATUS_UPDATED))
979 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
981 tw32(HOSTCC_MODE, tp->coal_now);
984 #define MAX_WAIT_CNT 1000
986 /* To stop a block, clear the enable bit and poll till it clears. */
987 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
988 { DBGP("%s\n", __func__);
993 if (tg3_flag(tp, 5705_PLUS)) {
1000 /* We can't enable/disable these bits of the
1001 * 5705/5750, just say success.
1014 for (i = 0; i < MAX_WAIT_CNT; i++) {
1017 if ((val & enable_bit) == 0)
1021 if (i == MAX_WAIT_CNT) {
1022 DBGC(&tp->pdev->dev,
1023 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
/* Quiesce the whole chip: disable interrupts, then shut down the receive
 * path, the transmit path, DMA, host coalescing and the buffer manager in
 * dependency order, finishing by clearing the host status block. The
 * OR-accumulated result of all tg3_stop_block() calls is the error status. */
1031 static int tg3_abort_hw(struct tg3 *tp)
1032 { DBGP("%s\n", __func__);
1036 tg3_disable_ints(tp);
/* Stop accepting frames before tearing down the receive engines. */
1038 tp->rx_mode &= ~RX_MODE_ENABLE;
1039 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Receive-side engine blocks. */
1042 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
1043 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
1044 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
1045 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
1046 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
1047 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
/* Send-side engine blocks and read DMA. */
1049 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
1050 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
1051 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
1052 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
1053 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
1054 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
1055 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
/* Turn off the MAC transmit DMA engine and the TX state machine. */
1057 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
1058 tw32_f(MAC_MODE, tp->mac_mode);
1061 tp->tx_mode &= ~TX_MODE_ENABLE;
1062 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* Wait for TX_MODE_ENABLE to read back clear (same bound as stop_block). */
1064 for (i = 0; i < MAX_WAIT_CNT; i++) {
1066 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
1069 if (i >= MAX_WAIT_CNT) {
1070 DBGC(&tp->pdev->dev,
1071 "%s timed out, TX_MODE_ENABLE will not clear "
1072 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
/* Host coalescing, write DMA and mailbox-free blocks. */
1076 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
1077 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
1078 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
/* Pulse the flow-through queue reset. */
1080 tw32(FTQ_RESET, 0xffffffff);
1081 tw32(FTQ_RESET, 0x00000000);
/* Buffer manager and memory arbiter last. */
1083 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
1084 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
/* Wipe the host-memory status block now that nothing is writing to it. */
1087 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
/* Program the device's MAC address (taken from tp->dev->ll_addr) into the
 * chip's MAC address slots. @skip_mac_1 leaves slot 1 untouched. Also seeds
 * the TX backoff register from the byte sum of the address. */
1092 void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
1093 { DBGP("%s\n", __func__);
1095 u32 addr_high, addr_low;
/* Split the 6-byte address as the hardware expects: two high bytes in
 * addr_high, the remaining four (big-endian order) in addr_low. */
1098 addr_high = ((tp->dev->ll_addr[0] << 8) |
1099 tp->dev->ll_addr[1]);
1100 addr_low = ((tp->dev->ll_addr[2] << 24) |
1101 (tp->dev->ll_addr[3] << 16) |
1102 (tp->dev->ll_addr[4] << 8) |
1103 (tp->dev->ll_addr[5] << 0));
/* Write the same address into all four MAC_ADDR_* slots (8 bytes apart). */
1104 for (i = 0; i < 4; i++) {
1105 if (i == 1 && skip_mac_1)
1107 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
1108 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
/* 5703/5704 also carry 12 extended address slots; fill them identically. */
1111 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1112 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1113 for (i = 0; i < 12; i++) {
1114 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
1115 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
/* Backoff seed = sum of address bytes, masked; reuses addr_high as temp. */
1119 addr_high = (tp->dev->ll_addr[0] +
1120 tp->dev->ll_addr[1] +
1121 tp->dev->ll_addr[2] +
1122 tp->dev->ll_addr[3] +
1123 tp->dev->ll_addr[4] +
1124 tp->dev->ll_addr[5]) &
1125 TX_BACKOFF_SEED_MASK;
1126 tw32(MAC_TX_BACKOFF_SEED, addr_high);
1129 /* Save PCI command register before chip reset */
/* Only PCI_COMMAND is saved here (into tp->pci_cmd); the rest of the PCI
 * state restored by tg3_restore_pci_state() comes from fields captured
 * elsewhere during probe. */
1130 static void tg3_save_pci_state(struct tg3 *tp)
1131 { DBGP("%s\n", __func__);
1133 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
1136 /* Restore PCI state after chip reset */
1137 static void tg3_restore_pci_state(struct tg3 *tp)
1138 { DBGP("%s\n", __func__);
1142 /* Re-enable indirect register accesses. */
1143 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
1144 tp->misc_host_ctrl);
1146 /* Set MAX PCI retry to zero. */
1147 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
/* 5704 A0 in PCI-X mode needs the retry-same-DMA workaround bit. */
1148 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
1149 tg3_flag(tp, PCIX_MODE))
1150 val |= PCISTATE_RETRY_SAME_DMA;
1152 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
/* Restore the PCI command word saved by tg3_save_pci_state(). */
1154 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
/* Cache line size / latency timer restored from probe-time copies;
 * skipped on 5785 (no such restore needed there, per this condition). */
1156 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
1157 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
1158 tp->pci_cacheline_sz);
1159 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
1164 /* Make sure PCI-X relaxed ordering bit is clear. */
1165 if (tg3_flag(tp, PCIX_MODE)) {
1168 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
1170 pcix_cmd &= ~PCI_X_CMD_ERO;
1171 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
/* Wait for on-chip firmware to finish initializing after a reset.
 * A missing-firmware timeout is deliberately NOT treated as fatal (some
 * boards ship without firmware); it is reported once via a flag. */
1176 static int tg3_poll_fw(struct tg3 *tp)
1177 { DBGP("%s\n", __func__);
1182 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1183 /* Wait up to 20ms for init done. */
/* 5906 uses the VCPU status register instead of the SRAM mailbox. */
1184 for (i = 0; i < 200; i++) {
1185 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1192 /* Wait for firmware initialization to complete. */
/* Firmware writes the bitwise complement of MAGIC1 into the mailbox
 * when it is done booting. */
1193 for (i = 0; i < 100000; i++) {
1194 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1195 if (val == (u32)~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1200 /* Chip might not be fitted with firmware. Some Sun onboard
1201 * parts are configured like that. So don't signal the timeout
1202 * of the above loop as an error, but do report the lack of
1203 * running firmware once.
1205 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1206 tg3_flag_set(tp, NO_FWARE_REPORTED);
1208 DBGC(tp->dev, "No firmware running\n");
1211 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1212 /* The 57765 A0 needs a little more
1213 * time to do some important work.
/* Acquire the NVRAM software arbitration lock. The lock is reference
 * counted via tp->nvram_lock_cnt; the hardware request is only issued on
 * the 0 -> 1 transition. No-op for parts without the NVRAM flag. */
1221 static int tg3_nvram_lock(struct tg3 *tp)
1222 { DBGP("%s\n", __func__);
1224 if (tg3_flag(tp, NVRAM)) {
1227 if (tp->nvram_lock_cnt == 0) {
/* Request arbitration slot 1 and poll for the grant. */
1228 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
1229 for (i = 0; i < 8000; i++) {
1230 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
/* Grant never arrived: withdraw the request before failing. */
1235 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
1239 tp->nvram_lock_cnt++;
/* Release one reference on the NVRAM arbitration lock; the hardware
 * release is issued only when the count drops to zero. */
1244 static void tg3_nvram_unlock(struct tg3 *tp)
1245 { DBGP("%s\n", __func__);
1247 if (tg3_flag(tp, NVRAM)) {
1248 if (tp->nvram_lock_cnt > 0)
1249 tp->nvram_lock_cnt--;
1250 if (tp->nvram_lock_cnt == 0)
1251 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
/* Perform a full GRC core-clock reset of the chip and bring it back to a
 * usable state: save PCI config, issue the reset, restore PCI config,
 * re-apply workarounds per chip revision, and wait for firmware.
 * The ordering of register accesses here is hardware-mandated; do not
 * reorder statements. */
1255 static int tg3_chip_reset(struct tg3 *tp)
1256 { DBGP("%s\n", __func__);
1264 /* No matching tg3_nvram_unlock() after this because
1265 * chip reset below will undo the nvram lock.
1267 tp->nvram_lock_cnt = 0;
1269 /* GRC_MISC_CFG core clock reset will clear the memory
1270 * enable bit in PCI register 4 and the MSI enable bit
1271 * on some chips, so we save relevant registers here.
1273 tg3_save_pci_state(tp);
1275 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
1276 tg3_flag(tp, 5755_PLUS))
1277 tw32(GRC_FASTBOOT_PC, 0);
1281 * We must avoid the readl() that normally takes place.
1282 * It locks machines, causes machine checks, and other
1283 * fun things. So, temporarily disable the 5701
1284 * hardware workaround, while we do the reset.
/* Swap out the flushing write op for a plain write during the reset;
 * the original is restored elsewhere (restore not visible in this view). */
1286 write_op = tp->write32;
1287 if (write_op == tg3_write_flush_reg32)
1288 tp->write32 = tg3_write32;
1291 /* Prevent the irq handler from reading or writing PCI registers
1292 * during chip reset when the memory enable bit in the PCI command
1293 * register may be cleared. The chip does not generate interrupt
1294 * at this time, but the irq handler may still be called due to irq
1295 * sharing or irqpoll.
1297 tg3_flag_set(tp, CHIP_RESETTING);
1299 if (tp->hw_status) {
1300 tp->hw_status->status = 0;
1301 tp->hw_status->status_tag = 0;
1304 tp->last_irq_tag = 0;
/* 57780: disable L1 PLL power-down before resetting. */
1308 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
1309 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
1310 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
/* Build the GRC_MISC_CFG value that triggers the core-clock reset. */
1314 val = GRC_MISC_CFG_CORECLK_RESET;
1316 if (tg3_flag(tp, PCI_EXPRESS)) {
1317 /* Force PCIe 1.0a mode */
1318 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
1319 !tg3_flag(tp, 57765_PLUS) &&
1320 tr32(TG3_PCIE_PHY_TSTCTL) ==
1321 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
1322 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
1324 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
1325 tw32(GRC_MISC_CFG, (1 << 29));
/* 5906: flag a driver-initiated reset and un-halt the VCPU. */
1330 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1331 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
1332 tw32(GRC_VCPU_EXT_CTRL,
1333 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
1336 /* Manage gphy power for all CPMU absent PCIe devices. */
1337 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
1338 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
/* This write actually resets the chip core. */
1340 tw32(GRC_MISC_CFG, val)
1342 /* Unfortunately, we have to delay before the PCI read back.
1343 * Some 575X chips even will not respond to a PCI cfg access
1344 * when the reset command is given to the chip.
1346 * How do these hardware designers expect things to work
1347 * properly if the PCI write is posted for a long period
1348 * of time? It is always necessary to have some method by
1349 * which a register read back can occur to push the write
1350 * out which does the reset.
1352 * For most tg3 variants the trick below was working.
1357 /* Flush PCI posted writes. The normal MMIO registers
1358 * are inaccessible at this time so this is the only
1359 * way to make this reliably (actually, this is no longer
1360 * the case, see above). I tried to use indirect
1361 * register read/write but this upset some 5701 variants.
1363 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
/* PCIe-specific post-reset fixups. */
1367 if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
1370 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
1374 /* Wait for link training to complete. */
1375 for (i = 0; i < 5000; i++)
1378 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
1379 pci_write_config_dword(tp->pdev, 0xc4,
1380 cfg_val | (1 << 15));
1383 /* Clear the "no snoop" and "relaxed ordering" bits. */
1384 pci_read_config_word(tp->pdev,
1385 tp->pcie_cap + PCI_EXP_DEVCTL,
1387 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
1388 PCI_EXP_DEVCTL_NOSNOOP_EN);
1390 * Older PCIe devices only support the 128 byte
1391 * MPS setting. Enforce the restriction.
1393 if (!tg3_flag(tp, CPMU_PRESENT))
1394 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
1395 pci_write_config_word(tp->pdev,
1396 tp->pcie_cap + PCI_EXP_DEVCTL,
1399 /* Clear error status */
1400 pci_write_config_word(tp->pdev,
1401 tp->pcie_cap + PCI_EXP_DEVSTA,
1402 PCI_EXP_DEVSTA_CED |
1403 PCI_EXP_DEVSTA_NFED |
1404 PCI_EXP_DEVSTA_FED |
1405 PCI_EXP_DEVSTA_URD);
1408 tg3_restore_pci_state(tp);
1410 tg3_flag_clear(tp, CHIP_RESETTING);
1411 tg3_flag_clear(tp, ERROR_PROCESSED);
/* Re-enable the memory arbiter (reset cleared it). */
1414 if (tg3_flag(tp, 5780_CLASS))
1415 val = tr32(MEMARB_MODE);
1416 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
1418 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
1420 tw32(0x5000, 0x400);
1423 tw32(GRC_MODE, tp->grc_mode);
/* 5705 A0 errata: set bit 15 of register 0xc4. */
1425 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
1428 tw32(0xc4, val | (1 << 15));
1431 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
1432 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1433 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
1434 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
1435 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
1436 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* Pick the MAC port mode to match the PHY type. */
1439 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
1440 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
1442 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
1443 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1448 tw32_f(MAC_MODE, val);
1451 err = tg3_poll_fw(tp);
/* PCIe (non-5750 A0, non-5785, pre-57765) errata: set bit 25 at 0x7c00. */
1455 if (tg3_flag(tp, PCI_EXPRESS) &&
1456 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
1457 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
1458 !tg3_flag(tp, 57765_PLUS)) {
1461 tw32(0x7c00, val | (1 << 25));
1464 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
1465 val = tr32(TG3_CPMU_CLCK_ORIDE);
1466 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
1469 if (tg3_flag(tp, CPMU_PRESENT)) {
1470 tw32(TG3_CPMU_D0_CLCK_POLICY, 0);
1471 val = tr32(TG3_CPMU_CLCK_ORIDE_EN);
1472 tw32(TG3_CPMU_CLCK_ORIDE_EN,
1473 val | CPMU_CLCK_ORIDE_MAC_CLCK_ORIDE_EN);
/* Halt the device: signal pre-reset state to firmware, reset the chip,
 * then reprogram the MAC address (the reset clears it). */
1479 int tg3_halt(struct tg3 *tp)
1480 { DBGP("%s\n", __func__);
1486 tg3_write_sig_pre_reset(tp);
1489 err = tg3_chip_reset(tp);
1491 __tg3_set_mac_addr(tp, 0);
/* Fallback NVRAM read for parts without the NVRAM interface: drive the
 * legacy EEPROM state machine through GRC_EEPROM_ADDR/DATA. @offset must
 * be 32-bit aligned and within EEPROM_ADDR_ADDR_MASK. */
1499 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
1500 u32 offset, u32 *val)
1501 { DBGP("%s\n", __func__);
1506 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
/* Preserve the non-address bits of the EEPROM address register. */
1509 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
1510 EEPROM_ADDR_DEVID_MASK |
/* Kick off the read for device 0 at the requested offset. */
1512 tw32(GRC_EEPROM_ADDR,
1514 (0 << EEPROM_ADDR_DEVID_SHIFT) |
1515 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
1516 EEPROM_ADDR_ADDR_MASK) |
1517 EEPROM_ADDR_READ | EEPROM_ADDR_START);
/* Poll for completion (up to 1000 iterations). */
1519 for (i = 0; i < 1000; i++) {
1520 tmp = tr32(GRC_EEPROM_ADDR);
1522 if (tmp & EEPROM_ADDR_COMPLETE)
1526 if (!(tmp & EEPROM_ADDR_COMPLETE))
1529 tmp = tr32(GRC_EEPROM_DATA);
1532 * The data will always be opposite the native endian
1533 * format. Perform a blind byteswap to compensate.
1535 *val = bswap_32(tmp);
/* Translate a linear NVRAM offset to the physical address used by Atmel
 * AT45DB0x1B-style flash, which addresses by (page << page-pos) + offset
 * rather than linearly. Other configurations pass @addr through. */
1540 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
1541 { DBGP("%s\n", __func__);
1543 if (tg3_flag(tp, NVRAM) &&
1544 tg3_flag(tp, NVRAM_BUFFERED) &&
1545 tg3_flag(tp, FLASH) &&
1546 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
1547 (tp->nvram_jedecnum == JEDEC_ATMEL))
1549 addr = ((addr / tp->nvram_pagesize) <<
1550 ATMEL_AT45DB0X1B_PAGE_POS) +
1551 (addr % tp->nvram_pagesize);
/* Set ACCESS_ENABLE in NVRAM_ACCESS on 5750+ parts whose NVRAM is not
 * write-protected; other parts need no explicit enable. */
1556 static void tg3_enable_nvram_access(struct tg3 *tp)
1557 { DBGP("%s\n", __func__);
1559 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
1560 u32 nvaccess = tr32(NVRAM_ACCESS);
1562 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
/* Mirror of tg3_enable_nvram_access(): clear ACCESS_ENABLE again. */
1566 static void tg3_disable_nvram_access(struct tg3 *tp)
1567 { DBGP("%s\n", __func__);
1569 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
1570 u32 nvaccess = tr32(NVRAM_ACCESS);
1572 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
1576 #define NVRAM_CMD_TIMEOUT 10000
/* Issue @nvram_cmd to the NVRAM controller and poll NVRAM_CMD_DONE, up to
 * NVRAM_CMD_TIMEOUT iterations. */
1578 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
1579 { DBGP("%s\n", __func__);
1583 tw32(NVRAM_CMD, nvram_cmd);
1584 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
1586 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
/* Loop ran to completion without DONE: command timed out. */
1592 if (i == NVRAM_CMD_TIMEOUT)
1598 /* NOTE: Data read in from NVRAM is byteswapped according to
1599 * the byteswapping settings for all other register accesses.
1600 * tg3 devices are BE devices, so on a BE machine, the data
1601 * returned will be exactly as it is seen in NVRAM. On a LE
1602 * machine, the 32-bit value will be byteswapped.
/* Read one 32-bit word from NVRAM at @offset into *@val, taking the
 * arbitration lock and enabling access around the command. Falls back to
 * the EEPROM path for parts without the NVRAM flag. */
1604 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
1605 { DBGP("%s\n", __func__);
1609 if (!tg3_flag(tp, NVRAM))
1610 return tg3_nvram_read_using_eeprom(tp, offset, val);
/* Translate to the flash's physical addressing scheme if needed. */
1612 offset = tg3_nvram_phys_addr(tp, offset);
1614 if (offset > NVRAM_ADDR_MSK)
1617 ret = tg3_nvram_lock(tp);
1621 tg3_enable_nvram_access(tp);
/* Single-word read: FIRST and LAST both set. */
1623 tw32(NVRAM_ADDR, offset);
1624 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
1625 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
1628 *val = tr32(NVRAM_RDDATA);
1630 tg3_disable_nvram_access(tp);
1632 tg3_nvram_unlock(tp);
1637 /* Ensures NVRAM data is in bytestream format. */
/* Wrapper around tg3_nvram_read() that converts the word to big-endian
 * (network/bytestream) order before storing it in *@val. */
1638 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, u32 *val)
1639 { DBGP("%s\n", __func__);
1642 int res = tg3_nvram_read(tp, offset, &v);
1644 *val = cpu_to_be32(v);
/* Determine the device MAC address, trying sources in order of trust:
 * 1) the SRAM MAC address mailbox, 2) NVRAM, 3) the live MAC address
 * registers. Writes the result into tp->dev->hw_addr and validates it. */
1648 int tg3_get_device_address(struct tg3 *tp)
1649 { DBGP("%s\n", __func__);
1651 struct net_device *dev = tp->dev;
1652 u32 hi, lo, mac_offset;
/* Dual-MAC parts: pick the NVRAM offset for the active MAC function. */
1656 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1657 tg3_flag(tp, 5780_CLASS)) {
1658 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
1660 if (tg3_nvram_lock(tp))
1661 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
1663 tg3_nvram_unlock(tp);
1664 } else if (tg3_flag(tp, 5717_PLUS)) {
/* 5717+: per-function MAC address blocks in NVRAM. */
1665 if (PCI_FUNC(tp->pdev->busdevfn) & 1)
1667 if (PCI_FUNC(tp->pdev->busdevfn) > 1)
1668 mac_offset += 0x18c;
1669 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
1672 /* First try to get it from MAC address mailbox. */
1673 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* 0x484b == "HK": signature indicating bootcode stored an address. */
1674 if ((hi >> 16) == 0x484b) {
1675 dev->hw_addr[0] = (hi >> 8) & 0xff;
1676 dev->hw_addr[1] = (hi >> 0) & 0xff;
1678 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
1679 dev->hw_addr[2] = (lo >> 24) & 0xff;
1680 dev->hw_addr[3] = (lo >> 16) & 0xff;
1681 dev->hw_addr[4] = (lo >> 8) & 0xff;
1682 dev->hw_addr[5] = (lo >> 0) & 0xff;
1684 /* Some old bootcode may report a 0 MAC address in SRAM */
1685 addr_ok = is_valid_ether_addr(&dev->hw_addr[0]);
1688 /* Next, try NVRAM. */
1689 if (!tg3_flag(tp, NO_NVRAM) &&
1690 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
1691 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
/* NVRAM words are big-endian: last 2 bytes of hi + all 4 of lo. */
1692 memcpy(&dev->hw_addr[0], ((char *)&hi) + 2, 2);
1693 memcpy(&dev->hw_addr[2], (char *)&lo, sizeof(lo));
1695 /* Finally just fetch it out of the MAC control regs. */
1697 hi = tr32(MAC_ADDR_0_HIGH);
1698 lo = tr32(MAC_ADDR_0_LOW);
1700 dev->hw_addr[5] = lo & 0xff;
1701 dev->hw_addr[4] = (lo >> 8) & 0xff;
1702 dev->hw_addr[3] = (lo >> 16) & 0xff;
1703 dev->hw_addr[2] = (lo >> 24) & 0xff;
1704 dev->hw_addr[1] = hi & 0xff;
1705 dev->hw_addr[0] = (hi >> 8) & 0xff;
/* If every source produced an invalid address, fail. */
1709 if (!is_valid_ether_addr(&dev->hw_addr[0])) {
/* Configure the receive mode: keep VLAN tags, accept all multicast (all
 * hash registers set to ones), and write MAC_RX_MODE only if the computed
 * mode differs from the cached value. */
1716 static void __tg3_set_rx_mode(struct net_device *dev)
1717 { DBGP("%s\n", __func__);
1719 struct tg3 *tp = netdev_priv(dev);
1722 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
1723 RX_MODE_KEEP_VLAN_TAG);
1725 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
1727 /* Accept all multicast. */
1728 tw32(MAC_HASH_REG_0, 0xffffffff);
1729 tw32(MAC_HASH_REG_1, 0xffffffff);
1730 tw32(MAC_HASH_REG_2, 0xffffffff);
1731 tw32(MAC_HASH_REG_3, 0xffffffff);
/* Avoid a redundant (flushing) register write when nothing changed. */
1733 if (rx_mode != tp->rx_mode) {
1734 tp->rx_mode = rx_mode;
1735 tw32_f(MAC_RX_MODE, rx_mode);
/* Program the host coalescing engine with fixed low-latency settings
 * suitable for iPXE's polled, single-queue operation. */
1740 static void __tg3_set_coalesce(struct tg3 *tp)
1741 { DBGP("%s\n", __func__);
1744 tw32(HOSTCC_RXCOL_TICKS, 0);
1745 tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
1746 tw32(HOSTCC_RXMAX_FRAMES, 1);
1747 /* FIXME: mix between TXMAX and RXMAX taken from legacy driver */
1748 tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
1749 tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
1750 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
/* Pre-5705 chips also have tick-interrupt and statistics coalescing. */
1752 if (!tg3_flag(tp, 5705_PLUS)) {
1753 u32 val = DEFAULT_STAT_COAL_TICKS;
1755 tw32(HOSTCC_RXCOAL_TICK_INT, DEFAULT_RXCOAL_TICK_INT);
1756 tw32(HOSTCC_TXCOAL_TICK_INT, DEFAULT_TXCOAL_TICK_INT);
/* When the link is down, the stats tick value is adjusted (adjustment
 * itself not visible in this view). */
1758 if (!netdev_link_ok(tp->dev))
1761 tw32(HOSTCC_STAT_COAL_TICKS, val);
/* Fill in one TG3_BDINFO structure in NIC SRAM: 64-bit host DMA address
 * of the ring, its maxlen/flags word and (pre-5705 only) the NIC-side
 * descriptor address. */
1765 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
1766 dma_addr_t mapping, u32 maxlen_flags,
1768 { DBGP("%s\n", __func__);
1771 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
1772 ((u64) mapping >> 32));
1774 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
1775 ((u64) mapping & 0xffffffff));
1777 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
/* 5705+ parts do not use the NIC-side descriptor address field. */
1780 if (!tg3_flag(tp, 5705_PLUS))
1782 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
/* Reset all descriptor rings to a clean single-queue state: disable every
 * TX/RX-return ring beyond the first, zero the mailboxes and status block,
 * then program the BDINFO entries for the one TX ring and one RX-return
 * ring that iPXE uses. */
1786 static void tg3_rings_reset(struct tg3 *tp)
1787 { DBGP("%s\n", __func__);
1790 u32 txrcb, rxrcb, limit;
1792 /* Disable all transmit rings but the first. */
/* The number of send BDINFO slots varies by chip family. */
1793 if (!tg3_flag(tp, 5705_PLUS))
1794 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
1795 else if (tg3_flag(tp, 5717_PLUS))
1796 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
1797 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
1798 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
1800 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
1802 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
1803 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
1804 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
1805 BDINFO_FLAGS_DISABLED);
1808 /* Disable all receive return rings but the first. */
1809 if (tg3_flag(tp, 5717_PLUS))
1810 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
1811 else if (!tg3_flag(tp, 5705_PLUS))
1812 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
1813 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
1814 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
1815 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
1817 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
1819 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
1820 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
1821 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
1822 BDINFO_FLAGS_DISABLED);
1824 /* Disable interrupts */
1825 tw32_mailbox_f(tp->int_mbox, 1);
/* Zero the producer/consumer mailboxes for the first rings. */
1829 tw32_mailbox(tp->prodmbox, 0);
1830 tw32_rx_mbox(tp->consmbox, 0);
1832 /* Make sure the NIC-based send BD rings are disabled. */
1833 if (!tg3_flag(tp, 5705_PLUS)) {
1834 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
1835 for (i = 0; i < 16; i++)
1836 tw32_tx_mbox(mbox + i * 8, 0);
1839 txrcb = NIC_SRAM_SEND_RCB;
1840 rxrcb = NIC_SRAM_RCV_RET_RCB;
1842 /* Clear status block in ram. */
1843 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
1845 /* Set status block DMA address */
1846 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
1847 ((u64) tp->status_mapping >> 32));
1848 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
1849 ((u64) tp->status_mapping & 0xffffffff));
/* Program the first (and only used) TX ring's BDINFO. */
1852 tg3_set_bdinfo(tp, txrcb, tp->tx_desc_mapping,
1853 (TG3_TX_RING_SIZE <<
1854 BDINFO_FLAGS_MAXLEN_SHIFT),
1855 NIC_SRAM_TX_BUFFER_DESC);
1856 txrcb += TG3_BDINFO_SIZE;
1859 /* FIXME: will TG3_RX_RET_MAX_SIZE_5705 work on all cards? */
1861 tg3_set_bdinfo(tp, rxrcb, tp->rx_rcb_mapping,
1862 TG3_RX_RET_MAX_SIZE_5705 <<
1863 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
1864 rxrcb += TG3_BDINFO_SIZE;
/* Set the standard RX buffer-descriptor replenish threshold, and on
 * 57765+ the low-water mark matched to the chip's BD cache size. */
1868 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
1869 { DBGP("%s\n", __func__);
1871 u32 val, bdcache_maxcnt;
/* Select the per-family standard-ring BD cache size. */
1873 if (!tg3_flag(tp, 5750_PLUS) ||
1874 tg3_flag(tp, 5780_CLASS) ||
1875 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
1876 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
1877 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
1878 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
1879 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
1880 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
1882 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
1885 /* NOTE: legacy driver uses RX_PENDING / 8, we only use 4 descriptors
1886 * for now, use / 4 so the result is > 0
1888 val = TG3_DEF_RX_RING_PENDING / 4;
1889 tw32(RCVBDI_STD_THRESH, val);
1891 if (tg3_flag(tp, 57765_PLUS))
1892 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
1895 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1896 { DBGP("%s\n", __func__);
1898 u32 val, rdmac_mode;
1900 struct tg3_rx_prodring_set *tpr = &tp->prodring;
1904 tg3_write_sig_pre_reset(tp);
1906 if (tg3_flag(tp, INIT_COMPLETE))
1912 err = tg3_chip_reset(tp);
1916 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
1917 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
1918 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
1919 PCIE_PWR_MGMT_L1_THRESH_4MS;
1920 tw32(PCIE_PWR_MGMT_THRESH, val);
1922 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
1923 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
1925 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
1927 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
1928 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
1931 if (tg3_flag(tp, L1PLLPD_EN)) {
1932 u32 grc_mode = tr32(GRC_MODE);
1934 /* Access the lower 1K of PL PCIE block registers. */
1935 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
1936 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
1938 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
1939 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
1940 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
1942 tw32(GRC_MODE, grc_mode);
1945 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
1946 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1947 u32 grc_mode = tr32(GRC_MODE);
1949 /* Access the lower 1K of PL PCIE block registers. */
1950 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
1951 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
1953 val = tr32(TG3_PCIE_TLDLPL_PORT +
1954 TG3_PCIE_PL_LO_PHYCTL5);
1955 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
1956 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
1958 tw32(GRC_MODE, grc_mode);
1961 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
1962 u32 grc_mode = tr32(GRC_MODE);
1964 /* Access the lower 1K of DL PCIE block registers. */
1965 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
1966 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
1968 val = tr32(TG3_PCIE_TLDLPL_PORT +
1969 TG3_PCIE_DL_LO_FTSMAX);
1970 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
1971 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
1972 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
1974 tw32(GRC_MODE, grc_mode);
1977 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
1978 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
1979 val |= CPMU_LSPD_10MB_MACCLK_6_25;
1980 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
1983 /* This works around an issue with Athlon chipsets on
1984 * B3 tigon3 silicon. This bit has no effect on any
1985 * other revision. But do not set this on PCI Express
1986 * chips and don't even touch the clocks if the CPMU is present.
1988 if (!tg3_flag(tp, CPMU_PRESENT)) {
1989 if (!tg3_flag(tp, PCI_EXPRESS))
1990 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
1991 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
1994 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
1995 tg3_flag(tp, PCIX_MODE)) {
1996 val = tr32(TG3PCI_PCISTATE);
1997 val |= PCISTATE_RETRY_SAME_DMA;
1998 tw32(TG3PCI_PCISTATE, val);
2001 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
2002 /* Enable some hw fixes. */
2003 val = tr32(TG3PCI_MSI_DATA);
2004 val |= (1 << 26) | (1 << 28) | (1 << 29);
2005 tw32(TG3PCI_MSI_DATA, val);
2008 /* Descriptor ring init may make accesses to the
2009 * NIC SRAM area to setup the TX descriptors, so we
2010 * can only do this after the hardware has been
2011 * successfully reset.
2013 err = tg3_init_rings(tp);
2017 if (tg3_flag(tp, 57765_PLUS)) {
2018 val = tr32(TG3PCI_DMA_RW_CTRL) &
2019 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
2020 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
2021 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
2022 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
2023 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
2024 val |= DMA_RWCTRL_TAGGED_STAT_WA;
2025 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
2026 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
2027 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
2028 /* This value is determined during the probe time DMA
2029 * engine test, tg3_test_dma.
2031 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
2034 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
2035 GRC_MODE_4X_NIC_SEND_RINGS |
2036 GRC_MODE_NO_TX_PHDR_CSUM |
2037 GRC_MODE_NO_RX_PHDR_CSUM);
2038 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
2039 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
2041 /* Pseudo-header checksum is done by hardware logic and not
2043 * the offload processors, so make the chip do the pseudo-
2043 * header checksums on receive. For transmit it is more
2044 * convenient to do the pseudo-header checksum in software
2045 * as Linux does that on transmit for us in all cases.
2047 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
2051 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
2053 /* Setup the timer prescalar register. Clock is always 66Mhz. */
2054 val = tr32(GRC_MISC_CFG);
2056 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
2057 tw32(GRC_MISC_CFG, val);
2059 /* Initialize MBUF/DESC pool. */
2060 if (tg3_flag(tp, 5750_PLUS)) {
2062 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
2063 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
2064 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
2065 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
2067 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
2068 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
2069 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
2072 tw32(BUFMGR_MB_RDMA_LOW_WATER,
2073 tp->bufmgr_config.mbuf_read_dma_low_water);
2074 tw32(BUFMGR_MB_MACRX_LOW_WATER,
2075 tp->bufmgr_config.mbuf_mac_rx_low_water);
2076 tw32(BUFMGR_MB_HIGH_WATER,
2077 tp->bufmgr_config.mbuf_high_water);
2079 tw32(BUFMGR_DMA_LOW_WATER,
2080 tp->bufmgr_config.dma_low_water);
2081 tw32(BUFMGR_DMA_HIGH_WATER,
2082 tp->bufmgr_config.dma_high_water);
2084 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
2085 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2086 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
2087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2088 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
2089 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
2090 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
2091 tw32(BUFMGR_MODE, val);
2092 for (i = 0; i < 2000; i++) {
2093 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
2098 DBGC(tp->dev, "%s cannot enable BUFMGR\n", __func__);
2102 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
2103 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
2105 tg3_setup_rxbd_thresholds(tp);
2107 /* Initialize TG3_BDINFO's at:
2108 * RCVDBDI_STD_BD: standard eth size rx ring
2109 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
2110 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
2113 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
2114 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
2115 * ring attribute flags
2116 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
2118 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
2119 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
2121 * The size of each ring is fixed in the firmware, but the location is
2124 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
2125 ((u64) tpr->rx_std_mapping >> 32));
2126 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
2127 ((u64) tpr->rx_std_mapping & 0xffffffff));
2128 if (!tg3_flag(tp, 5717_PLUS))
2129 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
2130 NIC_SRAM_RX_BUFFER_DESC);
2132 /* Disable the mini ring */
2133 if (!tg3_flag(tp, 5705_PLUS))
2134 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
2135 BDINFO_FLAGS_DISABLED);
2137 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
2139 if (tg3_flag(tp, 57765_PLUS))
2140 val |= (RX_STD_MAX_SIZE << 2);
2142 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
2144 tpr->rx_std_prod_idx = 0;
2146 /* std prod index is updated by tg3_refill_prod_ring() */
2147 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 0);
2148 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 0);
2150 tg3_rings_reset(tp);
2152 __tg3_set_mac_addr(tp,0);
2154 #define TG3_MAX_MTU 1522
2155 /* MTU + ethernet header + FCS + optional VLAN tag */
2156 tw32(MAC_RX_MTU_SIZE, TG3_MAX_MTU);
2158 /* The slot time is changed by tg3_setup_phy if we
2159 * run at gigabit with half duplex.
2161 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2162 (6 << TX_LENGTHS_IPG_SHIFT) |
2163 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
2165 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
2166 val |= tr32(MAC_TX_LENGTHS) &
2167 (TX_LENGTHS_JMB_FRM_LEN_MSK |
2168 TX_LENGTHS_CNT_DWN_VAL_MSK);
2170 tw32(MAC_TX_LENGTHS, val);
2172 /* Receive rules. */
2173 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
2174 tw32(RCVLPC_CONFIG, 0x0181);
2176 /* Calculate RDMAC_MODE setting early, we need it to determine
2177 * the RCVLPC_STATE_ENABLE mask.
2179 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
2180 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
2181 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
2182 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
2183 RDMAC_MODE_LNGREAD_ENAB);
2185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
2186 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
2188 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
2189 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
2190 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
2191 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
2192 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
2193 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
2195 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
2196 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
2197 if (tg3_flag(tp, TSO_CAPABLE) &&
2198 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2199 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
2200 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
2201 !tg3_flag(tp, IS_5788)) {
2202 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
2206 if (tg3_flag(tp, PCI_EXPRESS))
2207 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
2209 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
2210 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
2212 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
2213 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
2214 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
2215 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
2216 tg3_flag(tp, 57765_PLUS)) {
2217 val = tr32(TG3_RDMA_RSRVCTRL_REG);
2218 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2219 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2220 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
2221 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2222 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
2223 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
2224 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2225 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
2227 tw32(TG3_RDMA_RSRVCTRL_REG,
2228 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2231 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2232 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2233 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
2234 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
2235 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
2236 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
2239 /* Receive/send statistics. */
2240 if (tg3_flag(tp, 5750_PLUS)) {
2241 val = tr32(RCVLPC_STATS_ENABLE);
2242 val &= ~RCVLPC_STATSENAB_DACK_FIX;
2243 tw32(RCVLPC_STATS_ENABLE, val);
2244 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
2245 tg3_flag(tp, TSO_CAPABLE)) {
2246 val = tr32(RCVLPC_STATS_ENABLE);
2247 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
2248 tw32(RCVLPC_STATS_ENABLE, val);
2250 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
2252 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
2253 tw32(SNDDATAI_STATSENAB, 0xffffff);
2254 tw32(SNDDATAI_STATSCTRL,
2255 (SNDDATAI_SCTRL_ENABLE |
2256 SNDDATAI_SCTRL_FASTUPD));
2258 /* Setup host coalescing engine. */
2259 tw32(HOSTCC_MODE, 0);
2260 for (i = 0; i < 2000; i++) {
2261 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
2266 __tg3_set_coalesce(tp);
2268 if (!tg3_flag(tp, 5705_PLUS)) {
2269 /* Status/statistics block address. See tg3_timer,
2270 * the tg3_periodic_fetch_stats call there, and
2271 * tg3_get_stats to see how this works for 5705/5750 chips.
2272 * NOTE: stats block removed for iPXE
2274 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
2276 /* Clear statistics and status block memory areas */
2277 for (i = NIC_SRAM_STATS_BLK;
2278 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
2280 tg3_write_mem(tp, i, 0);
2285 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
2287 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
2288 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
2289 if (!tg3_flag(tp, 5705_PLUS))
2290 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
2292 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
2293 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
2294 /* reset to prevent losing 1st rx packet intermittently */
2295 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
2299 if (tg3_flag(tp, ENABLE_APE))
2300 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
2303 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
2304 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
2305 if (!tg3_flag(tp, 5705_PLUS) &&
2306 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2307 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
2308 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2309 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
2312 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
2313 * If TG3_FLAG_IS_NIC is zero, we should read the
2314 * register to preserve the GPIO settings for LOMs. The GPIOs,
2315 * whether used as inputs or outputs, are set by boot code after
2318 if (!tg3_flag(tp, IS_NIC)) {
2321 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
2322 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
2323 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
2325 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
2326 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
2327 GRC_LCLCTRL_GPIO_OUTPUT3;
2329 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
2330 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
2332 tp->grc_local_ctrl &= ~gpio_mask;
2333 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
2335 /* GPIO1 must be driven high for eeprom write protect */
2336 if (tg3_flag(tp, EEPROM_WRITE_PROT))
2337 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
2338 GRC_LCLCTRL_GPIO_OUTPUT1);
2340 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
2343 if (!tg3_flag(tp, 5705_PLUS)) {
2344 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
2348 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
2349 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
2350 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
2351 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
2352 WDMAC_MODE_LNGREAD_ENAB);
2354 /* Enable host coalescing bug fix */
2355 if (tg3_flag(tp, 5755_PLUS))
2356 val |= WDMAC_MODE_STATUS_TAG_FIX;
2358 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
2359 val |= WDMAC_MODE_BURST_ALL_DATA;
2361 tw32_f(WDMAC_MODE, val);
2364 if (tg3_flag(tp, PCIX_MODE)) {
2367 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
2369 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
2370 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
2371 pcix_cmd |= PCI_X_CMD_READ_2K;
2372 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2373 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
2374 pcix_cmd |= PCI_X_CMD_READ_2K;
2376 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
2380 tw32_f(RDMAC_MODE, rdmac_mode);
2383 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
2384 if (!tg3_flag(tp, 5705_PLUS))
2385 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
2387 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
2389 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
2391 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
2393 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
2394 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
2395 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
2396 if (tg3_flag(tp, LRG_PROD_RING_CAP))
2397 val |= RCVDBDI_MODE_LRG_RING_SZ;
2398 tw32(RCVDBDI_MODE, val);
2399 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
2401 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
2402 if (tg3_flag(tp, ENABLE_TSS))
2403 val |= SNDBDI_MODE_MULTI_TXQ_EN;
2404 tw32(SNDBDI_MODE, val);
2405 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
2408 /* FIXME: 5701 firmware fix? */
2410 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
2411 err = tg3_load_5701_a0_firmware_fix(tp);
2417 tp->tx_mode = TX_MODE_ENABLE;
2419 if (tg3_flag(tp, 5755_PLUS) ||
2420 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
2421 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
2423 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2424 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
2425 tp->tx_mode &= ~val;
2426 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
2429 tw32_f(MAC_TX_MODE, tp->tx_mode);
2432 tp->rx_mode = RX_MODE_ENABLE;
2434 tw32_f(MAC_RX_MODE, tp->rx_mode);
2437 tw32(MAC_LED_CTRL, tp->led_ctrl);
2439 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2440 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2441 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
2444 tw32_f(MAC_RX_MODE, tp->rx_mode);
2447 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2448 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
2449 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
2450 /* Set drive transmission level to 1.2V */
2451 /* only if the signal pre-emphasis bit is not set */
2452 val = tr32(MAC_SERDES_CFG);
2455 tw32(MAC_SERDES_CFG, val);
2457 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
2458 tw32(MAC_SERDES_CFG, 0x616000);
2461 /* Prevent chip from dropping frames when flow control
2464 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2468 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
2470 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
2471 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2472 /* Use hardware link auto-negotiation */
2473 tg3_flag_set(tp, HW_AUTONEG);
2476 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
2477 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2480 tmp = tr32(SERDES_RX_CTRL);
2481 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
2482 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
2483 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
2484 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
2487 err = tg3_setup_phy(tp, 0);
2491 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2492 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2495 /* Clear CRC stats. */
2496 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
2497 tg3_writephy(tp, MII_TG3_TEST1,
2498 tmp | MII_TG3_TEST1_CRC_EN);
2499 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
2503 __tg3_set_rx_mode(tp->dev);
2505 /* Initialize receive rules. */
2506 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
2507 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
2508 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
2509 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
2511 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
2515 if (tg3_flag(tp, ENABLE_ASF))
2519 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
2521 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
2523 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
2525 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
2527 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
2529 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
2531 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
2533 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
2535 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
2537 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
2539 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
2541 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
2543 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
2545 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
/* Called at device open time to get the chip ready for
 * packet processing. Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative error code from tg3_reset_hw().
 */
int tg3_init_hw(struct tg3 *tp, int reset_phy)
{ DBGP("%s\n", __func__);

	/* Switch the chip onto a stable core clock source before any
	 * further register programming. */
	tg3_switch_clocks(tp);

	/* Point the indirect SRAM memory window back at offset 0 so
	 * later windowed accesses start from a known base. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Full hardware (re)initialisation; reset_phy selects whether
	 * the PHY is also reset during bring-up. */
	return tg3_reset_hw(tp, reset_phy);
}
2569 void tg3_set_txd(struct tg3 *tp, int entry,
2570 dma_addr_t mapping, int len, u32 flags)
2571 { DBGP("%s\n", __func__);
2573 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
2575 txd->addr_hi = ((u64) mapping >> 32);
2576 txd->addr_lo = ((u64) mapping & 0xffffffff);
2577 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
2581 int tg3_do_test_dma(struct tg3 *tp, u32 __unused *buf, dma_addr_t buf_dma, int size, int to_device)
2582 { DBGP("%s\n", __func__);
2584 struct tg3_internal_buffer_desc test_desc;
2589 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
2591 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
2592 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
2593 tw32(RDMAC_STATUS, 0);
2594 tw32(WDMAC_STATUS, 0);
2596 tw32(BUFMGR_MODE, 0);
2599 test_desc.addr_hi = ((u64) buf_dma) >> 32;
2600 test_desc.addr_lo = buf_dma & 0xffffffff;
2601 test_desc.nic_mbuf = 0x00002100;
2602 test_desc.len = size;
2605 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
2606 * the *second* time the tg3 driver was getting loaded after an
2609 * Broadcom tells me:
2610 * ...the DMA engine is connected to the GRC block and a DMA
2611 * reset may affect the GRC block in some unpredictable way...
2612 * The behavior of resets to individual blocks has not been tested.
2614 * Broadcom noted the GRC reset will also reset all sub-components.
2617 test_desc.cqid_sqid = (13 << 8) | 2;
2619 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
2622 test_desc.cqid_sqid = (16 << 8) | 7;
2624 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
2627 test_desc.flags = 0x00000005;
2629 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
2632 val = *(((u32 *)&test_desc) + i);
2633 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
2634 sram_dma_descs + (i * sizeof(u32)));
2635 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
2637 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
2640 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
2642 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
2645 for (i = 0; i < 40; i++) {
2649 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
2651 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
2652 if ((val & 0xffff) == sram_dma_descs) {