/* Imported with qemu 2.4.0
 * [kvmfornfv.git] / qemu / roms / ipxe / src / drivers / net / tg3 / tg3_hw.c
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18 FILE_LICENCE ( GPL2_ONLY );
19
20 #include <mii.h>
21 #include <stdio.h>
22 #include <errno.h>
23 #include <unistd.h>
24 #include <byteswap.h>
25 #include <ipxe/pci.h>
26 #include <ipxe/iobuf.h>
27 #include <ipxe/timer.h>
28 #include <ipxe/malloc.h>
29 #include <ipxe/if_ether.h>
30 #include <ipxe/ethernet.h>
31 #include <ipxe/netdevice.h>
32
33 #include "tg3.h"
34
35 #define RESET_KIND_SHUTDOWN     0
36 #define RESET_KIND_INIT         1
37 #define RESET_KIND_SUSPEND      2
38
39 #define TG3_DEF_MAC_MODE        0
40
41 void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
42 {       DBGP("%s\n", __func__);
43
44         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
45         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
46 }
47
48 u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
49 {       DBGP("%s\n", __func__);
50
51         u32 val;
52
53         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
54         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
55         return val;
56 }
57
58 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
59 {       DBGP("%s\n", __func__);
60
61         return readl(tp->regs + off + GRCMBOX_BASE);
62 }
63
64 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
65 {       DBGP("%s\n", __func__);
66
67         writel(val, tp->regs + off + GRCMBOX_BASE);
68 }
69
70 void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
71 {       DBGP("%s\n", __func__);
72
73         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
74                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
75                                        TG3_64BIT_REG_LOW, val);
76                 return;
77         }
78         if (off == TG3_RX_STD_PROD_IDX_REG) {
79                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
80                                        TG3_64BIT_REG_LOW, val);
81                 return;
82         }
83
84         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
85         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
86
87         /* In indirect mode when disabling interrupts, we also need
88          * to clear the interrupt bit in the GRC local ctrl register.
89          */
90         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
91             (val == 0x1)) {
92                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
93                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
94         }
95 }
96
97 u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
98 {       DBGP("%s\n", __func__);
99
100         u32 val;
101
102         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
103         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
104
105         return val;
106 }
107
108 /* usec_wait specifies the wait time in usec when writing to certain registers
109  * where it is unsafe to read back the register without some delay.
110  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
111  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
112  */
void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{	DBGP("%s\n", __func__);

	tw32(off, val);
	if (usec_wait)
		udelay(usec_wait);
	/* Read back to force the posted write out to the chip. */
	tr32(off);

	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
127
128 /* stolen from legacy etherboot tg3 driver */
129 void tg3_set_power_state_0(struct tg3 *tp)
130 {       DBGP("%s\n", __func__);
131
132         uint16_t power_control;
133         int pm = tp->pm_cap;
134
135         /* Make sure register accesses (indirect or otherwise)
136          * will function correctly.
137          */
138         pci_write_config_dword(tp->pdev,  TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
139
140         pci_read_config_word(tp->pdev, pm + PCI_PM_CTRL, &power_control);
141
142         power_control |= PCI_PM_CTRL_PME_STATUS;
143         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
144         power_control |= 0;
145         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
146
147         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
148
149         return;
150 }
151
152 void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
153 {       DBGP("%s\n", __func__);
154
155         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
156             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
157                 *val = 0;
158                 return;
159         }
160
161         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
162         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
163
164         /* Always leave this as zero. */
165         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
166 }
167
168 #define PCI_VENDOR_ID_ARIMA                0x161f
169
/* Parse the hardware configuration that bootcode left in NIC SRAM:
 * PHY id, LED mode, EEPROM write protect, ASF/APE enables and RGMII
 * flags.  If the SRAM signature is absent, defaults set at the top of
 * the function remain in effect (invalid PHY id, PHY_1 LED mode,
 * EEPROM_WRITE_PROT assumed).
 */
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{	DBGP("%s\n", __func__);

	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	mdelay(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Defaults in case the SRAM signature check below fails. */
	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists on newer bootcode (version word in
		 * (0, 0x100)) and not on 5700/5701/5703.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY id from the two packed SRAM fields. */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		/* 5750+ parts carry the LED mode in CFG_2 (including the
		 * Shasta extended modes); older parts use CFG.
		 */
		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* 5750 A0/A1 additionally need the PHY mode bits. */
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED quirks override the SRAM setting. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Two Arima boards misreport write protect. */
			if ((tp->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->subsystem_device == 0x205a ||
			     tp->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		/* APE only counts as enabled when ASF is enabled too. */
		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, ENABLE_ASF))
			tg3_flag_set(tp, ENABLE_APE);

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			/* NOTE(review): cfg3 is read but never used here —
			 * presumably trimmed down from the full driver,
			 * which acts on CFG_3 bits; confirm before removing.
			 */
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
}
358
359 static void tg3_switch_clocks(struct tg3 *tp)
360 {       DBGP("%s\n", __func__);
361
362         u32 clock_ctrl;
363         u32 orig_clock_ctrl;
364
365         if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
366                 return;
367
368         clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
369
370         orig_clock_ctrl = clock_ctrl;
371         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
372                        CLOCK_CTRL_CLKRUN_OENABLE |
373                        0x1f);
374         tp->pci_clock_ctrl = clock_ctrl;
375
376         if (tg3_flag(tp, 5705_PLUS)) {
377                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
378                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
379                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
380                 }
381         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
382                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
383                             clock_ctrl |
384                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
385                             40);
386                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
387                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
388                             40);
389         }
390         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
391 }
392
393 int tg3_get_invariants(struct tg3 *tp)
394 {       DBGP("%s\n", __func__);
395
396         u32 misc_ctrl_reg;
397         u32 pci_state_reg, grc_misc_cfg;
398         u32 val;
399         u16 pci_cmd;
400         int err;
401
402         /* Force memory write invalidate off.  If we leave it on,
403          * then on 5700_BX chips we have to enable a workaround.
404          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
405          * to match the cacheline size.  The Broadcom driver have this
406          * workaround but turns MWI off all the times so never uses
407          * it.  This seems to suggest that the workaround is insufficient.
408          */
409         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
410         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
411         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
412
413         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
414          * has the register indirect write enable bit set before
415          * we try to access any of the MMIO registers.  It is also
416          * critical that the PCI-X hw workaround situation is decided
417          * before that as well.
418          */
419         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
420                               &misc_ctrl_reg);
421
422         tp->pci_chip_rev_id = (misc_ctrl_reg >>
423                                MISC_HOST_CTRL_CHIPREV_SHIFT);
424         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
425                 u32 prod_id_asic_rev;
426
427                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
428                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
429                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
430                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
431                         pci_read_config_dword(tp->pdev,
432                                               TG3PCI_GEN2_PRODID_ASICREV,
433                                               &prod_id_asic_rev);
434                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
435                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
436                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
437                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
438                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
439                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
440                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
441                         pci_read_config_dword(tp->pdev,
442                                               TG3PCI_GEN15_PRODID_ASICREV,
443                                               &prod_id_asic_rev);
444                 else
445                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
446                                               &prod_id_asic_rev);
447
448                 tp->pci_chip_rev_id = prod_id_asic_rev;
449         }
450
451         /* Wrong chip ID in 5752 A0. This code can be removed later
452          * as A0 is not in production.
453          */
454         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
455                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
456
457         /* Initialize misc host control in PCI block. */
458         tp->misc_host_ctrl |= (misc_ctrl_reg &
459                                MISC_HOST_CTRL_CHIPREV);
460         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
461                                tp->misc_host_ctrl);
462
463         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
464             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
465             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
466                 tg3_flag_set(tp, 5717_PLUS);
467
468         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
469             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766 ||
470             tg3_flag(tp, 5717_PLUS))
471                 tg3_flag_set(tp, 57765_PLUS);
472
473         /* Intentionally exclude ASIC_REV_5906 */
474         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
475             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
476             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
477             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
478             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
479             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
480             tg3_flag(tp, 57765_PLUS))
481                 tg3_flag_set(tp, 5755_PLUS);
482
483         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
484             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
485             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
486             tg3_flag(tp, 5755_PLUS) ||
487             tg3_flag(tp, 5780_CLASS))
488                 tg3_flag_set(tp, 5750_PLUS);
489
490         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
491             tg3_flag(tp, 5750_PLUS))
492                 tg3_flag_set(tp, 5705_PLUS);
493
494         if (tg3_flag(tp, 5717_PLUS))
495                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
496
497         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
498                               &pci_state_reg);
499
500         tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
501         if (tp->pcie_cap != 0) {
502                 u16 lnkctl;
503
504                 tg3_flag_set(tp, PCI_EXPRESS);
505
506                 pci_read_config_word(tp->pdev,
507                                      tp->pcie_cap + PCI_EXP_LNKCTL,
508                                      &lnkctl);
509                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
510                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
511                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
512                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
513                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
514                                 tg3_flag_set(tp, CLKREQ_BUG);
515                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
516                         tg3_flag_set(tp, L1PLLPD_EN);
517                 }
518         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
519                 tg3_flag_set(tp, PCI_EXPRESS);
520         } else if (!tg3_flag(tp, 5705_PLUS) ||
521                    tg3_flag(tp, 5780_CLASS)) {
522                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
523                 if (!tp->pcix_cap) {
524                         DBGC(&tp->pdev->dev,
525                                 "Cannot find PCI-X capability, aborting\n");
526                         return -EIO;
527                 }
528
529                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
530                         tg3_flag_set(tp, PCIX_MODE);
531         }
532
533         /* If we have an AMD 762 or VIA K8T800 chipset, write
534          * reordering to the mailbox registers done by the host
535          * controller can cause major troubles.  We read back from
536          * every mailbox register write to force the writes to be
537          * posted to the chip in order.
538          */
539
540         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
541                              &tp->pci_cacheline_sz);
542         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
543                              &tp->pci_lat_timer);
544         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
545             tp->pci_lat_timer < 64) {
546                 tp->pci_lat_timer = 64;
547                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
548                                       tp->pci_lat_timer);
549         }
550
551         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
552                 /* 5700 BX chips need to have their TX producer index
553                  * mailboxes written twice to workaround a bug.
554                  */
555                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
556
557                 /* If we are in PCI-X mode, enable register write workaround.
558                  *
559                  * The workaround is to use indirect register accesses
560                  * for all chip writes not to mailbox registers.
561                  */
562                 if (tg3_flag(tp, PCIX_MODE)) {
563                         u32 pm_reg;
564
565                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
566
567                         /* The chip can have it's power management PCI config
568                          * space registers clobbered due to this bug.
569                          * So explicitly force the chip into D0 here.
570                          */
571                         pci_read_config_dword(tp->pdev,
572                                               tp->pm_cap + PCI_PM_CTRL,
573                                               &pm_reg);
574                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
575                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
576                         pci_write_config_dword(tp->pdev,
577                                                tp->pm_cap + PCI_PM_CTRL,
578                                                pm_reg);
579
580                         /* Also, force SERR#/PERR# in PCI command. */
581                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
582                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
583                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
584                 }
585         }
586
587         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
588                 tg3_flag_set(tp, PCI_HIGH_SPEED);
589         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
590                 tg3_flag_set(tp, PCI_32BIT);
591
592         /* Chip-specific fixup from Broadcom driver */
593         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
594             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
595                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
596                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
597         }
598
599         tp->write32_mbox = tg3_write_indirect_reg32;
600         tp->write32_rx_mbox = tg3_write_indirect_mbox;
601         tp->write32_tx_mbox = tg3_write_indirect_mbox;
602         tp->read32_mbox = tg3_read_indirect_mbox;
603
604         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
605                 tp->read32_mbox = tg3_read32_mbox_5906;
606                 tp->write32_mbox = tg3_write32_mbox_5906;
607                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
608                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
609         }
610
611         /* Get eeprom hw config before calling tg3_set_power_state().
612          * In particular, the TG3_FLAG_IS_NIC flag must be
613          * determined before calling tg3_set_power_state() so that
614          * we know whether or not to switch out of Vaux power.
615          * When the flag is set, it means that GPIO1 is used for eeprom
616          * write protect and also implies that it is a LOM where GPIOs
617          * are not used to switch power.
618          */
619         tg3_get_eeprom_hw_cfg(tp);
620
621         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
622             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
623             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
624             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
625             tg3_flag(tp, 57765_PLUS))
626                 tg3_flag_set(tp, CPMU_PRESENT);
627
628         /* Set up tp->grc_local_ctrl before calling tg3_power_up().
629          * GPIO1 driven high will bring 5700's external PHY out of reset.
630          * It is also used as eeprom write protect on LOMs.
631          */
632         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
633         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
634             tg3_flag(tp, EEPROM_WRITE_PROT))
635                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
636                                        GRC_LCLCTRL_GPIO_OUTPUT1);
637         /* Unused GPIO3 must be driven as output on 5752 because there
638          * are no pull-up resistors on unused GPIO pins.
639          */
640         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
641                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
642
643         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
644             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
645             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
646                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
647
648         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
649             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
650                 /* Turn off the debug UART. */
651                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
652                 if (tg3_flag(tp, IS_NIC))
653                         /* Keep VMain power. */
654                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
655                                               GRC_LCLCTRL_GPIO_OUTPUT0;
656         }
657
658         /* Force the chip into D0. */
659         tg3_set_power_state_0(tp);
660
661         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
662                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
663
664         /* A few boards don't want Ethernet@WireSpeed phy feature */
665         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
666             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
667              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
668              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
669             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
670             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
671                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
672
673         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
674             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
675                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
676         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
677                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
678
679         if (tg3_flag(tp, 5705_PLUS) &&
680             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
681             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
682             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
683             !tg3_flag(tp, 57765_PLUS)) {
684                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
685                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
686                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
687                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
688                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
689                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
690                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
691                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
692                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
693                 } else
694                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
695         }
696
697         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
698             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
699                 tp->phy_otp = tg3_read_otp_phycfg(tp);
700                 if (tp->phy_otp == 0)
701                         tp->phy_otp = TG3_OTP_DEFAULT;
702         }
703
704         if (tg3_flag(tp, CPMU_PRESENT))
705                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
706         else
707                 tp->mi_mode = MAC_MI_MODE_BASE;
708
709         tp->coalesce_mode = 0;
710         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
711             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
712                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
713
714         /* Set these bits to enable statistics workaround. */
715         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
716             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
717             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
718                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
719                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
720         }
721
722         tg3_mdio_init(tp);
723
724         /* Initialize data/descriptor byte/word swapping. */
725         val = tr32(GRC_MODE);
726         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
727                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
728                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
729                         GRC_MODE_B2HRX_ENABLE |
730                         GRC_MODE_HTX2B_ENABLE |
731                         GRC_MODE_HOST_STACKUP);
732         else
733                 val &= GRC_MODE_HOST_STACKUP;
734
735         tw32(GRC_MODE, val | tp->grc_mode);
736
737         tg3_switch_clocks(tp);
738
739         /* Clear this out for sanity. */
740         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
741
742         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
743                               &pci_state_reg);
744         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
745             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
746                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
747
748                 if (chiprevid == CHIPREV_ID_5701_A0 ||
749                     chiprevid == CHIPREV_ID_5701_B0 ||
750                     chiprevid == CHIPREV_ID_5701_B2 ||
751                     chiprevid == CHIPREV_ID_5701_B5) {
752                         void *sram_base;
753
754                         /* Write some dummy words into the SRAM status block
755                          * area, see if it reads back correctly.  If the return
756                          * value is bad, force enable the PCIX workaround.
757                          */
758                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
759
760                         writel(0x00000000, sram_base);
761                         writel(0x00000000, sram_base + 4);
762                         writel(0xffffffff, sram_base + 4);
763                         if (readl(sram_base) != 0x00000000)
764                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
765                 }
766         }
767
768         udelay(50);
769         /* FIXME: do we need nvram access? */
770 ///     tg3_nvram_init(tp);
771
772         grc_misc_cfg = tr32(GRC_MISC_CFG);
773         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
774
775         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
776             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
777              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
778                 tg3_flag_set(tp, IS_5788);
779
780         if (!tg3_flag(tp, IS_5788) &&
781             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
782                 tg3_flag_set(tp, TAGGED_STATUS);
783         if (tg3_flag(tp, TAGGED_STATUS)) {
784                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
785                                       HOSTCC_MODE_CLRTICK_TXBD);
786
787                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
788                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
789                                        tp->misc_host_ctrl);
790         }
791
792         /* Preserve the APE MAC_MODE bits */
793         if (tg3_flag(tp, ENABLE_APE))
794                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
795         else
796                 tp->mac_mode = TG3_DEF_MAC_MODE;
797
798         /* these are limited to 10/100 only */
799         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
800              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
801             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
802              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
803              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
804               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
805               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
806             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
807              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
808               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
809               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
810             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
811             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
812             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
813             (tp->phy_flags & TG3_PHYFLG_IS_FET))
814                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
815
816         err = tg3_phy_probe(tp);
817         if (err) {
818                 DBGC(&tp->pdev->dev, "phy probe failed, err: %s\n", strerror(err));
819                 /* ... but do not return immediately ... */
820         }
821
822         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
823                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
824         } else {
825                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
826                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
827                 else
828                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
829         }
830
831         /* For all SERDES we poll the MAC status register. */
832         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
833                 tg3_flag_set(tp, POLL_SERDES);
834         else
835                 tg3_flag_clear(tp, POLL_SERDES);
836
837         /* Increment the rx prod index on the rx std ring by at most
838          * 8 for these chips to workaround hw errata.
839          */
840         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
841             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
842             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
843                 tp->rx_std_max_post = 8;
844
845         return err;
846 }
847
848 void tg3_init_bufmgr_config(struct tg3 *tp)
849 {       DBGP("%s\n", __func__);
850
851         if (tg3_flag(tp, 57765_PLUS)) {
852                 tp->bufmgr_config.mbuf_read_dma_low_water =
853                         DEFAULT_MB_RDMA_LOW_WATER_5705;
854                 tp->bufmgr_config.mbuf_mac_rx_low_water =
855                         DEFAULT_MB_MACRX_LOW_WATER_57765;
856                 tp->bufmgr_config.mbuf_high_water =
857                         DEFAULT_MB_HIGH_WATER_57765;
858
859                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
860                         DEFAULT_MB_RDMA_LOW_WATER_5705;
861                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
862                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
863                 tp->bufmgr_config.mbuf_high_water_jumbo =
864                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
865         } else if (tg3_flag(tp, 5705_PLUS)) {
866                 tp->bufmgr_config.mbuf_read_dma_low_water =
867                         DEFAULT_MB_RDMA_LOW_WATER_5705;
868                 tp->bufmgr_config.mbuf_mac_rx_low_water =
869                         DEFAULT_MB_MACRX_LOW_WATER_5705;
870                 tp->bufmgr_config.mbuf_high_water =
871                         DEFAULT_MB_HIGH_WATER_5705;
872                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
873                         tp->bufmgr_config.mbuf_mac_rx_low_water =
874                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
875                         tp->bufmgr_config.mbuf_high_water =
876                                 DEFAULT_MB_HIGH_WATER_5906;
877                 }
878
879                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
880                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
881                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
882                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
883                 tp->bufmgr_config.mbuf_high_water_jumbo =
884                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
885         } else {
886                 tp->bufmgr_config.mbuf_read_dma_low_water =
887                         DEFAULT_MB_RDMA_LOW_WATER;
888                 tp->bufmgr_config.mbuf_mac_rx_low_water =
889                         DEFAULT_MB_MACRX_LOW_WATER;
890                 tp->bufmgr_config.mbuf_high_water =
891                         DEFAULT_MB_HIGH_WATER;
892
893                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
894                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
895                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
896                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
897                 tp->bufmgr_config.mbuf_high_water_jumbo =
898                         DEFAULT_MB_HIGH_WATER_JUMBO;
899         }
900
901         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
902         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
903 }
904
905 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
906
907 void tg3_wait_for_event_ack(struct tg3 *tp)
908 {       DBGP("%s\n", __func__);
909
910         int i;
911
912         for (i = 0; i < TG3_FW_EVENT_TIMEOUT_USEC / 10; i++) {
913                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
914                         break;
915
916                 udelay(10);
917         }
918 }
919
920 void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
921 {       DBGP("%s\n", __func__);
922
923         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
924             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
925                 return;
926
927         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
928         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
929
930         /* Always leave this as zero. */
931         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
932 }
933
934 static void tg3_stop_fw(struct tg3 *tp)
935 {       DBGP("%s\n", __func__);
936
937         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
938                 /* Wait for RX cpu to ACK the previous event. */
939                 tg3_wait_for_event_ack(tp);
940
941                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
942
943                 tg3_generate_fw_event(tp);
944
945                 /* Wait for RX cpu to ACK this event. */
946                 tg3_wait_for_event_ack(tp);
947         }
948 }
949
/* Post the MAGIC1 signature in the firmware mailbox before a chip
 * reset; tg3_poll_fw() later waits for the firmware to write back the
 * bitwise complement of this value.
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp)
{       DBGP("%s\n", __func__);

        tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
                      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
}
956
/* Disable chip interrupts: mask the PCI interrupt line, then write a
 * non-zero value to the interrupt mailbox.
 */
void tg3_disable_ints(struct tg3 *tp)
{       DBGP("%s\n", __func__);

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));

        /* Non-zero mailbox write; flushed variant to push it out */
        tw32_mailbox_f(tp->int_mbox, 0x00000001);
}
965
/* Enable chip interrupts and kick the coalescing engine so a pending
 * status update (if any) raises an initial interrupt.
 */
void tg3_enable_ints(struct tg3 *tp)
{       DBGP("%s\n", __func__);

        /* Unmask the PCI interrupt line */
        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;

        /* Re-arm by writing the last seen tag to the interrupt mailbox */
        tw32_mailbox_f(tp->int_mbox, tp->last_tag << 24);

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);
}
983
984 #define MAX_WAIT_CNT 1000
985
986 /* To stop a block, clear the enable bit and poll till it clears. */
987 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
988 {       DBGP("%s\n", __func__);
989
990         unsigned int i;
991         u32 val;
992
993         if (tg3_flag(tp, 5705_PLUS)) {
994                 switch (ofs) {
995                 case RCVLSC_MODE:
996                 case DMAC_MODE:
997                 case MBFREE_MODE:
998                 case BUFMGR_MODE:
999                 case MEMARB_MODE:
1000                         /* We can't enable/disable these bits of the
1001                          * 5705/5750, just say success.
1002                          */
1003                         return 0;
1004
1005                 default:
1006                         break;
1007                 }
1008         }
1009
1010         val = tr32(ofs);
1011         val &= ~enable_bit;
1012         tw32_f(ofs, val);
1013
1014         for (i = 0; i < MAX_WAIT_CNT; i++) {
1015                 udelay(100);
1016                 val = tr32(ofs);
1017                 if ((val & enable_bit) == 0)
1018                         break;
1019         }
1020
1021         if (i == MAX_WAIT_CNT) {
1022                 DBGC(&tp->pdev->dev,
1023                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
1024                         ofs, enable_bit);
1025                 return -ENODEV;
1026         }
1027
1028         return 0;
1029 }
1030
/* Quiesce the NIC: disable interrupts, stop the receive MAC, stop each
 * RX/TX/DMA functional block in sequence, reset the FTQs and clear the
 * host status block.  Returns 0 if everything stopped, otherwise the
 * ORed (negative) results of the individual tg3_stop_block() calls.
 */
static int tg3_abort_hw(struct tg3 *tp)
{       DBGP("%s\n", __func__);

        int i, err;

        tg3_disable_ints(tp);

        /* Stop the receive MAC first so no new frames come in */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        /* Receive-path blocks */
        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);

        /* Transmit-path blocks */
        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);

        /* Disable the transmit side of the MAC */
        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        /* Clear TX mode enable and poll until the MAC reports it clear */
        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                DBGC(&tp->pdev->dev,
                        "%s timed out, TX_MODE_ENABLE will not clear "
                        "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        /* Host coalescing, write DMA and mbuf-free blocks */
        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);

        /* Pulse-reset all FTQs */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);

        /* Wipe the host status block so stale state is not re-read */
        if (tp->hw_status)
                memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);

        return err;
}
1091
1092 void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
1093 {       DBGP("%s\n", __func__);
1094
1095         u32 addr_high, addr_low;
1096         int i;
1097
1098         addr_high = ((tp->dev->ll_addr[0] << 8) |
1099                      tp->dev->ll_addr[1]);
1100         addr_low = ((tp->dev->ll_addr[2] << 24) |
1101                     (tp->dev->ll_addr[3] << 16) |
1102                     (tp->dev->ll_addr[4] <<  8) |
1103                     (tp->dev->ll_addr[5] <<  0));
1104         for (i = 0; i < 4; i++) {
1105                 if (i == 1 && skip_mac_1)
1106                         continue;
1107                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
1108                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
1109         }
1110
1111         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1112             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1113                 for (i = 0; i < 12; i++) {
1114                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
1115                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
1116                 }
1117         }
1118
1119         addr_high = (tp->dev->ll_addr[0] +
1120                      tp->dev->ll_addr[1] +
1121                      tp->dev->ll_addr[2] +
1122                      tp->dev->ll_addr[3] +
1123                      tp->dev->ll_addr[4] +
1124                      tp->dev->ll_addr[5]) &
1125                 TX_BACKOFF_SEED_MASK;
1126         tw32(MAC_TX_BACKOFF_SEED, addr_high);
1127 }
1128
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{       DBGP("%s\n", __func__);

        /* Core clock reset can clear bits in PCI_COMMAND (e.g. memory
         * enable); cache it so tg3_restore_pci_state() can put it back.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
1135
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{       DBGP("%s\n", __func__);

        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            tg3_flag(tp, PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;

        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        /* Restore the command register saved by tg3_save_pci_state() */
        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        /* Reprogram cache line size and latency timer from the values
         * cached in tp (skipped on the 5785).
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
                        pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                              tp->pci_cacheline_sz);
                        pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                              tp->pci_lat_timer);
        }


        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tg3_flag(tp, PCIX_MODE)) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }
}
1175
/* Wait for the on-chip firmware to finish initialising after a reset.
 * Returns 0 on success (or when no firmware is fitted), -ENODEV if the
 * 5906 VCPU never reports init-done.
 */
static int tg3_poll_fw(struct tg3 *tp)
{       DBGP("%s\n", __func__);

        int i;
        u32 val;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* Wait up to 20ms for init done. */
                for (i = 0; i < 200; i++) {
                        if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
                                return 0;
                        udelay(100);
                }
                return -ENODEV;
        }

        /* Wait for firmware initialization to complete.  The firmware
         * acknowledges the MAGIC1 signature written by
         * tg3_write_sig_pre_reset() by storing its bitwise complement.
         */
        for (i = 0; i < 100000; i++) {
                tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
                if (val == (u32)~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                        break;
                udelay(10);
        }

        /* Chip might not be fitted with firmware.  Some Sun onboard
         * parts are configured like that.  So don't signal the timeout
         * of the above loop as an error, but do report the lack of
         * running firmware once.
         */
        if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
                tg3_flag_set(tp, NO_FWARE_REPORTED);

                DBGC(tp->dev, "No firmware running\n");
        }

        if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
                /* The 57765 A0 needs a little more
                 * time to do some important work.
                 */
                mdelay(10);
        }

        return 0;
}
1220
1221 static int tg3_nvram_lock(struct tg3 *tp)
1222 {       DBGP("%s\n", __func__);
1223
1224         if (tg3_flag(tp, NVRAM)) {
1225                 int i;
1226
1227                 if (tp->nvram_lock_cnt == 0) {
1228                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
1229                         for (i = 0; i < 8000; i++) {
1230                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
1231                                         break;
1232                                 udelay(20);
1233                         }
1234                         if (i == 8000) {
1235                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
1236                                 return -ENODEV;
1237                         }
1238                 }
1239                 tp->nvram_lock_cnt++;
1240         }
1241         return 0;
1242 }
1243
1244 static void tg3_nvram_unlock(struct tg3 *tp)
1245 {       DBGP("%s\n", __func__);
1246
1247         if (tg3_flag(tp, NVRAM)) {
1248                 if (tp->nvram_lock_cnt > 0)
1249                         tp->nvram_lock_cnt--;
1250                 if (tp->nvram_lock_cnt == 0)
1251                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
1252         }
1253 }
1254
/* Perform a core clock reset of the chip and restore enough state
 * (PCI config space, memory arbiter, GRC mode, MAC port mode) for the
 * device to be reprogrammed afterwards.  Returns 0 on success, or a
 * negative error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{       DBGP("%s\n", __func__);

        u32 val;
        int err;

        tg3_nvram_lock(tp);


        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            tg3_flag(tp, 5755_PLUS))
                tw32(GRC_FASTBOOT_PC, 0);

#if 0
        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;
#endif

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tg3_flag_set(tp, CHIP_RESETTING);

        /* Clear the cached status block and interrupt tags */
        if (tp->hw_status) {
                tp->hw_status->status = 0;
                tp->hw_status->status_tag = 0;
        }
        tp->last_tag = 0;
        tp->last_irq_tag = 0;

        mb();

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
                val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
                tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
        }

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* Force PCIe 1.0a mode */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
                    !tg3_flag(tp, 57765_PLUS) &&
                    tr32(TG3_PCIE_PHY_TSTCTL) ==
                    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
                        tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

                if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        /* Manage gphy power for all CPMU absent PCIe devices. */
        if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

        /* This write triggers the core clock reset */
        tw32(GRC_MISC_CFG, val);

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
                u16 val16;

                if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
                        int i;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (i = 0; i < 5000; i++)
                                udelay(100);

                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }

                /* Clear the "no snoop" and "relaxed ordering" bits. */
                pci_read_config_word(tp->pdev,
                                     tp->pcie_cap + PCI_EXP_DEVCTL,
                                     &val16);
                val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
                           PCI_EXP_DEVCTL_NOSNOOP_EN);
                /*
                 * Older PCIe devices only support the 128 byte
                 * MPS setting.  Enforce the restriction.
                 */
                if (!tg3_flag(tp, CPMU_PRESENT))
                        val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
                pci_write_config_word(tp->pdev,
                                      tp->pcie_cap + PCI_EXP_DEVCTL,
                                      val16);

                /* Clear error status */
                pci_write_config_word(tp->pdev,
                                      tp->pcie_cap + PCI_EXP_DEVSTA,
                                      PCI_EXP_DEVSTA_CED |
                                      PCI_EXP_DEVSTA_NFED |
                                      PCI_EXP_DEVSTA_FED |
                                      PCI_EXP_DEVSTA_URD);
        }

        tg3_restore_pci_state(tp);

        tg3_flag_clear(tp, CHIP_RESETTING);
        tg3_flag_clear(tp, ERROR_PROCESSED);

        /* Re-enable the memory arbiter, preserving any mode bits that
         * were set on 5780-class parts.
         */
        val = 0;
        if (tg3_flag(tp, 5780_CLASS))
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        tw32(GRC_MODE, tp->grc_mode);

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Restore the MAC port mode for SERDES parts */
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
                val = tp->mac_mode;
        } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
                val = tp->mac_mode;
        } else
                val = 0;

        tw32_f(MAC_MODE, val);
        udelay(40);

        /* Wait for any fitted firmware to finish re-initialising */
        err = tg3_poll_fw(tp);
        if (err)
                return err;

        if (tg3_flag(tp, PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
            !tg3_flag(tp, 57765_PLUS)) {
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                val = tr32(TG3_CPMU_CLCK_ORIDE);
                tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
        }

        if (tg3_flag(tp, CPMU_PRESENT)) {
                tw32(TG3_CPMU_D0_CLCK_POLICY, 0);
                val = tr32(TG3_CPMU_CLCK_ORIDE_EN);
                tw32(TG3_CPMU_CLCK_ORIDE_EN,
                     val | CPMU_CLCK_ORIDE_MAC_CLCK_ORIDE_EN);
        }

        return 0;
}
1478
/* Halt the device: quiesce ASF firmware, post the pre-reset signature,
 * stop all hardware blocks and reset the chip core.  Returns the
 * result of tg3_chip_reset().
 */
int tg3_halt(struct tg3 *tp)
{       DBGP("%s\n", __func__);

        int err;

        tg3_stop_fw(tp);
        tg3_write_sig_pre_reset(tp);
        tg3_abort_hw(tp);

        err = tg3_chip_reset(tp);

        /* Reprogram the station address (done even when the reset
         * reported an error, as in the original code).
         */
        __tg3_set_mac_addr(tp, 0);

        return err;
}
1498
/* Read one 32-bit word from the serial EEPROM via the GRC EEPROM
 * interface (the path used when no NVRAM controller is present).
 *
 * @v tp        tg3 device
 * @v offset    byte offset into the EEPROM; must be 32-bit aligned
 *              and within EEPROM_ADDR_ADDR_MASK
 * @v val       filled with the word read, byteswapped to native order
 * @ret         0 on success, -EINVAL on bad offset, -EBUSY on timeout
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                        u32 offset, u32 *val)
{       DBGP("%s\n", __func__);

        u32 tmp;
        int i;

        if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
                return -EINVAL;

        /* Preserve the register's other bits; clear only the address,
         * device-id and read-command fields before reprogramming them. */
        tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
                                        EEPROM_ADDR_DEVID_MASK |
                                        EEPROM_ADDR_READ);
        /* Kick off the read: address + READ + START in one write */
        tw32(GRC_EEPROM_ADDR,
             tmp |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             ((offset << EEPROM_ADDR_ADDR_SHIFT) &
              EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_READ | EEPROM_ADDR_START);

        /* Poll for completion, up to 1000 x 1ms */
        for (i = 0; i < 1000; i++) {
                tmp = tr32(GRC_EEPROM_ADDR);

                if (tmp & EEPROM_ADDR_COMPLETE)
                        break;
                mdelay(1);
        }
        if (!(tmp & EEPROM_ADDR_COMPLETE))
                return -EBUSY;

        tmp = tr32(GRC_EEPROM_DATA);

        /*
         * The data will always be opposite the native endian
         * format.  Perform a blind byteswap to compensate.
         */
        *val = bswap_32(tmp);

        return 0;
}
1539
1540 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
1541 {       DBGP("%s\n", __func__);
1542
1543         if (tg3_flag(tp, NVRAM) &&
1544             tg3_flag(tp, NVRAM_BUFFERED) &&
1545             tg3_flag(tp, FLASH) &&
1546             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
1547             (tp->nvram_jedecnum == JEDEC_ATMEL))
1548
1549                 addr = ((addr / tp->nvram_pagesize) <<
1550                         ATMEL_AT45DB0X1B_PAGE_POS) +
1551                        (addr % tp->nvram_pagesize);
1552
1553         return addr;
1554 }
1555
1556 static void tg3_enable_nvram_access(struct tg3 *tp)
1557 {       DBGP("%s\n", __func__);
1558
1559         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
1560                 u32 nvaccess = tr32(NVRAM_ACCESS);
1561
1562                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
1563         }
1564 }
1565
1566 static void tg3_disable_nvram_access(struct tg3 *tp)
1567 {       DBGP("%s\n", __func__);
1568
1569         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
1570                 u32 nvaccess = tr32(NVRAM_ACCESS);
1571
1572                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
1573         }
1574 }
1575
1576 #define NVRAM_CMD_TIMEOUT 10000
1577
1578 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
1579 {       DBGP("%s\n", __func__);
1580
1581         int i;
1582
1583         tw32(NVRAM_CMD, nvram_cmd);
1584         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
1585                 udelay(10);
1586                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
1587                         udelay(10);
1588                         break;
1589                 }
1590         }
1591
1592         if (i == NVRAM_CMD_TIMEOUT)
1593                 return -EBUSY;
1594
1595         return 0;
1596 }
1597
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
/* Read one 32-bit word from NVRAM.
 *
 * @v tp        tg3 device
 * @v offset    byte offset to read
 * @v val       filled with the word read (see endianness note above)
 * @ret         0 on success or a negative error
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{       DBGP("%s\n", __func__);

        int ret;

        /* Without an NVRAM controller, fall back to the EEPROM path */
        if (!tg3_flag(tp, NVRAM))
                return tg3_nvram_read_using_eeprom(tp, offset, val);

        /* Translate to a physical flash address where required */
        offset = tg3_nvram_phys_addr(tp, offset);

        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;

        /* Arbitrate for NVRAM ownership (e.g. against firmware) */
        ret = tg3_nvram_lock(tp);
        if (ret)
                return ret;

        tg3_enable_nvram_access(tp);

        /* Program the address, then issue a one-word read command */
        tw32(NVRAM_ADDR, offset);
        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
                NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

        if (ret == 0)
                *val = tr32(NVRAM_RDDATA);

        /* Always undo access enable and release the lock */
        tg3_disable_nvram_access(tp);

        tg3_nvram_unlock(tp);

        return ret;
}
1636
1637 /* Ensures NVRAM data is in bytestream format. */
1638 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, u32 *val)
1639 {       DBGP("%s\n", __func__);
1640
1641         u32 v = 0;
1642         int res = tg3_nvram_read(tp, offset, &v);
1643         if (!res)
1644                 *val = cpu_to_be32(v);
1645         return res;
1646 }
1647
/* Discover the device MAC address and store it in dev->hw_addr.
 *
 * Tries, in order: the bootcode MAC-address mailbox in NIC SRAM,
 * then NVRAM at a chip-dependent offset, then the MAC address
 * registers themselves.
 *
 * @v tp        tg3 device
 * @ret         0 on success, -EINVAL if no valid address was found
 */
int tg3_get_device_address(struct tg3 *tp)
{       DBGP("%s\n", __func__);

        struct net_device *dev = tp->dev;
        u32 hi, lo, mac_offset;
        int addr_ok = 0;

        /* Select the NVRAM offset of the MAC address for this chip /
         * PCI function (dual-MAC parts store a second address). */
        mac_offset = 0x7c;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            tg3_flag(tp, 5780_CLASS)) {
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        mac_offset = 0xcc;
                /* If the NVRAM lock cannot be taken, reset the NVRAM
                 * state machine instead of leaving it wedged. */
                if (tg3_nvram_lock(tp))
                        tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
                else
                        tg3_nvram_unlock(tp);
        } else if (tg3_flag(tp, 5717_PLUS)) {
                if (PCI_FUNC(tp->pdev->busdevfn) & 1)
                        mac_offset = 0xcc;
                if (PCI_FUNC(tp->pdev->busdevfn) > 1)
                        mac_offset += 0x18c;
        } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                mac_offset = 0x10;

        /* First try to get it from MAC address mailbox. */
        tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
        /* 0x484b is ASCII "HK" - presumably the bootcode's signature
         * marking a valid mailbox entry (TODO confirm) */
        if ((hi >> 16) == 0x484b) {
                dev->hw_addr[0] = (hi >>  8) & 0xff;
                dev->hw_addr[1] = (hi >>  0) & 0xff;

                tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
                dev->hw_addr[2] = (lo >> 24) & 0xff;
                dev->hw_addr[3] = (lo >> 16) & 0xff;
                dev->hw_addr[4] = (lo >>  8) & 0xff;
                dev->hw_addr[5] = (lo >>  0) & 0xff;

                /* Some old bootcode may report a 0 MAC address in SRAM */
                addr_ok = is_valid_ether_addr(&dev->hw_addr[0]);
        }
        if (!addr_ok) {
                /* Next, try NVRAM. */
                if (!tg3_flag(tp, NO_NVRAM) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
                    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
                        /* hi holds bytes 0-1 in its low half; lo holds
                         * bytes 2-5 (values are big-endian per
                         * tg3_nvram_read_be32) */
                        memcpy(&dev->hw_addr[0], ((char *)&hi) + 2, 2);
                        memcpy(&dev->hw_addr[2], (char *)&lo, sizeof(lo));
                }
                /* Finally just fetch it out of the MAC control regs. */
                else {
                        hi = tr32(MAC_ADDR_0_HIGH);
                        lo = tr32(MAC_ADDR_0_LOW);

                        dev->hw_addr[5] = lo & 0xff;
                        dev->hw_addr[4] = (lo >> 8) & 0xff;
                        dev->hw_addr[3] = (lo >> 16) & 0xff;
                        dev->hw_addr[2] = (lo >> 24) & 0xff;
                        dev->hw_addr[1] = hi & 0xff;
                        dev->hw_addr[0] = (hi >> 8) & 0xff;
                }
        }

        if (!is_valid_ether_addr(&dev->hw_addr[0])) {
                return -EINVAL;
        }

        return 0;
}
1715
1716 static void __tg3_set_rx_mode(struct net_device *dev)
1717 {       DBGP("%s\n", __func__);
1718
1719         struct tg3 *tp = netdev_priv(dev);
1720         u32 rx_mode;
1721
1722         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
1723                                   RX_MODE_KEEP_VLAN_TAG);
1724
1725         rx_mode |= RX_MODE_KEEP_VLAN_TAG;
1726
1727         /* Accept all multicast. */
1728         tw32(MAC_HASH_REG_0, 0xffffffff);
1729         tw32(MAC_HASH_REG_1, 0xffffffff);
1730         tw32(MAC_HASH_REG_2, 0xffffffff);
1731         tw32(MAC_HASH_REG_3, 0xffffffff);
1732
1733         if (rx_mode != tp->rx_mode) {
1734                 tp->rx_mode = rx_mode;
1735                 tw32_f(MAC_RX_MODE, rx_mode);
1736                 udelay(10);
1737         }
1738 }
1739
/* Program the host coalescing engine's tick and max-frame thresholds.
 * iPXE uses minimal coalescing (interrupt per received frame).
 */
static void __tg3_set_coalesce(struct tg3 *tp)
{       DBGP("%s\n", __func__);


        tw32(HOSTCC_RXCOL_TICKS, 0);
        tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
        /* Interrupt after every received frame */
        tw32(HOSTCC_RXMAX_FRAMES, 1);
        /* FIXME: mix between TXMAX and RXMAX taken from legacy driver */
        tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
        tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
        tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

        /* Pre-5705 parts also have statistics coalescing registers */
        if (!tg3_flag(tp, 5705_PLUS)) {
                u32 val = DEFAULT_STAT_COAL_TICKS;

                tw32(HOSTCC_RXCOAL_TICK_INT, DEFAULT_RXCOAL_TICK_INT);
                tw32(HOSTCC_TXCOAL_TICK_INT, DEFAULT_TXCOAL_TICK_INT);

                /* No stats collection while the link is down */
                if (!netdev_link_ok(tp->dev))
                        val = 0;

                tw32(HOSTCC_STAT_COAL_TICKS, val);
        }
}
1764
1765 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
1766                            dma_addr_t mapping, u32 maxlen_flags,
1767                            u32 nic_addr)
1768 {       DBGP("%s\n", __func__);
1769
1770         tg3_write_mem(tp,
1771                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
1772                       ((u64) mapping >> 32));
1773         tg3_write_mem(tp,
1774                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
1775                       ((u64) mapping & 0xffffffff));
1776         tg3_write_mem(tp,
1777                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
1778                        maxlen_flags);
1779
1780         if (!tg3_flag(tp, 5705_PLUS))
1781                 tg3_write_mem(tp,
1782                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
1783                               nic_addr);
1784 }
1785
/* Reset all send/receive rings to their initial state: disable every
 * ring except the first send and first receive-return ring, zero the
 * producer/consumer mailboxes, clear the status block, and program
 * the BDINFO blocks for the rings iPXE actually uses.
 */
static void tg3_rings_reset(struct tg3 *tp)
{       DBGP("%s\n", __func__);

        int i;
        u32 txrcb, rxrcb, limit;

        /* Disable all transmit rings but the first. */
        /* The number of send BDINFO slots varies by chip generation */
        if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
        else if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
        else
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

        /* Start one BDINFO past the first ring, which stays enabled */
        for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
             txrcb < limit; txrcb += TG3_BDINFO_SIZE)
                tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
                              BDINFO_FLAGS_DISABLED);


        /* Disable all receive return rings but the first. */
        if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
        else if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
        else
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

        for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
             rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
                tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
                              BDINFO_FLAGS_DISABLED);

        /* Disable interrupts */
        tw32_mailbox_f(tp->int_mbox, 1);

        /* Reset software TX indices and the hardware mailboxes */
        tp->tx_prod = 0;
        tp->tx_cons = 0;
        tw32_mailbox(tp->prodmbox, 0);
        tw32_rx_mbox(tp->consmbox, 0);

        /* Make sure the NIC-based send BD rings are disabled. */
        if (!tg3_flag(tp, 5705_PLUS)) {
                u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
                for (i = 0; i < 16; i++)
                        tw32_tx_mbox(mbox + i * 8, 0);
        }

        txrcb = NIC_SRAM_SEND_RCB;
        rxrcb = NIC_SRAM_RCV_RET_RCB;

        /* Clear status block in ram. */
        memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);

        /* Set status block DMA address */
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
             ((u64) tp->status_mapping >> 32));
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
             ((u64) tp->status_mapping & 0xffffffff));

        /* Program the first send ring's BDINFO block */
        if (tp->tx_ring) {
                tg3_set_bdinfo(tp, txrcb, tp->tx_desc_mapping,
                               (TG3_TX_RING_SIZE <<
                                BDINFO_FLAGS_MAXLEN_SHIFT),
                               NIC_SRAM_TX_BUFFER_DESC);
                txrcb += TG3_BDINFO_SIZE;
        }

        /* FIXME: will TG3_RX_RET_MAX_SIZE_5705 work on all cards? */
        if (tp->rx_rcb) {
                tg3_set_bdinfo(tp, rxrcb, tp->rx_rcb_mapping,
                                TG3_RX_RET_MAX_SIZE_5705 <<
                                BDINFO_FLAGS_MAXLEN_SHIFT, 0);
                rxrcb += TG3_BDINFO_SIZE;
        }
}
1867
1868 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
1869 {       DBGP("%s\n", __func__);
1870
1871         u32 val, bdcache_maxcnt;
1872
1873         if (!tg3_flag(tp, 5750_PLUS) ||
1874             tg3_flag(tp, 5780_CLASS) ||
1875             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
1876             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
1877                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
1878         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
1879                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
1880                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
1881         else
1882                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
1883
1884
1885         /* NOTE: legacy driver uses RX_PENDING / 8, we only use 4 descriptors
1886          * for now, use / 4 so the result is > 0
1887          */
1888         val = TG3_DEF_RX_RING_PENDING / 4;
1889         tw32(RCVBDI_STD_THRESH, val);
1890
1891         if (tg3_flag(tp, 57765_PLUS))
1892                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
1893 }
1894
1895 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
1896 {       DBGP("%s\n", __func__);
1897
1898         u32 val, rdmac_mode;
1899         int i, err, limit;
1900         struct tg3_rx_prodring_set *tpr = &tp->prodring;
1901
1902         tg3_stop_fw(tp);
1903
1904         tg3_write_sig_pre_reset(tp);
1905
1906         if (tg3_flag(tp, INIT_COMPLETE))
1907                 tg3_abort_hw(tp);
1908
1909         if (reset_phy)
1910                 tg3_phy_reset(tp);
1911
1912         err = tg3_chip_reset(tp);
1913         if (err)
1914                 return err;
1915
1916         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
1917                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
1918                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
1919                        PCIE_PWR_MGMT_L1_THRESH_4MS;
1920                 tw32(PCIE_PWR_MGMT_THRESH, val);
1921
1922                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
1923                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
1924
1925                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
1926
1927                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
1928                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
1929         }
1930
1931         if (tg3_flag(tp, L1PLLPD_EN)) {
1932                 u32 grc_mode = tr32(GRC_MODE);
1933
1934                 /* Access the lower 1K of PL PCIE block registers. */
1935                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
1936                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
1937
1938                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
1939                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
1940                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
1941
1942                 tw32(GRC_MODE, grc_mode);
1943         }
1944
1945         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
1946                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1947                         u32 grc_mode = tr32(GRC_MODE);
1948
1949                         /* Access the lower 1K of PL PCIE block registers. */
1950                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
1951                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
1952
1953                         val = tr32(TG3_PCIE_TLDLPL_PORT +
1954                                    TG3_PCIE_PL_LO_PHYCTL5);
1955                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
1956                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
1957
1958                         tw32(GRC_MODE, grc_mode);
1959                 }
1960
1961                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
1962                         u32 grc_mode = tr32(GRC_MODE);
1963
1964                         /* Access the lower 1K of DL PCIE block registers. */
1965                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
1966                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
1967
1968                         val = tr32(TG3_PCIE_TLDLPL_PORT +
1969                                    TG3_PCIE_DL_LO_FTSMAX);
1970                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
1971                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
1972                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
1973
1974                         tw32(GRC_MODE, grc_mode);
1975                 }
1976
1977                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
1978                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
1979                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
1980                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
1981         }
1982
1983         /* This works around an issue with Athlon chipsets on
1984          * B3 tigon3 silicon.  This bit has no effect on any
1985          * other revision.  But do not set this on PCI Express
1986          * chips and don't even touch the clocks if the CPMU is present.
1987          */
1988         if (!tg3_flag(tp, CPMU_PRESENT)) {
1989                 if (!tg3_flag(tp, PCI_EXPRESS))
1990                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
1991                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
1992         }
1993
1994         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
1995             tg3_flag(tp, PCIX_MODE)) {
1996                 val = tr32(TG3PCI_PCISTATE);
1997                 val |= PCISTATE_RETRY_SAME_DMA;
1998                 tw32(TG3PCI_PCISTATE, val);
1999         }
2000
2001         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
2002                 /* Enable some hw fixes.  */
2003                 val = tr32(TG3PCI_MSI_DATA);
2004                 val |= (1 << 26) | (1 << 28) | (1 << 29);
2005                 tw32(TG3PCI_MSI_DATA, val);
2006         }
2007
2008         /* Descriptor ring init may make accesses to the
2009          * NIC SRAM area to setup the TX descriptors, so we
2010          * can only do this after the hardware has been
2011          * successfully reset.
2012          */
2013         err = tg3_init_rings(tp);
2014         if (err)
2015                 return err;
2016
2017         if (tg3_flag(tp, 57765_PLUS)) {
2018                 val = tr32(TG3PCI_DMA_RW_CTRL) &
2019                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
2020                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
2021                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
2022                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
2023                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
2024                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
2025                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
2026         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
2027                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
2028                 /* This value is determined during the probe time DMA
2029                  * engine test, tg3_test_dma.
2030                  */
2031                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
2032         }
2033
2034         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
2035                           GRC_MODE_4X_NIC_SEND_RINGS |
2036                           GRC_MODE_NO_TX_PHDR_CSUM |
2037                           GRC_MODE_NO_RX_PHDR_CSUM);
2038         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
2039         tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
2040
2041         /* Pseudo-header checksum is done by hardware logic and not
2042          * the offload processers, so make the chip do the pseudo-
2043          * header checksums on receive.  For transmit it is more
2044          * convenient to do the pseudo-header checksum in software
2045          * as Linux does that on transmit for us in all cases.
2046          */
2047         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
2048
2049         tw32(GRC_MODE,
2050              tp->grc_mode |
2051              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
2052
2053         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
2054         val = tr32(GRC_MISC_CFG);
2055         val &= ~0xff;
2056         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
2057         tw32(GRC_MISC_CFG, val);
2058
2059         /* Initialize MBUF/DESC pool. */
2060         if (tg3_flag(tp, 5750_PLUS)) {
2061                 /* Do nothing.  */
2062         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
2063                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
2064                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
2065                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
2066                 else
2067                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
2068                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
2069                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
2070         }
2071
2072         tw32(BUFMGR_MB_RDMA_LOW_WATER,
2073              tp->bufmgr_config.mbuf_read_dma_low_water);
2074         tw32(BUFMGR_MB_MACRX_LOW_WATER,
2075              tp->bufmgr_config.mbuf_mac_rx_low_water);
2076         tw32(BUFMGR_MB_HIGH_WATER,
2077              tp->bufmgr_config.mbuf_high_water);
2078
2079         tw32(BUFMGR_DMA_LOW_WATER,
2080              tp->bufmgr_config.dma_low_water);
2081         tw32(BUFMGR_DMA_HIGH_WATER,
2082              tp->bufmgr_config.dma_high_water);
2083
2084         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
2085         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2086                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
2087         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2088             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
2089             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
2090                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
2091         tw32(BUFMGR_MODE, val);
2092         for (i = 0; i < 2000; i++) {
2093                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
2094                         break;
2095                 udelay(10);
2096         }
2097         if (i >= 2000) {
2098                 DBGC(tp->dev, "%s cannot enable BUFMGR\n", __func__);
2099                 return -ENODEV;
2100         }
2101
2102         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
2103                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
2104
2105         tg3_setup_rxbd_thresholds(tp);
2106
2107         /* Initialize TG3_BDINFO's at:
2108          *  RCVDBDI_STD_BD:     standard eth size rx ring
2109          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
2110          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
2111          *
2112          * like so:
2113          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
2114          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
2115          *                              ring attribute flags
2116          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
2117          *
2118          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
2119          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
2120          *
2121          * The size of each ring is fixed in the firmware, but the location is
2122          * configurable.
2123          */
2124         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
2125              ((u64) tpr->rx_std_mapping >> 32));
2126         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
2127              ((u64) tpr->rx_std_mapping & 0xffffffff));
2128         if (!tg3_flag(tp, 5717_PLUS))
2129                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
2130                      NIC_SRAM_RX_BUFFER_DESC);
2131
2132         /* Disable the mini ring */
2133         if (!tg3_flag(tp, 5705_PLUS))
2134                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
2135                      BDINFO_FLAGS_DISABLED);
2136
2137         val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
2138
2139         if (tg3_flag(tp, 57765_PLUS))
2140                 val |= (RX_STD_MAX_SIZE << 2);
2141
2142         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
2143
2144         tpr->rx_std_prod_idx = 0;
2145
2146         /* std prod index is updated by tg3_refill_prod_ring() */
2147         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, 0);
2148         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, 0);
2149
2150         tg3_rings_reset(tp);
2151
2152         __tg3_set_mac_addr(tp,0);
2153
2154 #define TG3_MAX_MTU     1522
2155         /* MTU + ethernet header + FCS + optional VLAN tag */
2156         tw32(MAC_RX_MTU_SIZE, TG3_MAX_MTU);
2157
2158         /* The slot time is changed by tg3_setup_phy if we
2159          * run at gigabit with half duplex.
2160          */
2161         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2162               (6 << TX_LENGTHS_IPG_SHIFT) |
2163               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
2164
2165         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
2166                 val |= tr32(MAC_TX_LENGTHS) &
2167                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
2168                         TX_LENGTHS_CNT_DWN_VAL_MSK);
2169
2170         tw32(MAC_TX_LENGTHS, val);
2171
2172         /* Receive rules. */
2173         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
2174         tw32(RCVLPC_CONFIG, 0x0181);
2175
2176         /* Calculate RDMAC_MODE setting early, we need it to determine
2177          * the RCVLPC_STATE_ENABLE mask.
2178          */
2179         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
2180                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
2181                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
2182                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
2183                       RDMAC_MODE_LNGREAD_ENAB);
2184
2185         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
2186                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
2187
2188         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
2189             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
2190             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
2191                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
2192                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
2193                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
2194
2195         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
2196             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
2197                 if (tg3_flag(tp, TSO_CAPABLE) &&
2198                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2199                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
2200                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
2201                            !tg3_flag(tp, IS_5788)) {
2202                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
2203                 }
2204         }
2205
2206         if (tg3_flag(tp, PCI_EXPRESS))
2207                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
2208
2209         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
2210                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
2211
2212         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
2213             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
2214             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
2215             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
2216             tg3_flag(tp, 57765_PLUS)) {
2217                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
2218                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2219                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2220                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
2221                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
2222                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
2223                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
2224                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
2225                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
2226                 }
2227                 tw32(TG3_RDMA_RSRVCTRL_REG,
2228                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
2229         }
2230
2231         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2232             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2233                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
2234                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
2235                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
2236                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
2237         }
2238
2239         /* Receive/send statistics. */
2240         if (tg3_flag(tp, 5750_PLUS)) {
2241                 val = tr32(RCVLPC_STATS_ENABLE);
2242                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
2243                 tw32(RCVLPC_STATS_ENABLE, val);
2244         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
2245                    tg3_flag(tp, TSO_CAPABLE)) {
2246                 val = tr32(RCVLPC_STATS_ENABLE);
2247                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
2248                 tw32(RCVLPC_STATS_ENABLE, val);
2249         } else {
2250                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
2251         }
2252         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
2253         tw32(SNDDATAI_STATSENAB, 0xffffff);
2254         tw32(SNDDATAI_STATSCTRL,
2255              (SNDDATAI_SCTRL_ENABLE |
2256               SNDDATAI_SCTRL_FASTUPD));
2257
2258         /* Setup host coalescing engine. */
2259         tw32(HOSTCC_MODE, 0);
2260         for (i = 0; i < 2000; i++) {
2261                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
2262                         break;
2263                 udelay(10);
2264         }
2265
2266         __tg3_set_coalesce(tp);
2267
2268         if (!tg3_flag(tp, 5705_PLUS)) {
2269                 /* Status/statistics block address.  See tg3_timer,
2270                  * the tg3_periodic_fetch_stats call there, and
2271                  * tg3_get_stats to see how this works for 5705/5750 chips.
2272                  * NOTE: stats block removed for iPXE
2273                  */
2274                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
2275
2276                 /* Clear statistics and status block memory areas */
2277                 for (i = NIC_SRAM_STATS_BLK;
2278                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
2279                      i += sizeof(u32)) {
2280                         tg3_write_mem(tp, i, 0);
2281                         udelay(40);
2282                 }
2283         }
2284
2285         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
2286
2287         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
2288         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
2289         if (!tg3_flag(tp, 5705_PLUS))
2290                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
2291
2292         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
2293                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
2294                 /* reset to prevent losing 1st rx packet intermittently */
2295                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
2296                 udelay(10);
2297         }
2298
2299         if (tg3_flag(tp, ENABLE_APE))
2300                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
2301         else
2302                 tp->mac_mode = 0;
2303         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
2304                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
2305         if (!tg3_flag(tp, 5705_PLUS) &&
2306             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2307             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
2308                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2309         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
2310         udelay(40);
2311
2312         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
2313          * If TG3_FLAG_IS_NIC is zero, we should read the
2314          * register to preserve the GPIO settings for LOMs. The GPIOs,
2315          * whether used as inputs or outputs, are set by boot code after
2316          * reset.
2317          */
2318         if (!tg3_flag(tp, IS_NIC)) {
2319                 u32 gpio_mask;
2320
2321                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
2322                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
2323                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
2324
2325                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
2326                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
2327                                      GRC_LCLCTRL_GPIO_OUTPUT3;
2328
2329                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
2330                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
2331
2332                 tp->grc_local_ctrl &= ~gpio_mask;
2333                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
2334
2335                 /* GPIO1 must be driven high for eeprom write protect */
2336                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
2337                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
2338                                                GRC_LCLCTRL_GPIO_OUTPUT1);
2339         }
2340         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
2341         udelay(100);
2342
2343         if (!tg3_flag(tp, 5705_PLUS)) {
2344                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
2345                 udelay(40);
2346         }
2347
2348         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
2349                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
2350                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
2351                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
2352                WDMAC_MODE_LNGREAD_ENAB);
2353
2354         /* Enable host coalescing bug fix */
2355         if (tg3_flag(tp, 5755_PLUS))
2356                 val |= WDMAC_MODE_STATUS_TAG_FIX;
2357
2358         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
2359                 val |= WDMAC_MODE_BURST_ALL_DATA;
2360
2361         tw32_f(WDMAC_MODE, val);
2362         udelay(40);
2363
2364         if (tg3_flag(tp, PCIX_MODE)) {
2365                 u16 pcix_cmd;
2366
2367                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
2368                                      &pcix_cmd);
2369                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
2370                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
2371                         pcix_cmd |= PCI_X_CMD_READ_2K;
2372                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2373                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
2374                         pcix_cmd |= PCI_X_CMD_READ_2K;
2375                 }
2376                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
2377                                       pcix_cmd);
2378         }
2379
2380         tw32_f(RDMAC_MODE, rdmac_mode);
2381         udelay(40);
2382
2383         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
2384         if (!tg3_flag(tp, 5705_PLUS))
2385                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
2386
2387         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
2388                 tw32(SNDDATAC_MODE,
2389                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
2390         else
2391                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
2392
2393         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
2394         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
2395         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
2396         if (tg3_flag(tp, LRG_PROD_RING_CAP))
2397                 val |= RCVDBDI_MODE_LRG_RING_SZ;
2398         tw32(RCVDBDI_MODE, val);
2399         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
2400
2401         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
2402         if (tg3_flag(tp, ENABLE_TSS))
2403                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
2404         tw32(SNDBDI_MODE, val);
2405         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
2406
2407
2408         /* FIXME: 5701 firmware fix? */
2409 #if 0
2410         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
2411                 err = tg3_load_5701_a0_firmware_fix(tp);
2412                 if (err)
2413                         return err;
2414         }
2415 #endif
2416
2417         tp->tx_mode = TX_MODE_ENABLE;
2418
2419         if (tg3_flag(tp, 5755_PLUS) ||
2420             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
2421                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
2422
2423         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2424                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
2425                 tp->tx_mode &= ~val;
2426                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
2427         }
2428
2429         tw32_f(MAC_TX_MODE, tp->tx_mode);
2430         udelay(100);
2431
2432         tp->rx_mode = RX_MODE_ENABLE;
2433
2434         tw32_f(MAC_RX_MODE, tp->rx_mode);
2435         udelay(10);
2436
2437         tw32(MAC_LED_CTRL, tp->led_ctrl);
2438
2439         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2440         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2441                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
2442                 udelay(10);
2443         }
2444         tw32_f(MAC_RX_MODE, tp->rx_mode);
2445         udelay(10);
2446
2447         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2448                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
2449                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
2450                         /* Set drive transmission level to 1.2V  */
2451                         /* only if the signal pre-emphasis bit is not set  */
2452                         val = tr32(MAC_SERDES_CFG);
2453                         val &= 0xfffff000;
2454                         val |= 0x880;
2455                         tw32(MAC_SERDES_CFG, val);
2456                 }
2457                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
2458                         tw32(MAC_SERDES_CFG, 0x616000);
2459         }
2460
2461         /* Prevent chip from dropping frames when flow control
2462          * is enabled.
2463          */
2464         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2465                 val = 1;
2466         else
2467                 val = 2;
2468         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
2469
2470         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
2471             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2472                 /* Use hardware link auto-negotiation */
2473                 tg3_flag_set(tp, HW_AUTONEG);
2474         }
2475
2476         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
2477             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2478                 u32 tmp;
2479
2480                 tmp = tr32(SERDES_RX_CTRL);
2481                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
2482                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
2483                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
2484                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
2485         }
2486
2487         err = tg3_setup_phy(tp, 0);
2488         if (err)
2489                 return err;
2490
2491         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2492             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2493                 u32 tmp;
2494
2495                 /* Clear CRC stats. */
2496                 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
2497                         tg3_writephy(tp, MII_TG3_TEST1,
2498                                      tmp | MII_TG3_TEST1_CRC_EN);
2499                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
2500                 }
2501         }
2502
2503         __tg3_set_rx_mode(tp->dev);
2504
2505         /* Initialize receive rules. */
2506         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
2507         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
2508         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
2509         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
2510
2511         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
2512                 limit = 8;
2513         else
2514                 limit = 16;
2515         if (tg3_flag(tp, ENABLE_ASF))
2516                 limit -= 4;
2517         switch (limit) {
2518         case 16:
2519                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
2520         case 15:
2521                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
2522         case 14:
2523                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
2524         case 13:
2525                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
2526         case 12:
2527                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
2528         case 11:
2529                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
2530         case 10:
2531                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
2532         case 9:
2533                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
2534         case 8:
2535                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
2536         case 7:
2537                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
2538         case 6:
2539                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
2540         case 5:
2541                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
2542         case 4:
2543                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
2544         case 3:
2545                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
2546         case 2:
2547         case 1:
2548
2549         default:
2550                 break;
2551         }
2552
2553         return 0;
2554 }
2555
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative error code from tg3_reset_hw().
 * @reset_phy is passed straight through to tg3_reset_hw() and selects
 * whether the PHY is reset as part of initialisation.
 */
int tg3_init_hw(struct tg3 *tp, int reset_phy)
{       DBGP("%s\n", __func__);

        /* Set up chip clocking first (helper defined elsewhere in this
         * driver) before any further register programming.
         */
        tg3_switch_clocks(tp);

        /* Reset the indirect memory window base so subsequent windowed
         * SRAM accesses start from offset 0.
         */
        tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Full hardware (re)initialisation; its result is our result */
        return tg3_reset_hw(tp, reset_phy);
}
2568
2569 void tg3_set_txd(struct tg3 *tp, int entry,
2570                         dma_addr_t mapping, int len, u32 flags)
2571 {       DBGP("%s\n", __func__);
2572
2573         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
2574
2575         txd->addr_hi = ((u64) mapping >> 32);
2576         txd->addr_lo = ((u64) mapping & 0xffffffff);
2577         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
2578         txd->vlan_tag = 0;
2579 }
2580
/* Run one DMA test transaction of @size bytes between the host buffer
 * at bus address @buf_dma and NIC SRAM, in the direction selected by
 * @to_device (non-zero: host -> NIC via the read-DMA engine; zero:
 * NIC -> host via the write-DMA engine).  @buf is unused here; the
 * caller is expected to verify the transferred data itself.
 *
 * Returns 0 if the DMA completion appears within the poll window,
 * -ENODEV on timeout.
 */
int tg3_do_test_dma(struct tg3 *tp, u32 __unused *buf, dma_addr_t buf_dma, int size, int to_device)
{       DBGP("%s\n", __func__);

        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int ret;
        unsigned int i;

        /* The test descriptor is placed at the start of the on-chip
         * DMA descriptor pool.
         */
        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Clear the completion FIFOs and DMA engine status registers */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        /* Disable the buffer manager and reset the flow-through queues */
        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Build the internal descriptor: 64-bit host bus address split
         * into two 32-bit halves, a fixed NIC mbuf handle, and length.
         */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                /* cqid/sqid pairs are magic values inherited from the
                 * Broadcom reference driver; presumably they select the
                 * completion/submission queues for each DMA engine --
                 * TODO confirm against Broadcom documentation.
                 */
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor word-by-word into NIC SRAM through the
         * indirect PCI memory window (config-space accesses).
         * NOTE(review): the u32* view of test_desc is a type-pun; it is
         * the established idiom in this driver but technically violates
         * strict aliasing.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        /* Restore the memory window base to 0 when done */
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Start the transfer by enqueueing the descriptor's SRAM address
         * on the appropriate DMA engine's FIFO.
         */
        if (to_device)
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        else
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

        /* Poll for completion: up to 40 iterations * 100us = 4ms */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                /* Completion is signalled by the descriptor address
                 * echoed back in the low 16 bits of the FIFO register.
                 */
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}