2 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
4 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
6 * Licensed under the GNU/GPL. See COPYING for details.
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/delay.h>
14 #include <linux/etherdevice.h>
15 #include <linux/mii.h>
16 #include <linux/phy.h>
17 #include <linux/phy_fixed.h>
18 #include <linux/interrupt.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/bcm47xx_nvram.h>
/* BCMA core IDs this driver binds to: the BCM4706 GBit MAC and the generic
 * GBit MAC core, any revision/class. Exported for module autoloading.
 */
22 static const struct bcma_device_id bgmac_bcma_tbl[] = {
23 	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
24 	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
27 MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
/* Poll @reg until (value & @mask) == @value or @timeout (us) elapses.
 * Returns true on match, false (with an error print) on timeout.
 * NOTE(review): the loop body's success return and delay lines are not
 * visible in this extract — confirm against the full source.
 */
29 static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
30 			     u32 value, int timeout)
35 	for (i = 0; i < timeout / 10; i++) {
36 		val = bcma_read32(core, reg);
37 		if ((val & mask) == value)
41 	pr_err("Timeout waiting for reg 0x%X\n", reg);
45 /**************************************************
47 **************************************************/
/* Stop a TX DMA ring: suspend it, wait for a quiescent state, then clear
 * the control register and wait for DISABLED. Errors are logged, not
 * propagated.
 */
49 static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
57 	/* Suspend DMA TX ring first.
58 	 * bgmac_wait_value doesn't support waiting for any of few values, so
59 	 * implement whole loop here.
61 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
62 		    BGMAC_DMA_TX_SUSPEND);
	/* Accept any of three "ring not running" states as success. */
63 	for (i = 0; i < 10000 / 10; i++) {
64 		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
65 		val &= BGMAC_DMA_TX_STAT;
66 		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
67 		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
68 		    val == BGMAC_DMA_TX_STAT_STOPPED) {
75 		bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
76 			  ring->mmio_base, val);
78 	/* Remove SUSPEND bit */
79 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
80 	if (!bgmac_wait_value(bgmac->core,
81 			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
82 			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
84 		bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
	/* One last re-read after the grace period before declaring failure. */
87 	val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
88 	if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
89 		bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
/* Enable a TX DMA ring. On core rev >= 4 the burst-length, multiple-read,
 * prefetch-control and prefetch-threshold fields are programmed explicitly;
 * parity checking is disabled (BGMAC_DMA_TX_PARITY_DISABLE set).
 */
94 static void bgmac_dma_tx_enable(struct bgmac *bgmac,
95 				struct bgmac_dma_ring *ring)
99 	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
100 	if (bgmac->core->id.rev >= 4) {
101 		ctl &= ~BGMAC_DMA_TX_BL_MASK;
102 		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;
104 		ctl &= ~BGMAC_DMA_TX_MR_MASK;
105 		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;
107 		ctl &= ~BGMAC_DMA_TX_PC_MASK;
108 		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;
110 		ctl &= ~BGMAC_DMA_TX_PT_MASK;
111 		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
113 	ctl |= BGMAC_DMA_TX_ENABLE;
114 	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
115 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
/* Fill TX descriptor @i from the matching slot's pre-mapped dma_addr.
 * @ctl0 carries SOF/EOF/IOC flags from the caller; EOT is added here for
 * the last ring slot. Descriptor fields are written little-endian.
 */
119 bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
120 		     int i, int len, u32 ctl0)
122 	struct bgmac_slot_info *slot;
123 	struct bgmac_dma_desc *dma_desc;
126 	if (i == BGMAC_TX_RING_SLOTS - 1)
127 		ctl0 |= BGMAC_DESC_CTL0_EOT;
129 	ctl1 = len & BGMAC_DESC_CTL1_LEN;
131 	slot = &ring->slots[i];
132 	dma_desc = &ring->cpu_base[i];
133 	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
134 	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
135 	dma_desc->ctl0 = cpu_to_le32(ctl0);
136 	dma_desc->ctl1 = cpu_to_le32(ctl1);
/* Queue an skb for transmission: map the linear head and each page frag,
 * write descriptors, then kick the hardware by advancing the TX index.
 * Returns NETDEV_TX_BUSY when the ring lacks room; on mapping failure the
 * already-mapped buffers are unmapped (error path at the bottom).
 */
139 static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
140 				    struct bgmac_dma_ring *ring,
143 	struct device *dma_dev = bgmac->core->dma_dev;
144 	struct net_device *net_dev = bgmac->net_dev;
145 	int index = ring->end % BGMAC_TX_RING_SLOTS;
146 	struct bgmac_slot_info *slot = &ring->slots[index];
151 	if (skb->len > BGMAC_DESC_CTL1_LEN) {
152 		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
	/* HW checksumming unsupported: resolve CHECKSUM_PARTIAL in software. */
156 	if (skb->ip_summed == CHECKSUM_PARTIAL)
157 		skb_checksum_help(skb);
159 	nr_frags = skb_shinfo(skb)->nr_frags;
161 	/* ring->end - ring->start will return the number of valid slots,
162 	 * even when ring->end overflows
164 	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
165 		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
166 		netif_stop_queue(net_dev);
167 		return NETDEV_TX_BUSY;
170 	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
172 	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
175 	flags = BGMAC_DESC_CTL0_SOF;
177 		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;
179 	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
	/* One descriptor per page fragment; EOF/IOC only on the last one. */
182 	for (i = 0; i < nr_frags; i++) {
183 		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
184 		int len = skb_frag_size(frag);
186 		index = (index + 1) % BGMAC_TX_RING_SLOTS;
187 		slot = &ring->slots[index];
188 		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
190 		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
193 		if (i == nr_frags - 1)
194 			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;
196 		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
200 	ring->end += nr_frags + 1;
201 	netdev_sent_queue(net_dev, skb->len);
205 	/* Increase ring->end to point empty slot. We tell hardware the first
206 	 * slot it should *not* read.
208 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
210 		    (ring->end % BGMAC_TX_RING_SLOTS) *
211 		    sizeof(struct bgmac_dma_desc));
	/* Stop the queue early so a worst-case skb still fits next time. */
213 	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
214 		netif_stop_queue(net_dev);
	/* Error unwind: unmap head, then walk back over mapped frags. */
219 	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
223 		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
224 		struct bgmac_slot_info *slot = &ring->slots[index];
225 		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
226 		int len = ctl1 & BGMAC_DESC_CTL1_LEN;
228 		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
232 	bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
240 /* Free transmitted packets */
/* Reclaim completed TX slots up to the hardware's current descriptor
 * pointer: unmap buffers (single for SOF head, page for frags), free skbs,
 * report completed bytes/packets and wake the queue if it was stopped.
 */
241 static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
243 	struct device *dma_dev = bgmac->core->dma_dev;
246 	unsigned bytes_compl = 0, pkts_compl = 0;
248 	/* The last slot that hardware didn't consume yet */
249 	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
250 	empty_slot &= BGMAC_DMA_TX_STATDPTR;
251 	empty_slot -= ring->index_base;
252 	empty_slot &= BGMAC_DMA_TX_STATDPTR;
253 	empty_slot /= sizeof(struct bgmac_dma_desc);
255 	while (ring->start != ring->end) {
256 		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
257 		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
261 		if (slot_idx == empty_slot)
264 		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
265 		len = ctl1 & BGMAC_DESC_CTL1_LEN;
		/* NOTE(review): ctl1 is tested against a CTL0 flag here —
		 * looks suspicious; confirm against the full source.
		 */
266 		if (ctl1 & BGMAC_DESC_CTL0_SOF)
267 			/* Unmap no longer used buffer */
268 			dma_unmap_single(dma_dev, slot->dma_addr, len,
271 			dma_unmap_page(dma_dev, slot->dma_addr, len,
275 			bytes_compl += slot->skb->len;
278 			/* Free memory! :) */
279 			dev_kfree_skb(slot->skb);
291 	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);
293 	if (netif_queue_stopped(bgmac->net_dev))
294 		netif_wake_queue(bgmac->net_dev);
/* Disable an RX DMA ring (no-op for unconfigured rings) and wait for the
 * DISABLED status; failure is only logged.
 */
297 static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
299 	if (!ring->mmio_base)
302 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
303 	if (!bgmac_wait_value(bgmac->core,
304 			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
305 			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
307 		bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
/* Enable an RX DMA ring. Only the address-extension bits of the current
 * control value are preserved; rev >= 4 cores also get burst-length,
 * prefetch-control and prefetch-threshold programmed. The RX frame offset
 * tells the hardware where in the buffer to place the frame (header room).
 */
311 static void bgmac_dma_rx_enable(struct bgmac *bgmac,
312 				struct bgmac_dma_ring *ring)
316 	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
318 	/* preserve ONLY bits 16-17 from current hardware value */
319 	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
321 	if (bgmac->core->id.rev >= 4) {
322 		ctl &= ~BGMAC_DMA_RX_BL_MASK;
323 		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
325 		ctl &= ~BGMAC_DMA_RX_PC_MASK;
326 		ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;
328 		ctl &= ~BGMAC_DMA_RX_PT_MASK;
329 		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
331 	ctl |= BGMAC_DMA_RX_ENABLE;
332 	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
333 	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
334 	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
335 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
/* Allocate a fresh page-fragment RX buffer for @slot, poison its header
 * (0xdead/0xbeef — later used to detect frames the HW never wrote), and
 * DMA-map it. On mapping failure the fragment is released via its page.
 */
338 static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
339 				     struct bgmac_slot_info *slot)
341 	struct device *dma_dev = bgmac->core->dma_dev;
343 	struct bgmac_rx_header *rx;
347 	buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
351 	/* Poison - if everything goes fine, hardware will overwrite it */
352 	rx = buf + BGMAC_RX_BUF_OFFSET;
353 	rx->len = cpu_to_le16(0xdead);
354 	rx->flags = cpu_to_le16(0xbeef);
356 	/* Map skb for the DMA */
357 	dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
358 				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
359 	if (dma_mapping_error(dma_dev, dma_addr)) {
360 		bgmac_err(bgmac, "DMA mapping error\n");
361 		put_page(virt_to_head_page(buf));
365 	/* Update the slot */
367 	slot->dma_addr = dma_addr;
/* Tell the hardware how far software has prepared RX descriptors by
 * writing the byte offset of ring->end into the RX index register.
 */
372 static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
373 				      struct bgmac_dma_ring *ring)
377 	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
379 		    ring->end * sizeof(struct bgmac_dma_desc));
/* (Re)write RX descriptor @desc_idx from its slot's dma_addr: buffer length
 * in ctl1, EOT on the last ring slot. Also advances ring->end to mark this
 * descriptor as prepared.
 */
382 static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
383 				    struct bgmac_dma_ring *ring, int desc_idx)
385 	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
386 	u32 ctl0 = 0, ctl1 = 0;
388 	if (desc_idx == BGMAC_RX_RING_SLOTS - 1)
389 		ctl0 |= BGMAC_DESC_CTL0_EOT;
390 	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
391 	/* Is there any BGMAC device that requires extension? */
392 	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
393 	 * B43_DMA64_DCTL1_ADDREXT_MASK;
396 	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
397 	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
398 	dma_desc->ctl0 = cpu_to_le32(ctl0);
399 	dma_desc->ctl1 = cpu_to_le32(ctl1);
401 	ring->end = desc_idx;
/* Re-poison a still-mapped RX buffer's header so it can be handed back to
 * the hardware; syncs the buffer to the CPU before writing and back to the
 * device afterwards.
 */
404 static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
405 				    struct bgmac_slot_info *slot)
407 	struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
409 	dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
411 	rx->len = cpu_to_le16(0xdead);
412 	rx->flags = cpu_to_le16(0xbeef);
413 	dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
/* NAPI RX: process received frames between ring->start and the hardware's
 * current descriptor pointer, up to @weight packets. For each frame a
 * replacement buffer is allocated first; the old buffer is unmapped, its
 * length/flags read from the in-buffer header, poison/oversize frames are
 * dropped, and good frames are wrapped with build_skb() and passed to GRO.
 * Returns the number of packets handled.
 */
417 static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
	/* Translate HW status pointer to a slot index (same scheme as TX). */
423 	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
424 	end_slot &= BGMAC_DMA_RX_STATDPTR;
425 	end_slot -= ring->index_base;
426 	end_slot &= BGMAC_DMA_RX_STATDPTR;
427 	end_slot /= sizeof(struct bgmac_dma_desc);
429 	while (ring->start != end_slot) {
430 		struct device *dma_dev = bgmac->core->dma_dev;
431 		struct bgmac_slot_info *slot = &ring->slots[ring->start];
432 		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
434 		void *buf = slot->buf;
435 		dma_addr_t dma_addr = slot->dma_addr;
439 		/* Prepare new skb as replacement */
440 		if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
			/* Allocation failed: recycle the old buffer instead
			 * of dropping the slot.
			 */
441 			bgmac_dma_rx_poison_buf(dma_dev, slot);
445 		/* Unmap buffer to make it accessible to the CPU */
446 		dma_unmap_single(dma_dev, dma_addr,
447 				 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
449 		/* Get info from the header */
450 		len = le16_to_cpu(rx->len);
451 		flags = le16_to_cpu(rx->flags);
453 		/* Check for poison and drop or pass the packet */
454 		if (len == 0xdead && flags == 0xbeef) {
455 			bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
457 			put_page(virt_to_head_page(buf));
461 		if (len > BGMAC_RX_ALLOC_SIZE) {
462 			bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n",
464 			put_page(virt_to_head_page(buf));
471 		skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
		/* Skip the DMA header + frame offset so data starts at the
		 * Ethernet header.
		 */
472 		skb_put(skb, BGMAC_RX_FRAME_OFFSET +
473 			BGMAC_RX_BUF_OFFSET + len);
474 		skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
475 			 BGMAC_RX_BUF_OFFSET);
477 		skb_checksum_none_assert(skb);
478 		skb->protocol = eth_type_trans(skb, bgmac->net_dev);
479 		napi_gro_receive(&bgmac->napi, skb);
483 		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
485 		if (++ring->start >= BGMAC_RX_RING_SLOTS)
488 		if (handled >= weight) /* Should never be greater */
492 	bgmac_dma_rx_update_index(bgmac, ring);
497 /* Does ring support unaligned addressing? */
/* Probe by writing the ring base low address and reading it back; a
 * non-zero readback means the core accepted the unaligned address.
 */
498 static bool bgmac_dma_unaligned(struct bgmac *bgmac,
499 				struct bgmac_dma_ring *ring,
500 				enum bgmac_dma_ring_type ring_type)
503 	case BGMAC_DMA_RING_TX:
504 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
506 		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
509 	case BGMAC_DMA_RING_RX:
510 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
512 		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
/* Free all skbs and DMA mappings left on a TX ring (single mapping for
 * heads, page mapping for fragments, per the descriptor length in ctl1).
 * Descriptor memory itself is released separately.
 */
519 static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
520 				   struct bgmac_dma_ring *ring)
522 	struct device *dma_dev = bgmac->core->dma_dev;
523 	struct bgmac_dma_desc *dma_desc = ring->cpu_base;
524 	struct bgmac_slot_info *slot;
527 	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
528 		int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
530 		slot = &ring->slots[i];
531 		dev_kfree_skb(slot->skb);
537 			dma_unmap_single(dma_dev, slot->dma_addr,
540 			dma_unmap_page(dma_dev, slot->dma_addr,
/* Unmap and release every RX buffer still held by the ring's slots. */
545 static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
546 				   struct bgmac_dma_ring *ring)
548 	struct device *dma_dev = bgmac->core->dma_dev;
549 	struct bgmac_slot_info *slot;
552 	for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
553 		slot = &ring->slots[i];
557 		dma_unmap_single(dma_dev, slot->dma_addr,
560 		put_page(virt_to_head_page(slot->buf));
/* Release the coherent descriptor array for one ring (@num_slots entries). */
565 static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
566 				     struct bgmac_dma_ring *ring,
569 	struct device *dma_dev = bgmac->core->dma_dev;
575 	/* Free ring of descriptors */
576 	size = num_slots * sizeof(struct bgmac_dma_desc);
577 	dma_free_coherent(dma_dev, size, ring->cpu_base,
/* Free the buffers (not descriptor memory) of all TX and RX rings. */
581 static void bgmac_dma_cleanup(struct bgmac *bgmac)
585 	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
586 		bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);
588 	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
589 		bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
/* Free the descriptor arrays of all TX and RX rings. */
592 static void bgmac_dma_free(struct bgmac *bgmac)
596 	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
597 		bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
598 					 BGMAC_TX_RING_SLOTS);
600 	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
601 		bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
602 					 BGMAC_RX_RING_SLOTS);
/* Allocate coherent descriptor arrays for every TX and RX ring, probe each
 * ring for unaligned-address support and derive index_base accordingly.
 * Requires the core to report 64-bit DMA. On failure all descriptor memory
 * allocated so far is freed. TX buffer slots are populated later, at xmit.
 */
605 static int bgmac_dma_alloc(struct bgmac *bgmac)
607 	struct device *dma_dev = bgmac->core->dma_dev;
608 	struct bgmac_dma_ring *ring;
609 	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
610 					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
611 	int size; /* ring size: different for Tx and Rx */
615 	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
616 	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
618 	if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
619 		bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
623 	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
624 		ring = &bgmac->tx_ring[i];
625 		ring->mmio_base = ring_base[i];
627 		/* Alloc ring of descriptors */
628 		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
629 		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
632 		if (!ring->cpu_base) {
633 			bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
638 		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
		/* Unaligned rings index relative to the ring base address. */
641 			ring->index_base = lower_32_bits(ring->dma_base);
643 			ring->index_base = 0;
645 		/* No need to alloc TX slots yet */
648 	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
649 		ring = &bgmac->rx_ring[i];
650 		ring->mmio_base = ring_base[i];
652 		/* Alloc ring of descriptors */
653 		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
654 		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
657 		if (!ring->cpu_base) {
658 			bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
664 		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
667 			ring->index_base = lower_32_bits(ring->dma_base);
669 			ring->index_base = 0;
675 	bgmac_dma_free(bgmac);
/* Program ring base addresses into the hardware and enable the rings.
 * For aligned rings the engine is enabled before the base address is
 * written; for unaligned rings, after. RX rings additionally get a full
 * set of freshly-allocated buffers and an initial index write. On any slot
 * allocation failure everything is torn down via bgmac_dma_cleanup().
 */
679 static int bgmac_dma_init(struct bgmac *bgmac)
681 	struct bgmac_dma_ring *ring;
684 	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
685 		ring = &bgmac->tx_ring[i];
687 		if (!ring->unaligned)
688 			bgmac_dma_tx_enable(bgmac, ring);
689 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
690 			    lower_32_bits(ring->dma_base));
691 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
692 			    upper_32_bits(ring->dma_base));
694 			bgmac_dma_tx_enable(bgmac, ring);
697 		ring->end = 0;	/* Points the slot that should *not* be read */
700 	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
703 		ring = &bgmac->rx_ring[i];
705 		if (!ring->unaligned)
706 			bgmac_dma_rx_enable(bgmac, ring);
707 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
708 			    lower_32_bits(ring->dma_base));
709 		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
710 			    upper_32_bits(ring->dma_base));
716 		for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
717 			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
721 			bgmac_dma_rx_setup_desc(bgmac, ring, j);
724 		bgmac_dma_rx_update_index(bgmac, ring);
730 	bgmac_dma_cleanup(bgmac);
734 /**************************************************
736 **************************************************/
/* Read a PHY register over MDIO. On BCM4706 the PHY-access registers live
 * in the shared GMAC-common core; otherwise they are local to this core.
 * Starts the transaction via BGMAC_PA_START and polls for completion;
 * the BUILD_BUG_ONs pin the local register-field definitions to the BCMA
 * ones so either register block can be driven with the same constants.
 */
738 static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
740 	struct bcma_device *core;
745 	BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
746 	BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
747 	BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
748 	BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
749 	BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
750 	BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
751 	BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
752 	BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
753 	BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
754 	BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
755 	BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
757 	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
758 		core = bgmac->core->bus->drv_gmac_cmn.core;
759 		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
760 		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
763 		phy_access_addr = BGMAC_PHY_ACCESS;
764 		phy_ctl_addr = BGMAC_PHY_CNTL;
	/* Select the external PHY address in the control register. */
767 	tmp = bcma_read32(core, phy_ctl_addr);
768 	tmp &= ~BGMAC_PC_EPA_MASK;
770 	bcma_write32(core, phy_ctl_addr, tmp);
772 	tmp = BGMAC_PA_START;
773 	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
774 	tmp |= reg << BGMAC_PA_REG_SHIFT;
775 	bcma_write32(core, phy_access_addr, tmp);
777 	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
778 		bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
783 	return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
786 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
/* Write @value to a PHY register over MDIO. Register block selection
 * mirrors bgmac_phy_read(); additionally the MDIO interrupt status is
 * acknowledged before starting, and completion is polled via PA_START.
 */
787 static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
789 	struct bcma_device *core;
794 	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
795 		core = bgmac->core->bus->drv_gmac_cmn.core;
796 		phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
797 		phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
800 		phy_access_addr = BGMAC_PHY_ACCESS;
801 		phy_ctl_addr = BGMAC_PHY_CNTL;
804 	tmp = bcma_read32(core, phy_ctl_addr);
805 	tmp &= ~BGMAC_PC_EPA_MASK;
807 	bcma_write32(core, phy_ctl_addr, tmp);
	/* Clear (write-1-to-clear) any pending MDIO interrupt first. */
809 	bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
810 	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
811 		bgmac_warn(bgmac, "Error setting MDIO int\n");
813 	tmp = BGMAC_PA_START;
814 	tmp |= BGMAC_PA_WRITE;
815 	tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
816 	tmp |= reg << BGMAC_PA_REG_SHIFT;
818 	bcma_write32(core, phy_access_addr, tmp);
820 	if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
821 		bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
829 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
/* Chip-specific PHY initialization: magic register sequences (from Broadcom
 * reference code, see URL above) written to PHYs 0-4 on BCM5356, and on
 * BCM5357/4749/53572 (selected packages) plus two chip-control tweaks.
 */
830 static void bgmac_phy_init(struct bgmac *bgmac)
832 	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
833 	struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
836 	if (ci->id == BCMA_CHIP_ID_BCM5356) {
837 		for (i = 0; i < 5; i++) {
838 			bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
839 			bgmac_phy_write(bgmac, i, 0x15, 0x0100);
840 			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
841 			bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
842 			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
845 	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
846 	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
847 	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
848 		bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
849 		bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
850 		for (i = 0; i < 5; i++) {
851 			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
852 			bgmac_phy_write(bgmac, i, 0x16, 0x5284);
853 			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
854 			bgmac_phy_write(bgmac, i, 0x17, 0x0010);
855 			bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
856 			bgmac_phy_write(bgmac, i, 0x16, 0x5296);
857 			bgmac_phy_write(bgmac, i, 0x17, 0x1073);
858 			bgmac_phy_write(bgmac, i, 0x17, 0x9073);
859 			bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
860 			bgmac_phy_write(bgmac, i, 0x17, 0x9273);
861 			bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
866 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
/* Reset the attached PHY (skipped for register-less PHYs), verify the
 * BMCR_RESET bit self-clears, then re-run chip-specific PHY init.
 */
867 static void bgmac_phy_reset(struct bgmac *bgmac)
869 	if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
872 	bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET);
874 	if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET)
875 		bgmac_err(bgmac, "PHY reset failed\n");
876 	bgmac_phy_init(bgmac);
879 /**************************************************
881 **************************************************/
883 /* TODO: can we just drop @force? Can we don't reset MAC at all if there is
884  * nothing to change? Try if after stabilizng driver.
/* Read-modify-write BGMAC_CMDCFG as (old & mask) | set, bracketed by
 * asserting and deasserting the revision-dependent software-reset bit.
 * The write is skipped when nothing changes, unless @force.
 */
886 static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
889 	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
890 	u32 new_val = (cmdcfg & mask) | set;
892 	bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev));
895 	if (new_val != cmdcfg || force)
896 		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
898 	bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev));
/* Program the 6-byte MAC address: bytes 0-3 into MACADDR_HIGH, 4-5 into
 * MACADDR_LOW (big-endian packing within each register).
 */
902 static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
906 	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
907 	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
908 	tmp = (addr[4] << 8) | addr[5];
909 	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
/* ndo_set_rx_mode: toggle the promiscuous-mode bit in CMDCFG to follow
 * IFF_PROMISC. Multicast filtering is not handled here.
 */
912 static void bgmac_set_rx_mode(struct net_device *net_dev)
914 	struct bgmac *bgmac = netdev_priv(net_dev);
916 	if (net_dev->flags & IFF_PROMISC)
917 		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
919 		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
922 #if 0 /* We don't use that regs yet */
/* Snapshot the MIB TX/RX counter registers into the driver's shadow arrays
 * (skipped on BCM4706, whose MIB layout differs). Currently compiled out.
 */
923 static void bgmac_chip_stats_update(struct bgmac *bgmac)
927 	if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
928 		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
929 			bgmac->mib_tx_regs[i] =
931 					   BGMAC_TX_GOOD_OCTETS + (i * 4));
932 		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
933 			bgmac->mib_rx_regs[i] =
935 					   BGMAC_RX_GOOD_OCTETS + (i * 4));
938 	/* TODO: what else? how to handle BCM4706? Specs are needed */
/* Clear the hardware MIB counters by enabling read-on-reset (DC_MROR) and
 * reading every TX/RX counter register once. Not applicable on BCM4706.
 */
942 static void bgmac_clear_mib(struct bgmac *bgmac)
946 	if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
949 	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
950 	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
951 		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
952 	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
953 		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
956 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
/* Apply bgmac->mac_speed / mac_duplex to the CMDCFG ES and HD fields.
 * Unsupported speed values are only logged.
 */
957 static void bgmac_mac_speed(struct bgmac *bgmac)
959 	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
962 	switch (bgmac->mac_speed) {
964 		set |= BGMAC_CMDCFG_ES_10;
967 		set |= BGMAC_CMDCFG_ES_100;
970 		set |= BGMAC_CMDCFG_ES_1000;
973 		set |= BGMAC_CMDCFG_ES_2500;
976 		bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed);
979 	if (bgmac->mac_duplex == DUPLEX_HALF)
980 		set |= BGMAC_CMDCFG_HD;
982 	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
/* Configure the MII interface. BCM4707/53018: enable the switch clock and
 * force 2500/full. Other chips: if the device-status interface-mode field
 * reports mode 0 or 1, force 100/full.
 */
985 static void bgmac_miiconfig(struct bgmac *bgmac)
987 	struct bcma_device *core = bgmac->core;
988 	struct bcma_chipinfo *ci = &core->bus->chipinfo;
991 	if (ci->id == BCMA_CHIP_ID_BCM4707 ||
992 	    ci->id == BCMA_CHIP_ID_BCM53018) {
993 		bcma_awrite32(core, BCMA_IOCTL,
994 			      bcma_aread32(core, BCMA_IOCTL) | 0x40 |
995 			      BGMAC_BCMA_IOCTL_SW_CLKEN);
996 		bgmac->mac_speed = SPEED_2500;
997 		bgmac->mac_duplex = DUPLEX_FULL;
998 		bgmac_mac_speed(bgmac);
1000 		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
1001 			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
1002 		if (imode == 0 || imode == 1) {
1003 			bgmac->mac_speed = SPEED_100;
1004 			bgmac->mac_duplex = DUPLEX_FULL;
1005 			bgmac_mac_speed(bgmac);
1010 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
/* Full MAC reset sequence: quiesce TX/RX DMA rings, re-enable the BCMA
 * core with switch flags derived from IOST (BCM4707 skips the core reset
 * here — it is done once at probe), request the Misc PLL where needed,
 * program the switch/interface type via chip-control on 5357-family chips,
 * then reset CMDCFG to defaults, clear MIB counters, set the MDC clock,
 * and redo MII/PHY init. Also resets the BQL queue state.
 */
1011 static void bgmac_chip_reset(struct bgmac *bgmac)
1013 	struct bcma_device *core = bgmac->core;
1014 	struct bcma_bus *bus = core->bus;
1015 	struct bcma_chipinfo *ci = &bus->chipinfo;
1020 	if (bcma_core_is_enabled(core)) {
1021 		if (!bgmac->stats_grabbed) {
1022 			/* bgmac_chip_stats_update(bgmac); */
1023 			bgmac->stats_grabbed = true;
1026 		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
1027 			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);
		/* Loopback while the rings are being stopped. */
1029 		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
1032 		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
1033 			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);
1035 		/* TODO: Clear software multicast filter list */
	/* These package variants have no attached robo switch despite IOST. */
1038 	iost = bcma_aread32(core, BCMA_IOST);
1039 	if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
1040 	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
1041 	    (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
1042 		iost &= ~BGMAC_BCMA_IOST_ATTACHED;
1044 	/* 3GMAC: for BCM4707, only do core reset at bgmac_probe() */
1045 	if (ci->id != BCMA_CHIP_ID_BCM4707) {
1047 		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
1048 			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
1049 			if (!bgmac->has_robosw)
1050 				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
1052 		bcma_core_enable(core, flags);
1055 	/* Request Misc PLL for corerev > 2 */
1056 	if (core->id.rev > 2 &&
1057 	    ci->id != BCMA_CHIP_ID_BCM4707 &&
1058 	    ci->id != BCMA_CHIP_ID_BCM53018) {
1059 		bgmac_set(bgmac, BCMA_CLKCTLST,
1060 			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
1061 		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
1062 				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
1063 				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
	/* Pick switch/interface type: nvram "et_swtype" overrides, else
	 * package-specific defaults (EPHY+MII default, EPHYRMII or RGMII
	 * for certain packages).
	 */
1067 	if (ci->id == BCMA_CHIP_ID_BCM5357 ||
1068 	    ci->id == BCMA_CHIP_ID_BCM4749 ||
1069 	    ci->id == BCMA_CHIP_ID_BCM53572) {
1070 		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
1072 		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
1073 			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
1076 		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
1077 			if (kstrtou8(buf, 0, &et_swtype))
1078 				bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
1082 			sw_type = et_swtype;
1083 		} else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) {
1084 			sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
1085 		} else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
1086 			   (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
1087 			   (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
1088 			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
1089 				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
1091 		bcma_chipco_chipctl_maskset(cc, 1,
1092 					    ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
1093 					      BGMAC_CHIPCTL_1_SW_TYPE_MASK),
1097 	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
1098 		bcma_awrite32(core, BCMA_IOCTL,
1099 			      bcma_aread32(core, BCMA_IOCTL) &
1100 			      ~BGMAC_BCMA_IOCTL_SW_RESET);
1102 	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
1103 	 * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine
1104 	 * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to
1105 	 * be keps until taking MAC out of the reset.
1107 	bgmac_cmdcfg_maskset(bgmac,
1119 			     BGMAC_CMDCFG_PAD_EN |
1124 			     BGMAC_CMDCFG_SR(core->id.rev),
1126 	bgmac->mac_speed = SPEED_UNKNOWN;
1127 	bgmac->mac_duplex = DUPLEX_UNKNOWN;
1129 	bgmac_clear_mib(bgmac);
	/* Enable the MDC/MDIO transaction engine (shared core on BCM4706). */
1130 	if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
1131 		bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
1132 			       BCMA_GMAC_CMN_PC_MTE);
1134 		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
1135 	bgmac_miiconfig(bgmac);
1136 	bgmac_phy_init(bgmac);
1138 	netdev_reset_queue(bgmac->net_dev);
/* Unmask the interrupts the driver cares about (bgmac->int_mask). */
1141 static void bgmac_chip_intrs_on(struct bgmac *bgmac)
1143 	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
/* Mask all interrupts; the read-back flushes the posted write. */
1146 static void bgmac_chip_intrs_off(struct bgmac *bgmac)
1148 	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
1149 	bgmac_read(bgmac, BGMAC_INT_MASK);
1152 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
/* Enable the MAC: set CMDCFG TE/RE through a software-reset cycle, apply
 * BCM47162 mode-dependent clock/DLL workarounds, program flow-control
 * thresholds on the listed chips, and on non-4707/53018 chips set the RX
 * queue memory-data-parallelism field from the backplane clock.
 */
1153 static void bgmac_enable(struct bgmac *bgmac)
1155 	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
1163 	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
1164 	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
1165 			     BGMAC_CMDCFG_SR(bgmac->core->id.rev), true);
1167 	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
1168 	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
1170 	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
1172 	if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
1173 		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
1174 	if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
1175 		bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
1176 					    BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
1179 	case BCMA_CHIP_ID_BCM5357:
1180 	case BCMA_CHIP_ID_BCM4749:
1181 	case BCMA_CHIP_ID_BCM53572:
1182 	case BCMA_CHIP_ID_BCM4716:
1183 	case BCMA_CHIP_ID_BCM47162:
1184 		fl_ctl = 0x03cb04cb;
1185 		if (ci->id == BCMA_CHIP_ID_BCM5357 ||
1186 		    ci->id == BCMA_CHIP_ID_BCM4749 ||
1187 		    ci->id == BCMA_CHIP_ID_BCM53572)
1189 		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
1190 		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
1194 	if (ci->id != BCMA_CHIP_ID_BCM4707 &&
1195 	    ci->id != BCMA_CHIP_ID_BCM53018) {
1196 		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
1197 		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
1198 		bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
1200 		mdp = (bp_clk * 128 / 1000) - 3;
1201 		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
1202 		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
1206 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
/* Runtime chip setup after reset: interrupt coalescing (1 IRQ per frame),
 * honor received PAUSE frames, RX mode, MAC address, optional loopback,
 * max RX frame length, then unmask interrupts and enable the MAC.
 */
1207 static void bgmac_chip_init(struct bgmac *bgmac)
1209 	/* 1 interrupt per received frame */
1210 	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
1212 	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
1213 	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
1215 	bgmac_set_rx_mode(bgmac->net_dev);
1217 	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
1219 	if (bgmac->loopback)
1220 		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
1222 		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
1224 	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
1226 	bgmac_chip_intrs_on(bgmac);
1228 	bgmac_enable(bgmac);
/* IRQ handler (shared line): check our masked status bits, log unexpected
 * ones, then mask interrupts and hand processing off to NAPI.
 */
1231 static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
1233 	struct bgmac *bgmac = netdev_priv(dev_id);
1235 	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
1236 	int_status &= bgmac->int_mask;
1241 	int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
1243 		bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", int_status);
1245 	/* Disable new interrupts until handling existing ones */
1246 	bgmac_chip_intrs_off(bgmac);
1248 	napi_schedule(&bgmac->napi);
/* NAPI poll: ack all interrupt status bits, reclaim TX completions, read
 * up to @weight RX packets; re-poll if new TX/RX events arrived meanwhile,
 * otherwise complete NAPI and re-enable interrupts.
 */
1253 static int bgmac_poll(struct napi_struct *napi, int weight)
1255 	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
1259 	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);
1261 	bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
1262 	handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);
1264 	/* Poll again if more events arrived in the meantime */
1265 	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
1268 	if (handled < weight) {
1269 		napi_complete(napi);
1270 		bgmac_chip_intrs_on(bgmac);
1276 /**************************************************
 * net_device_ops
1278 **************************************************/
1280 static int bgmac_open(struct net_device *net_dev)
1282 struct bgmac *bgmac = netdev_priv(net_dev);
1285 bgmac_chip_reset(bgmac);
1287 err = bgmac_dma_init(bgmac);
1291 /* Specs say about reclaiming rings here, but we do that in DMA init */
1292 bgmac_chip_init(bgmac);
1294 err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
1295 KBUILD_MODNAME, net_dev);
1297 bgmac_err(bgmac, "IRQ request error: %d!\n", err);
1298 bgmac_dma_cleanup(bgmac);
1301 napi_enable(&bgmac->napi);
1303 phy_start(bgmac->phy_dev);
1305 netif_carrier_on(net_dev);
/* ndo_stop: quiesce the device in roughly the reverse order of
 * bgmac_open() — stop link management first, then NAPI and interrupts,
 * release the IRQ, and finally reset the core and free the DMA rings.
 * The order matters: the core must be reset (hardware stopped) before
 * the ring memory it DMAs into is released.
 */
1309 static int bgmac_stop(struct net_device *net_dev)
1311 struct bgmac *bgmac = netdev_priv(net_dev);
1313 netif_carrier_off(net_dev);
/* Stop the PHY state machine before tearing down what the ISR uses */
1315 phy_stop(bgmac->phy_dev);
1317 napi_disable(&bgmac->napi);
1318 bgmac_chip_intrs_off(bgmac);
1319 free_irq(bgmac->core->irq, net_dev);
/* Reset the core so the hardware stops touching the rings we free next */
1321 bgmac_chip_reset(bgmac);
1322 bgmac_dma_cleanup(bgmac);
1327 static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
1328 struct net_device *net_dev)
1330 struct bgmac *bgmac = netdev_priv(net_dev);
1331 struct bgmac_dma_ring *ring;
1333 /* No QOS support yet */
1334 ring = &bgmac->tx_ring[0];
1335 return bgmac_dma_tx_add(bgmac, ring, skb);
1338 static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
1340 struct bgmac *bgmac = netdev_priv(net_dev);
1343 ret = eth_prepare_mac_addr_change(net_dev, addr);
1346 bgmac_write_mac_address(bgmac, (u8 *)addr);
1347 eth_commit_mac_addr_change(net_dev, addr);
1351 static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1353 struct bgmac *bgmac = netdev_priv(net_dev);
1355 if (!netif_running(net_dev))
1358 return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
1361 static const struct net_device_ops bgmac_netdev_ops = {
1362 .ndo_open = bgmac_open,
1363 .ndo_stop = bgmac_stop,
1364 .ndo_start_xmit = bgmac_start_xmit,
1365 .ndo_set_rx_mode = bgmac_set_rx_mode,
1366 .ndo_set_mac_address = bgmac_set_mac_address,
1367 .ndo_validate_addr = eth_validate_addr,
1368 .ndo_do_ioctl = bgmac_ioctl,
1371 /**************************************************
 * ethtool_ops
1373 **************************************************/
1375 static int bgmac_get_settings(struct net_device *net_dev,
1376 struct ethtool_cmd *cmd)
1378 struct bgmac *bgmac = netdev_priv(net_dev);
1380 return phy_ethtool_gset(bgmac->phy_dev, cmd);
1383 static int bgmac_set_settings(struct net_device *net_dev,
1384 struct ethtool_cmd *cmd)
1386 struct bgmac *bgmac = netdev_priv(net_dev);
1388 return phy_ethtool_sset(bgmac->phy_dev, cmd);
1391 static void bgmac_get_drvinfo(struct net_device *net_dev,
1392 struct ethtool_drvinfo *info)
1394 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1395 strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
1398 static const struct ethtool_ops bgmac_ethtool_ops = {
1399 .get_settings = bgmac_get_settings,
1400 .set_settings = bgmac_set_settings,
1401 .get_drvinfo = bgmac_get_drvinfo,
1404 /**************************************************
 * MII
1406 **************************************************/
1408 static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
1410 return bgmac_phy_read(bus->priv, mii_id, regnum);
1413 static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
1416 return bgmac_phy_write(bus->priv, mii_id, regnum, value);
1419 static void bgmac_adjust_link(struct net_device *net_dev)
1421 struct bgmac *bgmac = netdev_priv(net_dev);
1422 struct phy_device *phy_dev = bgmac->phy_dev;
1423 bool update = false;
1425 if (phy_dev->link) {
1426 if (phy_dev->speed != bgmac->mac_speed) {
1427 bgmac->mac_speed = phy_dev->speed;
1431 if (phy_dev->duplex != bgmac->mac_duplex) {
1432 bgmac->mac_duplex = phy_dev->duplex;
1438 bgmac_mac_speed(bgmac);
1439 phy_print_status(phy_dev);
1443 static int bgmac_fixed_phy_register(struct bgmac *bgmac)
1445 struct fixed_phy_status fphy_status = {
1447 .speed = SPEED_1000,
1448 .duplex = DUPLEX_FULL,
1450 struct phy_device *phy_dev;
1453 phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
1454 if (!phy_dev || IS_ERR(phy_dev)) {
1455 bgmac_err(bgmac, "Failed to register fixed PHY device\n");
1459 err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
1460 PHY_INTERFACE_MODE_MII);
1462 bgmac_err(bgmac, "Connecting PHY failed\n");
1466 bgmac->phy_dev = phy_dev;
1471 static int bgmac_mii_register(struct bgmac *bgmac)
1473 struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
1474 struct mii_bus *mii_bus;
1475 struct phy_device *phy_dev;
1476 char bus_id[MII_BUS_ID_SIZE + 3];
1479 if (ci->id == BCMA_CHIP_ID_BCM4707 ||
1480 ci->id == BCMA_CHIP_ID_BCM53018)
1481 return bgmac_fixed_phy_register(bgmac);
1483 mii_bus = mdiobus_alloc();
1487 mii_bus->name = "bgmac mii bus";
1488 sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
1489 bgmac->core->core_unit);
1490 mii_bus->priv = bgmac;
1491 mii_bus->read = bgmac_mii_read;
1492 mii_bus->write = bgmac_mii_write;
1493 mii_bus->parent = &bgmac->core->dev;
1494 mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
1496 mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
1497 if (!mii_bus->irq) {
1501 for (i = 0; i < PHY_MAX_ADDR; i++)
1502 mii_bus->irq[i] = PHY_POLL;
1504 err = mdiobus_register(mii_bus);
1506 bgmac_err(bgmac, "Registration of mii bus failed\n");
1510 bgmac->mii_bus = mii_bus;
1512 /* Connect to the PHY */
1513 snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
1515 phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
1516 PHY_INTERFACE_MODE_MII);
1517 if (IS_ERR(phy_dev)) {
1518 bgmac_err(bgmac, "PHY connecton failed\n");
1519 err = PTR_ERR(phy_dev);
1520 goto err_unregister_bus;
1522 bgmac->phy_dev = phy_dev;
1527 mdiobus_unregister(mii_bus);
1529 kfree(mii_bus->irq);
1531 mdiobus_free(mii_bus);
1535 static void bgmac_mii_unregister(struct bgmac *bgmac)
1537 struct mii_bus *mii_bus = bgmac->mii_bus;
1539 mdiobus_unregister(mii_bus);
1540 kfree(mii_bus->irq);
1541 mdiobus_free(mii_bus);
1544 /**************************************************
 * BCMA bus ops
1546 **************************************************/
1548 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
/* BCMA .probe callback: select MAC and PHY addresses from the SPROM entry
 * matching this core unit, allocate the netdev, reset the core, allocate
 * DMA rings, register MDIO and finally register the net device. Returns
 * 0 or a negative errno; resources acquired so far are unwound through
 * the goto labels at the bottom.
 */
1549 static int bgmac_probe(struct bcma_device *core)
1551 struct bcma_chipinfo *ci = &core->bus->chipinfo;
1552 struct net_device *net_dev;
1553 struct bgmac *bgmac;
1554 struct ssb_sprom *sprom = &core->bus->sprom;
/* Each GMAC core unit has its own etXmac/etXphyaddr SPROM fields */
1558 switch (core->core_unit) {
1560 mac = sprom->et0mac;
1563 mac = sprom->et1mac;
1566 mac = sprom->et2mac;
1569 pr_err("Unsupported core_unit %d\n", core->core_unit);
/* Bad SPROM data: fall back to a random MAC rather than failing probe */
1573 if (!is_valid_ether_addr(mac)) {
1574 dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
1575 eth_random_addr(mac);
1576 dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
1579 /* Allocation and references */
1580 net_dev = alloc_etherdev(sizeof(*bgmac));
1583 net_dev->netdev_ops = &bgmac_netdev_ops;
1584 net_dev->irq = core->irq;
1585 net_dev->ethtool_ops = &bgmac_ethtool_ops;
1586 bgmac = netdev_priv(net_dev);
1587 bgmac->net_dev = net_dev;
1589 bcma_set_drvdata(core, bgmac);
1592 memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
1594 /* On BCM4706 we need common core to access PHY */
1595 if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
1596 !core->bus->drv_gmac_cmn.core) {
1597 bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
1599 goto err_netdev_free;
1601 bgmac->cmn = core->bus->drv_gmac_cmn.core;
/* PHY address, again selected by core unit */
1603 switch (core->core_unit) {
1605 bgmac->phyaddr = sprom->et0phyaddr;
1608 bgmac->phyaddr = sprom->et1phyaddr;
1611 bgmac->phyaddr = sprom->et2phyaddr;
/* An all-ones (masked) address means the SPROM lists no PHY */
1614 bgmac->phyaddr &= BGMAC_PHY_MASK;
1615 if (bgmac->phyaddr == BGMAC_PHY_MASK) {
1616 bgmac_err(bgmac, "No PHY found\n");
1618 goto err_netdev_free;
1620 bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
1621 bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
/* Only SoC-attached cores are supported so far */
1623 if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
1624 bgmac_err(bgmac, "PCI setup not implemented\n");
1626 goto err_netdev_free;
1629 bgmac_chip_reset(bgmac);
1631 /* For Northstar, we have to take all GMAC core out of reset */
1632 if (ci->id == BCMA_CHIP_ID_BCM4707 ||
1633 ci->id == BCMA_CHIP_ID_BCM53018) {
1634 struct bcma_device *ns_core;
1637 /* Northstar has 4 GMAC cores */
1638 for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) {
1639 /* As Northstar requirement, we have to reset all GMACs
1640 * before accessing one. bgmac_chip_reset() call
1641 * bcma_core_enable() for this core. Then the other
1642 * three GMACs didn't reset. We do it here.
1644 ns_core = bcma_find_core_unit(core->bus,
1647 if (ns_core && !bcma_core_is_enabled(ns_core))
1648 bcma_core_enable(ns_core, 0);
1652 err = bgmac_dma_alloc(bgmac);
1654 bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
1655 goto err_netdev_free;
/* TX-completion interrupts can be disabled via an NVRAM variable */
1658 bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
1659 if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
1660 bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
1662 /* TODO: reset the external phy. Specs are needed */
1663 bgmac_phy_reset(bgmac);
/* Board flags only tell us a switch exists; driving it is unimplemented */
1665 bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
1666 BGMAC_BFL_ENETROBO);
1667 if (bgmac->has_robosw)
1668 bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");
1670 if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
1671 bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
1673 netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
1675 err = bgmac_mii_register(bgmac);
1677 bgmac_err(bgmac, "Cannot register MDIO\n");
/* Offloads the DMA engine supports: scatter-gather + checksumming */
1681 net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1682 net_dev->hw_features = net_dev->features;
1683 net_dev->vlan_features = net_dev->features;
1685 err = register_netdev(bgmac->net_dev);
1687 bgmac_err(bgmac, "Cannot register net device\n");
1688 goto err_mii_unregister;
/* No link yet — phylib raises the carrier from bgmac_adjust_link() */
1691 netif_carrier_off(net_dev);
/* Error unwind, in reverse order of acquisition */
1696 bgmac_mii_unregister(bgmac);
1698 bgmac_dma_free(bgmac);
1701 bcma_set_drvdata(core, NULL);
1702 free_netdev(net_dev);
/* BCMA .remove callback: undo bgmac_probe() in reverse order. The netdev
 * is unregistered first so no further ndo_* calls can arrive while the
 * MDIO bus, NAPI context and DMA memory are being torn down.
 */
1707 static void bgmac_remove(struct bcma_device *core)
1709 struct bgmac *bgmac = bcma_get_drvdata(core);
1711 unregister_netdev(bgmac->net_dev);
1712 bgmac_mii_unregister(bgmac);
1713 netif_napi_del(&bgmac->napi);
1714 bgmac_dma_free(bgmac);
/* Clear drvdata before the memory behind it is freed below */
1715 bcma_set_drvdata(core, NULL);
1716 free_netdev(bgmac->net_dev);
1719 static struct bcma_driver bgmac_bcma_driver = {
1720 .name = KBUILD_MODNAME,
1721 .id_table = bgmac_bcma_tbl,
1722 .probe = bgmac_probe,
1723 .remove = bgmac_remove,
1726 static int __init bgmac_init(void)
1730 err = bcma_driver_register(&bgmac_bcma_driver);
1733 pr_info("Broadcom 47xx GBit MAC driver loaded\n");
/* Module unload: unregistering from the BCMA bus detaches every bound
 * core (bgmac_remove() runs for each).
 */
1738 static void __exit bgmac_exit(void)
1740 bcma_driver_unregister(&bgmac_bcma_driver);
1743 module_init(bgmac_init)
1744 module_exit(bgmac_exit)
1746 MODULE_AUTHOR("Rafał Miłecki");
1747 MODULE_LICENSE("GPL");