2 * Copyright (c) 2008-2011 Atheros Communications Inc.
4 * Modified for iPXE by Scott K Logan <logans@cottsay.net> July 2011
5 * Original from Linux kernel 3.0.1
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 FILE_LICENCE ( BSD2 );
22 #include <ipxe/malloc.h>
23 #include <ipxe/pci_io.h>
/* Set non-zero when the driver is unloaded; no readers are visible in
 * this view — presumably consulted elsewhere in the driver. */
28 int is_ath9k_unloaded;
29 /* We use the hw_value as an index into our private channel structure */
/* Initializer for a 2.4 GHz entry in the channel table below.
 * NOTE(review): the macro body is truncated in this view (no closing
 * brace visible); _idx presumably fills the hw_value index mentioned
 * above — TODO confirm against the full source. */
31 #define CHAN2G(_freq, _idx) { \
32 .band = NET80211_BAND_2GHZ, \
33 .center_freq = (_freq), \
/* Initializer for a 5 GHz entry in the channel table below.
 * NOTE(review): truncated in this view, like CHAN2G — the _idx
 * argument is not used in the visible lines. */
38 #define CHAN5G(_freq, _idx) { \
39 .band = NET80211_BAND_5GHZ, \
40 .center_freq = (_freq), \
45 /* Some 2 GHz radios are actually tunable on 2312-2732
46 * on 5 MHz steps, we support the channels which we know
47 * we have calibration data for all cards though to make
/* 2.4 GHz channel table: IEEE channels 1-14, indexed 0-13 by the
 * second CHAN2G argument.  Frequencies are channel centres in MHz.
 * NOTE(review): the closing brace of the array is outside this view. */
49 static const struct net80211_channel ath9k_2ghz_chantable[] = {
50 CHAN2G(2412, 0), /* Channel 1 */
51 CHAN2G(2417, 1), /* Channel 2 */
52 CHAN2G(2422, 2), /* Channel 3 */
53 CHAN2G(2427, 3), /* Channel 4 */
54 CHAN2G(2432, 4), /* Channel 5 */
55 CHAN2G(2437, 5), /* Channel 6 */
56 CHAN2G(2442, 6), /* Channel 7 */
57 CHAN2G(2447, 7), /* Channel 8 */
58 CHAN2G(2452, 8), /* Channel 9 */
59 CHAN2G(2457, 9), /* Channel 10 */
60 CHAN2G(2462, 10), /* Channel 11 */
61 CHAN2G(2467, 11), /* Channel 12 */
62 CHAN2G(2472, 12), /* Channel 13 */
63 CHAN2G(2484, 13), /* Channel 14 */
66 /* Some 5 GHz radios are actually tunable on XXXX-YYYY
67 * on 5 MHz steps, we support the channels which we know
68 * we have calibration data for all cards though to make
/* 5 GHz channel table: UNII-1/UNII-2/middle-band/UNII-3 channels,
 * hw_value indices continuing (14-37) after the 2.4 GHz table.
 * NOTE(review): the closing brace of the array is outside this view. */
70 static const struct net80211_channel ath9k_5ghz_chantable[] = {
71 /* _We_ call this UNII 1 */
72 CHAN5G(5180, 14), /* Channel 36 */
73 CHAN5G(5200, 15), /* Channel 40 */
74 CHAN5G(5220, 16), /* Channel 44 */
75 CHAN5G(5240, 17), /* Channel 48 */
76 /* _We_ call this UNII 2 */
77 CHAN5G(5260, 18), /* Channel 52 */
78 CHAN5G(5280, 19), /* Channel 56 */
79 CHAN5G(5300, 20), /* Channel 60 */
80 CHAN5G(5320, 21), /* Channel 64 */
81 /* _We_ call this "Middle band" */
82 CHAN5G(5500, 22), /* Channel 100 */
83 CHAN5G(5520, 23), /* Channel 104 */
84 CHAN5G(5540, 24), /* Channel 108 */
85 CHAN5G(5560, 25), /* Channel 112 */
86 CHAN5G(5580, 26), /* Channel 116 */
87 CHAN5G(5600, 27), /* Channel 120 */
88 CHAN5G(5620, 28), /* Channel 124 */
89 CHAN5G(5640, 29), /* Channel 128 */
90 CHAN5G(5660, 30), /* Channel 132 */
91 CHAN5G(5680, 31), /* Channel 136 */
92 CHAN5G(5700, 32), /* Channel 140 */
93 /* _We_ call this UNII 3 */
94 CHAN5G(5745, 33), /* Channel 149 */
95 CHAN5G(5765, 34), /* Channel 153 */
96 CHAN5G(5785, 35), /* Channel 157 */
97 CHAN5G(5805, 36), /* Channel 161 */
98 CHAN5G(5825, 37), /* Channel 165 */
/* Atheros hardware rate code addition for short preamble.
 *
 * Expands to the hardware rate code with the short-preamble bit (0x04)
 * OR'd in when __flags requests IEEE80211_TX_RC_USE_SHORT_PREAMBLE, or
 * to 0 otherwise (callers use this to fill hw_value_short).
 *
 * Fix: each macro argument and the whole expansion are fully
 * parenthesized so the macro stays correct when invoked with compound
 * expressions (the original left __hw_rate and __flags bare, which
 * misparses for arguments containing lower-precedence operators).
 */
#define SHPCHECK(__hw_rate, __flags) \
	((((__flags) & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)) ? ((__hw_rate) | 0x04) : 0)
/* Initializer for an ath9k_legacy_rate entry: _bitrate is in units of
 * 100 kbps; hw_value_short is the short-preamble hardware code from
 * SHPCHECK (0 when short preamble does not apply).
 * NOTE(review): truncated in this view — no .flags field or closing
 * brace is visible. */
105 #define RATE(_bitrate, _hw_rate, _flags) { \
106 .bitrate = (_bitrate), \
108 .hw_value = (_hw_rate), \
109 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
/* Legacy (non-HT) rate table.  Only the 2/5.5/11 Mbps short-preamble
 * entries are visible here; the table is truncated in this view (no
 * further entries or closing brace shown). */
112 static struct ath9k_legacy_rate ath9k_legacy_rates[] = {
114 RATE(20, 0x1a, IEEE80211_TX_RC_USE_SHORT_PREAMBLE),
115 RATE(55, 0x19, IEEE80211_TX_RC_USE_SHORT_PREAMBLE),
116 RATE(110, 0x18, IEEE80211_TX_RC_USE_SHORT_PREAMBLE),
/* Forward declaration: ath9k_init_device()'s failure path tears down
 * via ath9k_deinit_softc(), defined near the end of this file. */
127 static void ath9k_deinit_softc(struct ath_softc *sc);
130 * Read and write, they both share the same lock. We do this to serialize
131 * reads and writes on Atheros 802.11n PCI devices only. This is required
132 * as the FIFO on these devices can sanely accept only 2 requests.
/* Register-write hook installed into ah->reg_ops.write: writes the
 * 32-bit value to the memory-mapped register at reg_offset.
 * NOTE(review): the function braces are not visible in this view. */
135 static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
137 struct ath_hw *ah = (struct ath_hw *) hw_priv;
138 struct ath_common *common = ath9k_hw_common(ah);
139 struct ath_softc *sc = (struct ath_softc *) common->priv;
141 writel(val, sc->mem + reg_offset);
/* Register-read hook for ah->reg_ops.read: returns the 32-bit value at
 * reg_offset.  NOTE(review): the declaration of 'val' and the return
 * statement are not visible in this view (truncated). */
144 static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
146 struct ath_hw *ah = (struct ath_hw *) hw_priv;
147 struct ath_common *common = ath9k_hw_common(ah);
148 struct ath_softc *sc = (struct ath_softc *) common->priv;
151 val = readl(sc->mem + reg_offset);
/* Read-modify-write hook for ah->reg_ops.rmw: read the register, clear
 * the 'clr' bits, set the 'set' bits, write it back.
 * NOTE(review): the modify step between the readl and writel, the
 * declaration of 'val', and the return statement are not visible in
 * this view (truncated). */
155 static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
157 struct ath_hw *ah = (struct ath_hw *) hw_priv;
158 struct ath_common *common = ath9k_hw_common(ah);
159 struct ath_softc *sc = (struct ath_softc *) common->priv;
162 val = readl(sc->mem + reg_offset);
165 writel(val, sc->mem + reg_offset);
170 /**************************/
172 /**************************/
175 * This function will allocate both the DMA descriptor structure, and the
176 * buffers it contains. These are used to contain the descriptors used
/*
 * Allocate a DMA descriptor ring ('name': nbuf buffers of ndesc
 * descriptors each) plus the ath_buf list that points into it, and
 * chain the buffers onto 'head'.
 * NOTE(review): several lines are not visible in this view — the
 * is_tx/EDMA test selecting desc_len, the ath_buf array allocation,
 * the success return, and the 'fail' labels of the error path.
 */
179 int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
180 struct list_head *head, const char *name,
181 int nbuf, int ndesc, int is_tx)
/* Map a virtual descriptor pointer to its bus address in the ring. */
183 #define DS2PHYS(_dd, _ds) \
184 ((_dd)->dd_desc_paddr + ((char *)(_ds) - (char *)(_dd)->dd_desc))
/* True if a descriptor starting at _daddr would extend across a 4 KB
 * boundary (threshold presumably sized to the maximum descriptor
 * length — TODO confirm). */
185 #define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF9F) ? 1 : 0)
188 int i, bsize, error, desc_len;
190 DBG2("ath9k: %s DMA: %d buffers %d desc/buf\n",
193 INIT_LIST_HEAD(head);
/* desc_len comes from hardware caps (EDMA TX) or generic ath_desc;
 * NOTE(review): the condition choosing between these two assignments
 * is not visible in this view. */
196 desc_len = sc->sc_ah->caps.tx_desc_len;
198 desc_len = sizeof(struct ath_desc);
200 /* ath_desc must be a multiple of DWORDs */
201 if ((desc_len % 4) != 0) {
202 DBG("ath9k: ath_desc not DWORD aligned\n");
207 dd->dd_desc_len = desc_len * nbuf * ndesc;
210 * Need additional DMA memory because we can't use
211 * descriptors that cross the 4K page boundary.
212 * However, iPXE only utilizes 16 buffers, which
213 * will never make up more than half of one page,
214 * so we will only ever skip 1 descriptor, if that.
216 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
217 u32 ndesc_skipped = 1;
220 dma_len = ndesc_skipped * desc_len;
221 dd->dd_desc_len += dma_len;
224 /* allocate descriptors */
225 dd->dd_desc = malloc_dma(dd->dd_desc_len, 16);
226 if (dd->dd_desc == NULL) {
230 dd->dd_desc_paddr = virt_to_bus(dd->dd_desc);
231 ds = (u8 *) dd->dd_desc;
232 DBG2("ath9k: %s DMA map: %p (%d) -> %llx (%d)\n",
233 name, ds, (u32) dd->dd_desc_len,
234 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
236 /* allocate buffers */
237 bsize = sizeof(struct ath_buf) * nbuf;
/* NOTE(review): the allocation of the ath_buf array 'bf' is not
 * visible in this view; the loop below assumes it succeeded. */
245 for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
247 bf->bf_daddr = DS2PHYS(dd, ds);
249 if (!(sc->sc_ah->caps.hw_caps &
250 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
252 * Skip descriptor addresses which can cause 4KB
253 * boundary crossing (addr + length) with a 32 dword
256 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
257 ds += (desc_len * ndesc);
259 bf->bf_daddr = DS2PHYS(dd, ds);
262 list_add_tail(&bf->list, head);
/* Error path: release the descriptor ring and clear the bookkeeping
 * state.  NOTE(review): the label introducing this path is not
 * visible in this view. */
266 free_dma(dd->dd_desc, dd->dd_desc_len);
268 memset(dd, 0, sizeof(*dd));
270 #undef ATH_DESC_4KB_BOUND_CHECK
/*
 * Initialize the hardware key cache: record its size, reset every slot
 * (some parts power up with stale contents), and note whether TX and
 * RX MIC keys can share one cache entry.
 * NOTE(review): function braces and the loop index declaration are not
 * visible in this view.
 */
274 void ath9k_init_crypto(struct ath_softc *sc)
276 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
279 /* Get the hardware key cache size. */
280 common->keymax = AR_KEYTABLE_SIZE;
283 * Reset the key cache since some parts do not
284 * reset the contents on initial power up.
286 for (i = 0; i < common->keymax; i++)
287 ath_hw_keyreset(common, (u16) i);
290 * Check whether the separate key cache entries
291 * are required to handle both tx+rx MIC keys.
292 * With split mic keys the number of stations is limited
293 * to 27 otherwise 59.
295 if (sc->sc_ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA)
296 common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
/* Set up one data TX queue per WME access category and record the
 * mac80211 queue number on each.  NOTE(review): the return value,
 * error handling for a failed ath_txq_setup(), and the closing brace
 * are not visible in this view. */
299 static int ath9k_init_queues(struct ath_softc *sc)
303 for (i = 0; i < WME_NUM_AC; i++) {
304 sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
305 sc->tx.txq_map[i]->mac80211_qnum = i;
/*
 * Populate sc->hwinfo with the channel and bitrate tables for each
 * band the hardware supports, and cache the legacy rate table in
 * sc->rates.  NOTE(review): the return statement is not visible in
 * this view.
 */
310 static int ath9k_init_channels_rates(struct ath_softc *sc)
314 memcpy(&sc->rates, ath9k_legacy_rates, sizeof(ath9k_legacy_rates));
316 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
317 memcpy(&sc->hwinfo->channels[sc->hwinfo->nr_channels], ath9k_2ghz_chantable, sizeof(ath9k_2ghz_chantable));
319 sc->hwinfo->nr_channels += ARRAY_SIZE(ath9k_2ghz_chantable);
321 for (i = 0; i < ARRAY_SIZE(ath9k_legacy_rates); i++)
322 sc->hwinfo->rates[NET80211_BAND_2GHZ][i] = ath9k_legacy_rates[i].bitrate;
323 sc->hwinfo->nr_rates[NET80211_BAND_2GHZ] = ARRAY_SIZE(ath9k_legacy_rates);
326 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
327 memcpy(&sc->hwinfo->channels[sc->hwinfo->nr_channels], ath9k_5ghz_chantable, sizeof(ath9k_5ghz_chantable));
329 sc->hwinfo->nr_channels += ARRAY_SIZE(ath9k_5ghz_chantable);
/* Skip the first four rates for 5 GHz — presumably the CCK rates
 * (cf. the 2/5.5/11 Mbps entries visible in ath9k_legacy_rates),
 * which are not valid in that band — TODO confirm. */
331 for (i = 4; i < ARRAY_SIZE(ath9k_legacy_rates); i++)
332 sc->hwinfo->rates[NET80211_BAND_5GHZ][i - 4] = ath9k_legacy_rates[i].bitrate;
333 sc->hwinfo->nr_rates[NET80211_BAND_5GHZ] = ARRAY_SIZE(ath9k_legacy_rates) - 4;
/* Miscellaneous one-time state: disable the ANI timer, set the TX
 * power limit, copy chain masks from hardware caps, enable diversity
 * and pick the default RX antenna, and use the broadcast address as
 * the BSSID mask. */
338 static void ath9k_init_misc(struct ath_softc *sc)
340 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
342 common->ani.timer = 0;
344 sc->config.txpowlimit = ATH_TXPOWER_MAX;
346 common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
347 common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
349 ath9k_hw_set_diversity(sc->sc_ah, 1);
350 sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
352 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
/*
 * Allocate and wire up the ath_hw and 802.11 hardware-info structures,
 * install the register-access hooks, then bring up the chip, TX
 * queues, channel/rate tables and key cache.
 * NOTE(review): the success return and the error labels between steps
 * are not visible in this view; the queue-cleanup loop at the end is
 * presumably part of an error path — TODO confirm.
 */
355 static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
356 const struct ath_bus_ops *bus_ops)
358 struct ath_hw *ah = NULL;
359 struct ath_common *common;
363 ah = zalloc(sizeof(struct ath_hw));
368 ah->hw_version.devid = devid;
369 ah->hw_version.subsysid = subsysid;
370 ah->reg_ops.read = ath9k_ioread32;
371 ah->reg_ops.write = ath9k_iowrite32;
372 ah->reg_ops.rmw = ath9k_reg_rmw;
375 sc->hwinfo = zalloc(sizeof(*sc->hwinfo));
377 DBG("ath9k: cannot allocate 802.11 hardware info structure\n");
/* Calibration data comes from EEPROM; LED support disabled. */
381 ah->ah_flags |= AH_USE_EEPROM;
382 sc->sc_ah->led_pin = -1;
384 common = ath9k_hw_common(ah);
385 common->ops = &ah->reg_ops;
386 common->bus_ops = bus_ops;
388 common->dev = sc->dev;
391 sc->intr_tq = ath9k_tasklet;
394 * Cache line size is used to size and align various
395 * structures used to communicate with the hardware.
397 ath_read_cachesize(common, &csz);
398 common->cachelsz = csz << 2; /* convert to bytes */
400 /* Initializes the hardware for all supported chipsets */
401 ret = ath9k_hw_init(ah);
405 memcpy(sc->hwinfo->hwaddr, common->macaddr, ETH_ALEN);
407 ret = ath9k_init_queues(sc);
411 ret = ath9k_init_channels_rates(sc);
415 ath9k_init_crypto(sc);
/* Tear down any TX queues that were set up above.
 * NOTE(review): the label introducing this cleanup is not visible. */
421 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
422 if (ATH_TXQ_SETUP(sc, i))
423 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
/*
 * Compute per-channel maximum TX power for one band: for each channel
 * in that band, make it current, apply the regulatory power limit, and
 * record reg->max_power_level / 2 (presumably converting half-dBm to
 * dBm — TODO confirm units).
 * NOTE(review): the 'continue' that skips other-band channels after
 * the band test, and the loop index declaration, are not visible in
 * this view.
 */
436 static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
438 struct net80211_channel *chan;
439 struct ath_hw *ah = sc->sc_ah;
440 struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
443 for (i = 0; i < sc->hwinfo->nr_channels; i++) {
444 chan = &sc->hwinfo->channels[i];
445 if(chan->band != band)
447 ah->curchan = &ah->channels[chan->hw_value];
448 ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, 1);
449 chan->maxpower = reg->max_power_level / 2;
/* Evaluate TX power limits for every supported band, saving and
 * restoring ah->curchan around the sweep (ath9k_init_band_txpower
 * clobbers it). */
453 static void ath9k_init_txpower_limits(struct ath_softc *sc)
455 struct ath_hw *ah = sc->sc_ah;
456 struct ath9k_channel *curchan = ah->curchan;
458 if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
459 ath9k_init_band_txpower(sc, NET80211_BAND_2GHZ);
460 if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
461 ath9k_init_band_txpower(sc, NET80211_BAND_5GHZ);
463 ah->curchan = curchan;
/* Advertise device capabilities to the net80211 layer via sc->hwinfo:
 * RX frames carry an FCS, signal is reported in dB, plus the supported
 * bands and 802.11 modes derived from hardware caps.
 * NOTE(review): the braces around the two if-bodies are not visible in
 * this view. */
466 void ath9k_set_hw_capab(struct ath_softc *sc, struct net80211_device *dev __unused)
468 sc->hwinfo->flags = NET80211_HW_RX_HAS_FCS;
469 sc->hwinfo->signal_type = NET80211_SIGNAL_DB;
470 sc->hwinfo->signal_max = 40; /* 35dB should give perfect 54Mbps */
471 sc->hwinfo->channel_change_time = 5000;
473 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
475 sc->hwinfo->bands |= NET80211_BAND_BIT_2GHZ;
476 sc->hwinfo->modes |= NET80211_MODE_B | NET80211_MODE_G;
478 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
480 sc->hwinfo->bands |= NET80211_BAND_BIT_5GHZ;
481 sc->hwinfo->modes |= NET80211_MODE_A;
/*
 * Top-level device bring-up: initialize the softc, advertise
 * capabilities, set up TX/RX rings and power limits, and register with
 * the net80211 stack.  Regulatory and rfkill handling from the Linux
 * original are commented out (see the TODO Cottsay markers).
 * NOTE(review): the error labels/returns between steps are not visible
 * in this view; the trailing deinit call is presumably the failure
 * path — TODO confirm.
 */
485 int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
486 const struct ath_bus_ops *bus_ops)
488 struct net80211_device *dev = sc->dev;
489 /*struct ath_common *common;
492 /*struct ath_regulatory *reg;*/
494 /* Bring up device */
495 error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
500 common = ath9k_hw_common(ah);*/
501 ath9k_set_hw_capab(sc, dev);
502 /* TODO Cottsay: reg */
503 /* Initialize regulatory */
504 /*error = ath_regd_init(&common->regulatory, sc->dev->wiphy,
509 reg = &common->regulatory;*/
512 error = ath_tx_init(sc, ATH_TXBUF);
517 error = ath_rx_init(sc, ATH_RXBUF);
521 ath9k_init_txpower_limits(sc);
523 /* Register with mac80211 */
524 error = net80211_register(dev, &ath9k_ops, sc->hwinfo);
528 /* TODO Cottsay: reg */
529 /* Handle world regulatory */
530 /*if (!ath_is_world_regd(reg)) {
531 error = regulatory_hint(hw->wiphy, reg->alpha2);
536 sc->hw_pll_work = ath_hw_pll_work;
537 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
539 /* TODO Cottsay: rfkill */
540 /*ath_start_rfkill_poll(sc);*/
545 // net80211_unregister(dev);
551 ath9k_deinit_softc(sc);
556 /*****************************/
557 /* De-Initialization */
558 /*****************************/
/* Tear down every TX queue that was set up and shut down the hardware.
 * NOTE(review): any further frees (sc->hwinfo, the ath_hw allocation)
 * are not visible in this view. */
560 static void ath9k_deinit_softc(struct ath_softc *sc)
564 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
565 if (ATH_TXQ_SETUP(sc, i))
566 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
568 ath9k_hw_deinit(sc->sc_ah);
/* Public teardown entry point: unregister from the net80211 stack,
 * then release the softc state. */
576 void ath9k_deinit_device(struct ath_softc *sc)
578 struct net80211_device *dev = sc->dev;
580 net80211_unregister(dev);
583 ath9k_deinit_softc(sc);
/* Release a descriptor ring allocated by ath_descdma_setup(): free the
 * DMA memory, empty the buffer list, and clear the bookkeeping struct
 * so a stale pointer cannot be double-freed. */
586 void ath_descdma_cleanup(struct ath_softc *sc __unused,
587 struct ath_descdma *dd,
588 struct list_head *head)
590 free_dma(dd->dd_desc, dd->dd_desc_len);
592 INIT_LIST_HEAD(head);
594 memset(dd, 0, sizeof(*dd));