2 * Copyright (c) 2008-2011 Atheros Communications Inc.
4 * Modified for iPXE by Scott K Logan <logans@cottsay.net> July 2011
5 * Original from Linux kernel 3.0.1
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 #include "ar9003_mac.h"
26 * Setup and link descriptors.
28 * 11N: we can no longer afford to self link the last descriptor.
29 * MAC acknowledges BA status as long as it copies frames to host
30 * buffer (or rx fifo). This can incorrectly acknowledge packets
31 * to a sender if last desc is self-linked.
/*
 * Link one RX buffer's descriptor into the hardware receive chain.
 *
 * The descriptor is null-linked (ds_link = 0) rather than self-linked —
 * see the comment above about false BA acknowledgements — and pointed at
 * the buffer's DMA address.  If no chain exists yet (rxlink == NULL) the
 * hardware RX pointer is set directly; otherwise the previous
 * descriptor's link word is patched to append this one.  Finally rxlink
 * is advanced to this descriptor's link field for the next call.
 *
 * NOTE(review): several interior lines (including where `ds` is derived
 * from `bf`) are elided in this excerpt — confirm against full source.
 */
33 static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
35 struct ath_hw *ah = sc->sc_ah;
36 struct ath_common *common = ath9k_hw_common(ah);
38 // struct io_buffer *iob;
43 ds->ds_link = 0; /* link to null */
44 ds->ds_data = bf->bf_buf_addr;
46 // /* virtual addr of the beginning of the buffer. */
48 // ds->ds_vdata = iob->data;
51 * setup rx descriptors. The rx_bufsize here tells the hardware
52 * how much data it can DMA to us and that we are prepared
55 ath9k_hw_setuprxdesc(ah, ds,
/* First buffer: hand its DMA address straight to the hardware. */
59 if (sc->rx.rxlink == NULL)
60 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
/* Otherwise chain it onto the previous descriptor's link word. */
62 *sc->rx.rxlink = bf->bf_daddr;
/* Remember where the next descriptor must be linked in. */
64 sc->rx.rxlink = &ds->ds_link;
/*
 * Switch the default RX antenna and reset the diversity counter.
 *
 * Called from the RX path when antenna diversity has preferred the
 * other antenna repeatedly (see ath_rx_tasklet).
 */
67 static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
69 /* XXX block beacon interrupts */
70 ath9k_hw_setantenna(sc->sc_ah, antenna);
71 sc->rx.defant = antenna;
72 sc->rx.rxotherant = 0;
/*
 * Program the hardware for the current operating mode:
 * RX filter, BSSID mask, opmode register, and multicast filter.
 *
 * NOTE(review): local declarations for `rfilt` and `mfilt` are elided
 * in this excerpt.
 */
75 static void ath_opmode_init(struct ath_softc *sc)
77 struct ath_hw *ah = sc->sc_ah;
78 struct ath_common *common = ath9k_hw_common(ah);
82 /* configure rx filter */
83 rfilt = ath_calcrxfilter(sc);
84 ath9k_hw_setrxfilter(ah, rfilt);
86 /* configure bssid mask */
87 ath_hw_setbssidmask(common);
89 /* configure operational mode */
90 ath9k_hw_setopmode(ah);
/* ~0 in both words accepts all multicast hash buckets. */
92 /* calculate and install multicast filter */
93 mfilt[0] = mfilt[1] = ~0;
94 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
/*
 * Allocate the RX descriptor ring and an io_buffer for each entry.
 *
 * rx_bufsize is sized to half the maximum MPDU plus the hardware's
 * RX status length.  Returns an error code from ath_descdma_setup()
 * or buffer allocation; exact error paths are elided in this excerpt.
 *
 * NOTE(review): `iob_addr` is initialised to NULL here yet dereferenced
 * below — presumably ath_rxbuf_alloc() points it at the buffer's DMA
 * address in an elided line; confirm against full source.
 */
97 int ath_rx_init(struct ath_softc *sc, int nbufs)
99 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
100 struct io_buffer *iob;
101 u32 *iob_addr = NULL;
105 sc->sc_flags &= ~SC_OP_RXFLUSH;
107 common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
108 sc->sc_ah->caps.rx_status_len;
110 DBG2("ath9k: cachelsz %d rxbufsize %d\n",
111 common->cachelsz, common->rx_bufsize);
113 /* Initialize rx descriptors */
115 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
119 "failed to allocate rx descriptors: %d\n",
/* Attach a freshly allocated io_buffer to every descriptor. */
124 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
125 iob = ath_rxbuf_alloc(common, common->rx_bufsize,
133 bf->bf_buf_addr = *iob_addr;
135 sc->rx.rxlink = NULL;
/*
 * Free every RX io_buffer and tear down the descriptor DMA area.
 *
 * NOTE(review): the per-buffer free inside the loop is elided in this
 * excerpt.  dd_desc_len != 0 guards against cleaning up a descriptor
 * area that was never allocated.
 */
144 void ath_rx_cleanup(struct ath_softc *sc)
146 struct io_buffer *iob;
149 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
158 if (sc->rx.rxdma.dd_desc_len != 0)
159 ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
163 * Calculate the receive filter according to the
164 * operating mode and state:
166 * o always accept unicast, broadcast, and multicast traffic
167 * o maintain current state of phy error reception (the hal
168 * may enable phy error frames for noise immunity work)
169 * o probe request frames are accepted only when operating in
170 * hostap, adhoc, or monitor modes
171 * o enable promiscuous mode according to the interface state
173 * - when operating in adhoc mode so the 802.11 layer creates
174 * node table entries for peers,
175 * - when operating in station mode for collecting rssi data when
176 * the station is otherwise quiet, or
177 * - when operating as a repeater so we see repeater-sta beacons
/*
 * Compute the RX filter word (see policy comment above).
 *
 * Preserves the hardware's current PHY-error/radar bits (these may have
 * been enabled for noise-immunity work) and always accepts unicast,
 * broadcast, multicast and beacon frames.
 */
181 u32 ath_calcrxfilter(struct ath_softc *sc)
183 #define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
187 rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
188 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
189 | ATH9K_RX_FILTER_MCAST | ATH9K_RX_FILTER_BEACON;
193 #undef RX_FILTER_PRESERVE
/*
 * (Re)build the RX descriptor chain and start the PCU receive engine.
 *
 * Links every buffered descriptor (ath_rx_buf_link), points the
 * hardware at the first one, then enables PCU receive.  Return value
 * paths are elided in this excerpt.
 */
196 int ath_startrecv(struct ath_softc *sc)
198 struct ath_hw *ah = sc->sc_ah;
199 struct ath_buf *bf, *tbf;
201 if (list_empty(&sc->rx.rxbuf))
/* Start from an empty chain; each link call appends one buffer. */
204 sc->rx.rxlink = NULL;
205 list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
206 ath_rx_buf_link(sc, bf);
209 /* We could have deleted elements so the list may be empty now */
210 if (list_empty(&sc->rx.rxbuf))
213 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
214 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
219 ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
/*
 * Stop the receive path: abort PCU receive, clear the RX filter,
 * and halt RX DMA.
 *
 * Returns non-zero only if DMA stopped cleanly without the hardware
 * requesting a reset; callers use this to decide whether a chip reset
 * is needed.  The warning is suppressed when the device has been
 * unplugged (AH_UNPLUGGED).
 */
224 int ath_stoprecv(struct ath_softc *sc)
226 struct ath_hw *ah = sc->sc_ah;
227 int stopped, reset = 0;
229 ath9k_hw_abortpcurecv(ah);
230 ath9k_hw_setrxfilter(ah, 0);
231 stopped = ath9k_hw_stopdmarecv(ah, &reset);
233 sc->rx.rxlink = NULL;
235 if (!(ah->ah_flags & AH_UNPLUGGED) &&
238 "Could not stop RX, we could be "
239 "confusing the DMA engine when we start RX up\n");
241 return stopped && !reset;
/*
 * Drain pending RX frames without delivering them.
 *
 * Sets SC_OP_RXFLUSH so the tasklet requeues frames instead of
 * processing them, runs the tasklet once with flush=1, then clears
 * the flag.
 */
244 void ath_flushrecv(struct ath_softc *sc)
246 sc->sc_flags |= SC_OP_RXFLUSH;
247 ath_rx_tasklet(sc, 1, 0);
248 sc->sc_flags &= ~SC_OP_RXFLUSH;
/*
 * Fetch the next completed RX buffer from the head of sc->rx.rxbuf,
 * filling *rs with its decoded receive status.
 *
 * Returns NULL (paths elided here) when the list is empty or the head
 * descriptor is still owned by hardware.  Includes a workaround for
 * descriptor status-word corruption: if the head reports -EINPROGRESS
 * but the *next* descriptor is already done, the head's done bit was
 * corrupted and the frame is force-discarded.
 *
 * NOTE(review): where `ds` is derived from `bf`, and the discard path
 * itself, are elided in this excerpt.
 */
251 static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
252 struct ath_rx_status *rs)
254 struct ath_hw *ah = sc->sc_ah;
259 if (list_empty(&sc->rx.rxbuf)) {
260 sc->rx.rxlink = NULL;
264 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
268 * Must provide the virtual address of the current
269 * descriptor, the physical address, and the virtual
270 * address of the next descriptor in the h/w chain.
271 * This allows the HAL to look ahead to see if the
272 * hardware is done with a descriptor by checking the
273 * done bit in the following descriptor and the address
274 * of the current descriptor the DMA engine is working
275 * on. All this is necessary because of our use of
276 * a self-linked list to avoid rx overruns.
278 ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
279 if (ret == -EINPROGRESS) {
280 struct ath_rx_status trs;
282 struct ath_desc *tds;
284 memset(&trs, 0, sizeof(trs));
/* Head is also the tail: nothing after it to look ahead at. */
285 if ((&bf->list)->next == &sc->rx.rxbuf) {
286 sc->rx.rxlink = NULL;
290 tbf = list_entry(bf->list.next, struct ath_buf, list);
293 * On some hardware the descriptor status words could
294 * get corrupted, including the done bit. Because of
295 * this, check if the next descriptor's done bit is
298 * If the next descriptor's done bit is set, the current
299 * descriptor has been corrupted. Force s/w to discard
300 * this descriptor and continue...
304 ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
305 if (ret == -EINPROGRESS)
/*
 * Decide whether a received frame should be accepted for processing.
 *
 * Rejects zero-length frames and frames whose rs_datalen exceeds the
 * buffer (a hint the hardware corrupted the status).  Error bits are
 * only meaningful on the last fragment (rs_more clear).  Decrypt/MIC
 * failures are tolerated (flagged via *decrypt_error in elided lines);
 * in monitor mode CRC errors are tolerated as well.  Return statements
 * are elided in this excerpt.
 */
315 /* Assumes you've already done the endian to CPU conversion */
316 static int ath9k_rx_accept(struct ath_common *common,
317 struct ath_rx_status *rx_stats,
320 struct ath_hw *ah = common->ah;
321 u8 rx_status_len = ah->caps.rx_status_len;
324 if (!rx_stats->rs_datalen)
327 * rs_status follows rs_datalen so if rs_datalen is too large
328 * we can take a hint that hardware corrupted it, so ignore
331 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len))
334 /* Only use error bits from the last fragment */
335 if (rx_stats->rs_more)
339 * The rx_stats->rs_status will not be set until the end of the
340 * chained descriptors so it can be ignored if rs_more is set. The
341 * rs_more will be false at the last element of the chained
344 if (rx_stats->rs_status != 0) {
345 if (rx_stats->rs_status & ATH9K_RXERR_PHY)
348 if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
352 * Reject error frames with the exception of
353 * decryption and MIC failures. For monitor mode,
354 * we also ignore the CRC error.
356 if (ah->is_monitoring) {
357 if (rx_stats->rs_status &
358 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
362 if (rx_stats->rs_status &
363 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
/*
 * Map the hardware rate code (rs_rate) to an index into sc->rates[]
 * for the current channel's band, matching both the long- and
 * short-preamble hardware values.
 *
 * If no rate matches — which should not happen for a frame the
 * hardware accepted — a warning is logged and (per the elided code,
 * presumably) a 1 Mbit fallback is used; confirm against full source.
 * The output-parameter store and returns are elided in this excerpt.
 */
371 static int ath9k_process_rate(struct ath_common *common __unused,
372 struct net80211_device *dev,
373 struct ath_rx_status *rx_stats,
376 struct ath_softc *sc = (struct ath_softc *)dev->priv;
380 band = (dev->channels + sc->dev->channel)->band;
382 for (i = 0; i < sc->hwinfo->nr_rates[band]; i++) {
383 if (sc->rates[i].hw_value == rx_stats->rs_rate) {
387 if (sc->rates[i].hw_value_short == rx_stats->rs_rate) {
394 * No valid hardware bitrate found -- we should not get here
395 * because hardware has already validated this frame as OK.
398 "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
/*
 * Combined per-frame preprocessing: acceptance check plus rate lookup.
 *
 * Runs ath9k_rx_accept() first, skips further status processing for
 * non-final fragments (rs_more), then resolves the bitrate index via
 * ath9k_process_rate().  Return statements are elided in this excerpt.
 */
405 * For Decrypt or Demic errors, we only mark packet status here and always push
406 * up the frame up to let mac80211 handle the actual error case, be it no
407 * decryption key or real decryption error. This let us keep statistics there.
409 static int ath9k_rx_iob_preprocess(struct ath_common *common,
410 struct net80211_device *dev,
411 struct ath_rx_status *rx_stats,
416 * everything but the rate is checked here, the rate check is done
417 * separately to avoid doing two lookups for a rate for each frame.
419 if (!ath9k_rx_accept(common, rx_stats, decrypt_error))
422 /* Only use status info from the last fragment */
423 if (rx_stats->rs_more)
426 if (ath9k_process_rate(common, dev, rx_stats, rix))
/*
 * Main RX processing loop (the function continues past this excerpt).
 *
 * For each completed buffer: fetch it (ath_get_next_rx_buf), skip
 * processing entirely when flushing, run preprocessing, allocate a
 * replacement io_buffer BEFORE handing the current one to the stack
 * (so hardware always has a buffer to DMA into), strip the hardware
 * RX status prefix, update antenna diversity, and deliver the frame
 * via net80211_rx().  On any failure the buffer is requeued to the
 * hardware unprocessed (requeue_drop_frag).
 *
 * NOTE(review): the loop construct, the `bf`/`iob` fetch, and the
 * function's tail are elided in this excerpt.
 */
432 int ath_rx_tasklet(struct ath_softc *sc, int flush, int hp __unused)
435 struct io_buffer *iob = NULL, *requeue_iob;
436 u32 *requeue_iob_addr = NULL;
437 struct ath_hw *ah = sc->sc_ah;
438 struct ath_common *common = ath9k_hw_common(ah);
440 * The hw can technically differ from common->hw when using ath9k
441 * virtual wiphy so to account for that we iterate over the active
442 * wiphys and find the appropriate wiphy and therefore hw.
444 struct net80211_device *dev = sc->dev;
446 int decrypt_error = 0;
447 struct ath_rx_status rs;
451 /* If handling rx interrupt and flush is in progress => exit */
452 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
455 memset(&rs, 0, sizeof(rs));
456 bf = ath_get_next_rx_buf(sc, &rs);
466 * If we're asked to flush receive queue, directly
467 * chain it back at the queue without processing it.
470 goto requeue_drop_frag;
472 retval = ath9k_rx_iob_preprocess(common, dev, &rs,
473 &rix, &decrypt_error);
475 goto requeue_drop_frag;
477 /* Ensure we always have an iob to requeue once we are done
478 * processing the current buffer's iob */
479 requeue_iob = ath_rxbuf_alloc(common, common->rx_bufsize, requeue_iob_addr);
481 /* If there is no memory we ignore the current RX'd frame,
482 * tell hardware it can give us a new frame using the old
483 * iob and put it at the tail of the sc->rx.rxbuf list for
486 goto requeue_drop_frag;
/* Expose the received payload, then strip the HW status prefix. */
488 iob_put(iob, rs.rs_datalen + ah->caps.rx_status_len);
489 if (ah->caps.rx_status_len)
490 iob_pull(iob, ah->caps.rx_status_len);
492 /* We will now give hardware our shiny new allocated iob */
493 bf->bf_mpdu = requeue_iob;
494 bf->bf_buf_addr = *requeue_iob_addr;
497 * change the default rx antenna if rx diversity chooses the
498 * other antenna 3 times in a row.
500 if (sc->rx.defant != rs.rs_antenna) {
501 if (++sc->rx.rxotherant >= 3)
502 ath_setdefantenna(sc, rs.rs_antenna);
504 sc->rx.rxotherant = 0;
507 DBGIO("ath9k: rx %d bytes, signal %d, bitrate %d, hw_value %d\n", rs.rs_datalen,
508 rs.rs_rssi, sc->rates[rix].bitrate, rs.rs_rate);
510 net80211_rx(dev, iob, rs.rs_rssi,
511 sc->rates[rix].bitrate);
/* Requeue path: give the (old) buffer back to the hardware chain. */
515 list_add_tail(&bf->list, &sc->rx.rxbuf);
516 ath_rx_buf_link(sc, bf);