/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Modified for iPXE by Scott K Logan <logans@cottsay.net> July 2011
 * Original from Linux kernel 3.0.1
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"

static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi __unused)
{
	DBG2("ath9k: "
		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		ah->txurn_interrupt_mask);

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

	ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
	ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	REGWRITE_BUFFER_FLUSH(ah);
}

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	DBG2("ath9k: "
		"Enable TXE on queue: %d\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}

/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether the frame trigger level should be increased
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first).
 *
 * Care must be taken to set the frame trigger level based on the DMA
 * request size. For example, if the DMA request size is 128 bytes, the
 * trigger level cannot exceed 6 * 64 = 384 bytes: there must be enough
 * space left in the TX FIFO for the requested transfer, so with a
 * 512 byte FIFO the fill level must stop at 512 - 128 = 384 bytes. If
 * the threshold is set to a value beyond 6 units, the transmit will hang.
 *
 * Current dual stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB, however,
 * there is a hardware issue which forces us to use 2 KB instead so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 */
int ath9k_hw_updatetxtriglevel(struct ath_hw *ah, int bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;

	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return 0;

	ath9k_hw_disable_interrupts(ah);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_enable_interrupts(ah);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}

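/*
 * Worked example of the trigger-level arithmetic documented above
 * (illustrative only; the 128 byte DMA request size and 512 byte FIFO
 * are assumed values matching the example in the comment):
 *
 *	fifo_size   = 512;				// bytes
 *	dma_request = 128;				// bytes per DMA burst
 *	max_level   = (fifo_size - dma_request) / 64;	// = 6 units
 *
 * ath9k_hw_updatetxtriglevel() moves ah->tx_trig_level by one unit per
 * call, clamped to [MIN_TX_FIFO_THRESHOLD, ah->config.max_txtrig_level],
 * so a caller raising the level on TX underrun never has to compute the
 * bound itself.
 */
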
void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
{
	int i, q;

	REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);

	REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	for (q = 0; q < AR_NUM_QCU; q++) {
		for (i = 0; i < 1000; i++) {
			if (i)
				udelay(5);

			if (!ath9k_hw_numtxpending(ah, q))
				break;
		}
	}

	REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	REG_WRITE(ah, AR_Q_TXD, 0);
}

void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
	*txqs &= ah->intr_txqs;
	ah->intr_txqs &= ~(*txqs);
}

int ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			   const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DBG("ath9k: "
			"Set TXQ properties, inactive queue: %d\n", q);
		return 0;
	}

	DBG2("ath9k: Set queue properties for: %d\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	return 1;
}

int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath9k_tx_queue_info *qi;
	int q;

	/* Pick the first inactive hardware queue */
	for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
		if (ah->txq[q].tqi_type ==
		    ATH9K_TX_QUEUE_INACTIVE)
			break;
	if (q == ATH9K_NUM_TX_QUEUES) {
		DBG("No available TX queue\n");
		return -1;
	}

	DBG2("ath9k: Setup TX queue: %d\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		DBG("ath9k: TX queue: %d already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	if (qinfo == NULL) {
		qi->tqi_qflags =
			TXQ_FLAG_TXOKINT_ENABLE
			| TXQ_FLAG_TXERRINT_ENABLE
			| TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
		(void) ath9k_hw_set_txq_props(ah, q, qinfo);
	}

	return q;
}

int ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DBG("ath9k: "
			"Release TXQ, inactive queue: %d\n", q);
		return 0;
	}

	DBG2("ath9k: Release TX queue: %d\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return 1;
}

int ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value __unused;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DBG("ath9k: "
			"Reset TXQ, inactive queue: %d\n", q);
		return 1;
	}

	DBG2("ath9k: Reset TX queue: %d\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);

	if (AR_SREV_9340(ah))
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
	else
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
			    (qi->tqi_cbrOverflowLimit ?
			     AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);

	REGWRITE_BUFFER_FLUSH(ah);

	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_SET_BIT(ah, AR_DMISC(q),
			    SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			       AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			    AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return 1;
}

int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			struct ath_rx_status *rs, u64 tsf __unused)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	rs->rs_status = 0;
	rs->rs_flags = 0;

	rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	rs->rs_tstamp = ads.AR_RcvTimestamp;

	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		rs->rs_rssi = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
	} else {
		rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt00);
		rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt01);
		rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt02);
		rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt10);
		rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt11);
		rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
	rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	rs->rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	rs->rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	rs->rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		/*
		 * Treat these errors as mutually exclusive to avoid spurious
		 * extra error reports from the hardware. If a CRC error is
		 * reported, then decryption and MIC errors are irrelevant,
		 * the frame is going to be dropped either way
		 */
		if (ads.ds_rxstatus8 & AR_CRCErr)
			rs->rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			rs->rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			rs->rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			rs->rs_status |= ATH9K_RXERR_MIC;
		else if (ads.ds_rxstatus8 & AR_KeyMiss)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
	}

	return 0;
}

/*
 * This can stop or re-enable RX.
 *
 * If 'set' is true, this will kill any frame which is currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started.
 */
int ath9k_hw_setrxabort(struct ath_hw *ah, int set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
			reg = REG_READ(ah, AR_OBS_BUS_1);
			DBG("ath9k: "
				"RX failed to go idle in 10 ms RXSM=0x%x\n",
				reg);
			return 0;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return 1;
}

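/*
 * Usage sketch (illustrative, not code from this driver): a caller that
 * wants to pause the PCU receive path and later resume it would do:
 *
 *	if (!ath9k_hw_setrxabort(ah, 1))
 *		DBG("ath9k: RX state machine did not go idle\n");
 *	...reconfigure...
 *	ath9k_hw_setrxabort(ah, 0);	// re-enable RX
 */
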
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}

void ath9k_hw_startpcureceive(struct ath_hw *ah, int is_scanning)
{
	ath9k_ani_reset(ah, is_scanning);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}

void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);
}

int ath9k_hw_stopdmarecv(struct ath_hw *ah, int *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
	u32 mac_status, last_mac_status = 0;
	int i;

	/* Enable access to the DMA observation bus */
	REG_WRITE(ah, AR_MACMISC,
		  ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
		   (AR_MACMISC_MISC_OBS_BUS_1 <<
		    AR_MACMISC_MISC_OBS_BUS_MSB_S)));

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;

		if (!AR_SREV_9300_20_OR_LATER(ah)) {
			mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
			if (mac_status == 0x1c0 && mac_status == last_mac_status) {
				*reset = 1;
				break;
			}

			last_mac_status = mac_status;
		}

		udelay(AH_TIME_QUANTUM);
	}

	if (i == 0) {
		DBG("ath9k: "
			"DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
			AH_RX_STOP_DMA_TIMEOUT / 1000,
			REG_READ(ah, AR_CR),
			REG_READ(ah, AR_DIAG_SW),
			REG_READ(ah, AR_DMADBG_7));
		return 0;
	} else {
		return 1;
	}

#undef AH_RX_STOP_DMA_TIMEOUT
}

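/*
 * Illustrative stop-receive sequence (a sketch; the exact ordering used
 * by the driver core lives outside this file): abort the PCU first so
 * no new frames start, then wait for the RX DMA engine to drain before
 * rewriting the RX descriptor pointer:
 *
 *	int reset = 0;
 *	ath9k_hw_abortpcurecv(ah);
 *	if (!ath9k_hw_stopdmarecv(ah, &reset))
 *		DBG("ath9k: receive DMA failed to stop\n");
 *	ath9k_hw_putrxbuf(ah, new_rxdp);	// new_rxdp: hypothetical
 */
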
int ath9k_hw_intrpend(struct ath_hw *ah)
{
	u32 host_isr;

	if (AR_SREV_9100(ah) || !(ah->ah_ier & AR_IER_ENABLE))
		return 1;

	host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
	if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
		return 1;

	host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
	if ((host_isr & AR_INTR_SYNC_DEFAULT)
	    && (host_isr != AR_INTR_SPURIOUS))
		return 1;

	return 0;
}

void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
	DBG2("ath9k: disable IER\n");
	/* ah->ah_ier caches the desired IER value, managed by the caller */
	REG_WRITE(ah, AR_IER, ah->ah_ier);
	(void) REG_READ(ah, AR_IER);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
	}
}

void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
	u32 sync_default = AR_INTR_SYNC_DEFAULT;

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		return;

	if (AR_SREV_9340(ah))
		sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;

	DBG2("ath9k: enable IER\n");
	REG_WRITE(ah, AR_IER, ah->ah_ier);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
			  AR_INTR_MAC_IRQ);
		REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
		REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
	}
	DBG2("ath9k: AR_IMR 0x%x IER 0x%x\n",
		REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}

void ath9k_hw_set_interrupts(struct ath_hw *ah, unsigned int ints)
{
	enum ath9k_int omask = ah->imask;
	u32 mask, mask2;
	struct ath9k_hw_capabilities *pCap = &ah->caps;

	if (!(ints & ATH9K_INT_GLOBAL))
		ath9k_hw_disable_interrupts(ah);

	DBG2("ath9k: 0x%x => 0x%x\n", omask, ints);

	/* TODO: global int Ref count */
	mask = ints & ATH9K_INT_COMMON;
	mask2 = 0;

	if (ints & ATH9K_INT_TX) {
		if (ah->config.tx_intr_mitigation)
			mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
		else {
			if (ah->txok_interrupt_mask)
				mask |= AR_IMR_TXOK;
			if (ah->txdesc_interrupt_mask)
				mask |= AR_IMR_TXDESC;
		}
		if (ah->txerr_interrupt_mask)
			mask |= AR_IMR_TXERR;
		if (ah->txeol_interrupt_mask)
			mask |= AR_IMR_TXEOL;
	}
	if (ints & ATH9K_INT_RX) {
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
			if (ah->config.rx_intr_mitigation) {
				mask &= ~AR_IMR_RXOK_LP;
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			} else {
				mask |= AR_IMR_RXOK_LP;
			}
		} else {
			if (ah->config.rx_intr_mitigation)
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			else
				mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
		}
		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			mask |= AR_IMR_GENTMR;
	}

	if (ints & ATH9K_INT_GENTIMER)
		mask |= AR_IMR_GENTMR;

	if (ints & (ATH9K_INT_BMISC)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_TIM)
			mask2 |= AR_IMR_S2_TIM;
		if (ints & ATH9K_INT_DTIM)
			mask2 |= AR_IMR_S2_DTIM;
		if (ints & ATH9K_INT_DTIMSYNC)
			mask2 |= AR_IMR_S2_DTIMSYNC;
		if (ints & ATH9K_INT_CABEND)
			mask2 |= AR_IMR_S2_CABEND;
		if (ints & ATH9K_INT_TSFOOR)
			mask2 |= AR_IMR_S2_TSFOOR;
	}

	if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_GTT)
			mask2 |= AR_IMR_S2_GTT;
		if (ints & ATH9K_INT_CST)
			mask2 |= AR_IMR_S2_CST;
	}

	DBG2("ath9k: new IMR 0x%x\n", mask);
	REG_WRITE(ah, AR_IMR, mask);
	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
			   AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
	ah->imrs2_reg |= mask2;
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if (ints & ATH9K_INT_TIM_TIMER)
			REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
		else
			REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
	}

	if (ints & ATH9K_INT_GLOBAL)
		ath9k_hw_enable_interrupts(ah);

	return;
}