/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Modified for iPXE by Scott K Logan <logans@cottsay.net> July 2011
 * Original from Linux kernel 3.0.1
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "ath9k.h"
#include "ar9003_mac.h"
#define BITS_PER_BYTE           8
#define OFDM_PLCP_BITS          22
#define HT_RC_2_STREAMS(_rc)    ((((_rc) & 0x78) >> 3) + 1)

#define HT_LTF(_ns)             (4 * (_ns))
#define SYMBOL_TIME(_ns)        ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
#define NUM_SYMBOLS_PER_USEC(_usec) ((_usec) >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) ((((_usec) * 5) - 4) / 18)

#define IS_HT_RATE(_rate)       ((_rate) & 0x80)
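
/*
 * Worked example (illustrative, not from the original source): an HT
 * symbol lasts 4 us with a full guard interval and 3.6 us with a half
 * guard interval, so for 10 symbols SYMBOL_TIME(10) = 10 << 2 = 40 us,
 * while SYMBOL_TIME_HALFGI(10) = (10 * 18 + 4) / 5 = 36 us, i.e. the
 * half-GI macro approximates 3.6 us per symbol in integer arithmetic.
 */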
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
/*********************/
/* Aggregation logic */
/*********************/
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
	struct ath_atx_ac *ac = tid->ac;

	if (tid->sched)
		return;

	tid->sched = 1;
	list_add_tail(&tid->list, &ac->tid_q);

	if (ac->sched)
		return;

	ac->sched = 1;
	list_add_tail(&ac->list, &txq->axq_acq);
}
static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
{
	struct ath_buf *bf = NULL;

	if (list_empty(&sc->tx.txbuf)) {
		return NULL;
	}

	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
	list_del(&bf->list);

	return bf;
}
static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
{
	list_add_tail(&bf->list, &sc->tx.txbuf);
}
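
/*
 * Illustrative pairing of the two helpers above (a sketch, not code
 * from the original driver): a transmit path borrows a buffer from
 * sc->tx.txbuf and must hand it back on any failure path:
 *
 *	struct ath_buf *bf = ath_tx_get_buffer(sc);
 *	if (!bf)
 *		return -ENOBUFS;
 *	...
 *	ath_tx_return_buffer(sc, bf);	(error path only)
 */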
/********************/
/* Queue Management */
/********************/
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_tx_queue_info qi;
	static const int subtype_txq_to_hwq[] = {
		[WME_AC_BE] = ATH_TXQ_AC_BE,
		[WME_AC_BK] = ATH_TXQ_AC_BK,
		[WME_AC_VI] = ATH_TXQ_AC_VI,
		[WME_AC_VO] = ATH_TXQ_AC_VO,
	};
	int axq_qnum, i;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
	qi.tqi_physCompBuf = 0;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 *
	 * The UAPSD queue is an exception, since we take a desc-
	 * based intr on the EOSP frames.
	 */
	qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
			TXQ_FLAG_TXDESCINT_ENABLE;

	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
	if (axq_qnum == -1) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return NULL;
	}
	if ((unsigned int)axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
		DBG("ath9k: qnum %d out of range, max %zd!\n",
		    axq_qnum, ARRAY_SIZE(sc->tx.txq));
		ath9k_hw_releasetxqueue(ah, axq_qnum);
		return NULL;
	}
	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
		struct ath_txq *txq = &sc->tx.txq[axq_qnum];

		txq->axq_qnum = axq_qnum;
		txq->mac80211_qnum = -1;
		txq->axq_link = NULL;
		INIT_LIST_HEAD(&txq->axq_q);
		INIT_LIST_HEAD(&txq->axq_acq);
		txq->axq_depth = 0;
		txq->axq_ampdu_depth = 0;
		txq->axq_tx_inprogress = 0;
		sc->tx.txqsetup |= 1 << axq_qnum;

		txq->txq_headidx = txq->txq_tailidx = 0;
		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
			INIT_LIST_HEAD(&txq->txq_fifo[i]);
		INIT_LIST_HEAD(&txq->txq_fifo_pending);
	}
	return &sc->tx.txq[axq_qnum];
}
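
/*
 * Typical use (illustrative only; the actual call sites live in the
 * driver's init code): one hardware data queue is set up per WME
 * access category, e.g.
 *
 *	sc->tx.txq_map[WME_AC_BE] =
 *		ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, WME_AC_BE);
 */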
/*
 * Drain a given TX queue (could be Beacon or Data)
 *
 * This assumes output has been stopped and
 * we do not need to block ath_tx_tasklet.
 */
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, int retry_tx __unused)
{
	struct ath_buf *bf, *lastbf __unused;
	struct list_head bf_head;
	struct ath_tx_status ts;

	memset(&ts, 0, sizeof(ts));
	INIT_LIST_HEAD(&bf_head);

	for (;;) {
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			break;
		}

		bf = list_first_entry(&txq->axq_q, struct ath_buf,
				      list);

		if (bf->bf_stale) {
			list_del(&bf->list);

			ath_tx_return_buffer(sc, bf);
			continue;
		}

		lastbf = bf->bf_lastbf;

		list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);

		txq->axq_depth--;
		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
	}

	txq->axq_tx_inprogress = 0;
}
int ath_drain_all_txq(struct ath_softc *sc, int retry_tx)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_txq *txq;
	int i, npend = 0;

	if (sc->sc_flags & SC_OP_INVALID)
		return 1;

	ath9k_hw_abort_tx_dma(ah);

	/* Check if any queue remains active */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
	}

	if (npend)
		DBG("ath9k: Failed to stop TX DMA!\n");

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		/*
		 * The caller will resume queues with ieee80211_wake_queues.
		 * Mark the queue as not stopped to prevent ath_tx_complete
		 * from waking the queue too early.
		 */
		txq = &sc->tx.txq[i];
		txq->stopped = 0;
		ath_draintxq(sc, txq, retry_tx);
	}

	return npend;
}
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
{
	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
	sc->tx.txqsetup &= ~(1 << txq->axq_qnum);
}
/* For each axq_acq entry, for each tid, try to schedule packets
 * for transmit until ampdu_depth has reached min Q depth.
 */
void ath_txq_schedule(struct ath_softc *sc __unused, struct ath_txq *txq)
{
	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
	struct ath_atx_tid *tid, *last_tid;

	if (list_empty(&txq->axq_acq) ||
	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
		return;

	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);

	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
		list_del(&ac->list);
		ac->sched = 0;

		while (!list_empty(&ac->tid_q)) {
			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
					       list);
			list_del(&tid->list);
			tid->sched = 0;

			/*
			 * add tid to round-robin queue if more frames
			 * are pending for the tid
			 */
			if (!list_empty(&tid->buf_q))
				ath_tx_queue_tid(txq, tid);

			if (tid == last_tid ||
			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
				break;
		}

		if (!list_empty(&ac->tid_q)) {
			if (!ac->sched) {
				ac->sched = 1;
				list_add_tail(&ac->list, &txq->axq_acq);
			}
		}

		if (ac == last_ac ||
		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
			return;
	}
}
/*
 * Insert a chain of ath_buf (descriptors) on a txq and
 * assume the descriptors are already chained together by caller.
 */
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
			     struct list_head *head)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf;

	/*
	 * Insert the frame on the outbound list and
	 * pass it on to the hardware.
	 */

	if (list_empty(head))
		return;

	bf = list_first_entry(head, struct ath_buf, list);

	DBGIO("ath9k: "
		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);

	list_splice_tail_init(head, &txq->axq_q);

	if (txq->axq_link == NULL) {
		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
		DBGIO("ath9k: TXDP[%d] = %llx (%p)\n",
			txq->axq_qnum, ito64(bf->bf_daddr),
			bf->bf_desc);
	} else {
		*txq->axq_link = bf->bf_daddr;
		DBGIO("ath9k: "
			"link[%d] (%p)=%llx (%p)\n",
			txq->axq_qnum, txq->axq_link,
			ito64(bf->bf_daddr), bf->bf_desc);
	}
	ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
			       &txq->axq_link);
	ath9k_hw_txstart(ah, txq->axq_qnum);

	txq->axq_depth++;
}
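
/*
 * Illustration (added commentary, not from the original source) of the
 * descriptor chaining performed above: the first buffer is written to
 * the hardware TXDP register, later buffers are patched into the link
 * field of the previous tail, so DMA continues without a restart:
 *
 *	TXDP -> bf0 -> bf1 -> bf2 -> (NULL)
 *	                      ^-- *txq->axq_link is updated here
 */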
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf, iob_len(bf->bf_mpdu) + FCS_LEN);
	ath_tx_txqaddbuf(sc, txq, bf_head);
}
static enum ath9k_pkt_type get_hw_packet_type(struct io_buffer *iob)
{
	struct ieee80211_frame *hdr;
	enum ath9k_pkt_type htype;
	u16 fc;

	hdr = (struct ieee80211_frame *)iob->data;
	fc = hdr->fc;

	if ((fc & (IEEE80211_FC_TYPE | IEEE80211_FC_SUBTYPE)) ==
	    (IEEE80211_TYPE_MGMT | IEEE80211_STYPE_BEACON))
		htype = ATH9K_PKT_TYPE_BEACON;
	else if ((fc & (IEEE80211_FC_TYPE | IEEE80211_FC_SUBTYPE)) ==
		 (IEEE80211_TYPE_MGMT | IEEE80211_STYPE_PROBE_RESP))
		htype = ATH9K_PKT_TYPE_PROBE_RESP;
	else
		htype = ATH9K_PKT_TYPE_NORMAL;

	return htype;
}
static int setup_tx_flags(struct io_buffer *iob __unused)
{
	int flags = 0;

	flags |= ATH9K_TXDESC_INTREQ;

	return flags;
}
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
	    (curchan->channelFlags & CHANNEL_5GHZ) &&
	    (chainmask == 0x7) && (rate < 0x90))
		return 0x3;
	else
		return chainmask;
}
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_11n_rate_series series[4];
	const struct ath9k_legacy_rate *rate;
	int i, flags = 0;
	u8 rix = 0, ctsrate = 0;
	/* Assumption: iPXE never transmits PS-Poll frames, so
	 * dur_update_en is always wanted in the call below. */
	int is_pspoll = 0;

	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);

	/*
	 * We check if Short Preamble is needed for the CTS rate by
	 * checking the BSS's global flag.
	 * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
	 */
	rate = &sc->rates[sc->hw_rix];
	ctsrate = rate->hw_value;
	if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
		ctsrate |= rate->hw_value_short;

	for (i = 0; i < 4; i++) {
		int is_40 __unused, is_sgi __unused, is_sp;
		int phy;

		series[i].Tries = ATH_TXMAXTRY;

		if (sc->sc_flags & SC_OP_PROTECT_ENABLE) {
			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
			flags |= ATH9K_TXDESC_CTSENA;
		}

		is_sp = !!(rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);

		/* legacy rates */
		if ((sc->dev->channels + sc->dev->channel)->band == NET80211_BAND_2GHZ)
			phy = WLAN_RC_PHY_CCK;
		else
			phy = WLAN_RC_PHY_OFDM;

		series[i].Rate = rate->hw_value;
		if (rate->hw_value_short && (sc->sc_flags & SC_OP_PREAMBLE_SHORT)) {
			if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
				series[i].Rate |= rate->hw_value_short;
		}

		if (bf->bf_state.bfs_paprd)
			series[i].ChSel = common->tx_chainmask;
		else
			series[i].ChSel = ath_txchainmask_reduction(sc,
					common->tx_chainmask, series[i].Rate);

		series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
			phy, rate->bitrate * 100, len, rix, is_sp);
	}

	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
	if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
		flags &= ~ATH9K_TXDESC_RTSENA;

	/* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
	if (flags & ATH9K_TXDESC_RTSENA)
		flags &= ~ATH9K_TXDESC_CTSENA;

	/* set dur_update_en for l-sig computation except for PS-Poll frames */
	ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
				     bf->bf_lastbf->bf_desc,
				     !is_pspoll, ctsrate,
				     0, series, 4, flags);
}
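
/*
 * Note (added commentary, not in the original source): all four
 * entries of series[] are filled identically from the single current
 * rate (sc->rates[sc->hw_rix]), so the hardware multi-rate retry
 * chain degenerates to ATH_TXMAXTRY attempts at one rate; rate
 * selection is handled elsewhere in the driver.
 */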
static struct ath_buf *ath_tx_setup_buffer(struct net80211_device *dev,
					   struct ath_txq *txq,
					   struct io_buffer *iob)
{
	struct ath_softc *sc = dev->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf;
	struct ath_desc *ds;
	enum ath9k_pkt_type frm_type;
	static const enum ath9k_key_type net80211_keytype_to_ath[] = {
		[NET80211_CRYPT_NONE] = ATH9K_KEY_TYPE_CLEAR,
		[NET80211_CRYPT_WEP] = ATH9K_KEY_TYPE_WEP,
		[NET80211_CRYPT_TKIP] = ATH9K_KEY_TYPE_TKIP,
		[NET80211_CRYPT_CCMP] = ATH9K_KEY_TYPE_AES,
		[NET80211_CRYPT_UNKNOWN] = ATH9K_KEY_TYPE_CLEAR,
	};

	bf = ath_tx_get_buffer(sc);
	if (!bf) {
		DBG("ath9k: TX buffers are full\n");
		return NULL;
	}

	bf->bf_flags = setup_tx_flags(iob);
	bf->bf_mpdu = iob;

	bf->bf_buf_addr = virt_to_bus(iob->data);

	frm_type = get_hw_packet_type(iob);

	ds = bf->bf_desc;
	ath9k_hw_set_desc_link(ah, ds, 0);

	ath9k_hw_set11n_txdesc(ah, ds, iob_len(iob) + FCS_LEN, frm_type, MAX_RATE_POWER,
			       ATH9K_TXKEYIX_INVALID,
			       net80211_keytype_to_ath[dev->crypto->algorithm],
			       bf->bf_flags);

	ath9k_hw_filltxdesc(ah, ds,
			    iob_len(iob),	/* segment length */
			    1,			/* first segment */
			    1,			/* last segment */
			    ds,			/* first descriptor */
			    bf->bf_buf_addr,
			    txq->axq_qnum);

	return bf;
}
/* FIXME: tx power */
static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
			     struct ath_tx_control *txctl)
{
	struct list_head bf_head;
	struct ath_atx_tid *tid = NULL;

	INIT_LIST_HEAD(&bf_head);
	list_add_tail(&bf->list, &bf_head);

	bf->bf_state.bfs_paprd = txctl->paprd;

	if (txctl->paprd)
		bf->bf_state.bfs_paprd_timestamp =
			(currticks() * 1000) / TICKS_PER_SEC;

	ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, 1);

	ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
}
/* Upon failure caller should free iob */
int ath_tx_start(struct net80211_device *dev, struct io_buffer *iob,
		 struct ath_tx_control *txctl)
{
	struct ath_softc *sc = dev->priv;
	struct ath_txq *txq = txctl->txq;
	struct ath_buf *bf;
	int q = 0;

	/*
	 * At this point, the vif, hw_key and sta pointers in the tx control
	 * info are no longer valid (overwritten by the ath_frame_info data).
	 */

	bf = ath_tx_setup_buffer(dev, txctl->txq, iob);
	if (!bf)
		return -ENOMEM;

	if (txq == sc->tx.txq_map[q] &&
	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
		txq->stopped = 1;
	}

	ath_tx_start_dma(sc, bf, txctl);

	return 0;
}
static void ath_tx_complete(struct ath_softc *sc, struct io_buffer *iob,
			    int tx_flags __unused, struct ath_tx_status *ts,
			    struct ath_txq *txq)
{
	struct net80211_device *dev = sc->dev;
	int q = 0, padpos __unused, padsize __unused;

	DBGIO("ath9k: TX complete: iob: %p\n", iob);

	if (txq == sc->tx.txq_map[q]) {
		if (--txq->pending_frames < 0)
			txq->pending_frames = 0;

		if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
			txq->stopped = 0;
		}
	}

	net80211_tx_complete(dev, iob, ts->ts_longretry,
			     (ts->ts_status & ATH9K_TXERR_MASK) ? EIO : 0);
}
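
/*
 * Added note (not in the original source): pending_frames implements
 * simple flow control. ath_tx_start increments it and sets
 * txq->stopped once the queue holds more than ATH_MAX_QDEPTH frames;
 * the decrement above reopens the queue as completions drain it.
 */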
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
				struct ath_txq *txq, struct list_head *bf_q,
				struct ath_tx_status *ts, int txok, int sendbar)
{
	struct io_buffer *iob = bf->bf_mpdu;
	int tx_flags = 0;

	if (sendbar)
		tx_flags = ATH_TX_BAR;

	if (!txok) {
		tx_flags |= ATH_TX_ERROR;

		if (bf_isxretried(bf))
			tx_flags |= ATH_TX_XRETRY;
	}

	bf->bf_buf_addr = 0;

	ath_tx_complete(sc, iob, tx_flags, ts, txq);

	/* At this point, iob (bf->bf_mpdu) is consumed...make sure we don't
	 * accidentally reference it later.
	 */
	bf->bf_mpdu = NULL;

	/*
	 * Return the list of ath_buf of this mpdu to free queue
	 */
	list_splice_tail_init(bf_q, &sc->tx.txbuf);
}
static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *lastbf, *bf_held = NULL;
	struct list_head bf_head;
	struct ath_desc *ds;
	struct ath_tx_status ts;
	int txok;
	int status;

	DBGIO("ath9k: tx queue %d (%x), link %p\n",
		txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
		txq->axq_link);

	for (;;) {
		if (list_empty(&txq->axq_q)) {
			txq->axq_link = NULL;
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_txq_schedule(sc, txq);
			break;
		}
		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);

		/*
		 * There is a race condition that a BH gets scheduled
		 * after sw writes TxE and before hw re-load the last
		 * descriptor to get the newly chained one.
		 * Software must keep the last DONE descriptor as a
		 * holding descriptor - software does so by marking
		 * it with the STALE flag.
		 */
		bf_held = NULL;
		if (bf->bf_stale) {
			bf_held = bf;
			if (list_is_last(&bf_held->list, &txq->axq_q)) {
				break;
			} else {
				bf = list_entry(bf_held->list.next,
						struct ath_buf, list);
			}
		}

		lastbf = bf->bf_lastbf;
		ds = lastbf->bf_desc;

		memset(&ts, 0, sizeof(ts));
		status = ath9k_hw_txprocdesc(ah, ds, &ts);
		if (status == -EINPROGRESS)
			break;

		/*
		 * Remove ath_buf's of the same transmit unit from txq,
		 * however leave the last descriptor back as the holding
		 * descriptor for hw.
		 */
		lastbf->bf_stale = 1;
		INIT_LIST_HEAD(&bf_head);
		if (!list_is_singular(&lastbf->list))
			list_cut_position(&bf_head,
				&txq->axq_q, lastbf->list.prev);

		txq->axq_depth--;
		txok = !(ts.ts_status & ATH9K_TXERR_MASK);
		txq->axq_tx_inprogress = 0;
		if (bf_held)
			list_del(&bf_held->list);

		if (bf_held)
			ath_tx_return_buffer(sc, bf_held);

		/*
		 * This frame is sent out as a single frame.
		 * Use hardware retry status for this frame.
		 */
		if (ts.ts_status & ATH9K_TXERR_XRETRY)
			bf->bf_state.bf_type |= BUF_XRETRY;

		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
	}

	if (sc->sc_flags & SC_OP_TXAGGR)
		ath_txq_schedule(sc, txq);
}
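
/*
 * Illustration of the holding-descriptor scheme above (added
 * commentary, not from the original source):
 *
 *	axq_q:  [stale bf] -> [frame A] -> [frame B]
 *
 * The completed head is left on the queue, marked bf_stale, so the
 * hardware can still follow its link pointer to newly chained
 * frames; it is only unlinked and recycled once a successor exists.
 */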
static void ath_tx_complete_poll_work(struct ath_softc *sc)
{
	struct ath_txq *txq;
	int i;
	int needreset = 0;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i)) {
			txq = &sc->tx.txq[i];
			if (txq->axq_depth) {
				if (txq->axq_tx_inprogress) {
					needreset = 1;
					break;
				} else {
					txq->axq_tx_inprogress = 1;
				}
			}
		}

	if (needreset) {
		DBG("ath9k: "
			"tx hung, resetting the chip\n");
		ath_reset(sc, 1);
	}

	sc->tx_complete_work_timer =
		(currticks() * 1000) / TICKS_PER_SEC + ATH_TX_COMPLETE_POLL_INT;
}
void ath_tx_tasklet(struct ath_softc *sc)
{
	int i;
	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
			ath_tx_processq(sc, &sc->tx.txq[i]);
	}
}

/*****************/
/* Init, Cleanup */
/*****************/
int ath_tx_init(struct ath_softc *sc, int nbufs)
{
	int error = 0;

	error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
				  "tx", nbufs, 1, 1);
	if (error != 0) {
		DBG("ath9k: "
			"Failed to allocate tx descriptors: %d\n", error);
		goto err;
	}

	sc->tx_complete_work = ath_tx_complete_poll_work;

err:
	if (error != 0)
		ath_tx_cleanup(sc);

	return error;
}
void ath_tx_cleanup(struct ath_softc *sc)
{
	if (sc->tx.txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
}