X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?p=kvmfornfv.git;a=blobdiff_plain;f=kernel%2Fdrivers%2Fnet%2Fwireless%2Fbrcm80211%2Fbrcmfmac%2Fmsgbuf.c;h=44e618f9d89046b9350ce6fbc4eabb16f392458e;hp=65efb146898844510aa489502ee2c9db23906c92;hb=e09b41010ba33a20a87472ee821fa407a5b8da36;hpb=f93b97fd65072de626c074dbe099a1fff05ce060

diff --git a/kernel/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/kernel/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index 65efb1468..44e618f9d 100644
--- a/kernel/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/kernel/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -73,8 +73,10 @@
 #define BRCMF_MSGBUF_TX_FLUSH_CNT1		32
 #define BRCMF_MSGBUF_TX_FLUSH_CNT2		96
 
-#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS	64
+#define BRCMF_MSGBUF_DELAY_TXWORKER_THRS	96
 #define BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS	32
+#define BRCMF_MSGBUF_UPDATE_RX_PTR_THRS		48
+
 
 struct msgbuf_common_hdr {
 	u8				msgtype;
@@ -278,16 +280,6 @@ struct brcmf_msgbuf_pktids {
 	struct brcmf_msgbuf_pktid *array;
 };
 
-
-/* dma flushing needs implementation for mips and arm platforms. Should
- * be put in util. Note, this is not real flushing. It is virtual non
- * cached memory. Only write buffers should have to be drained. Though
- * this may be different depending on platform......
- */
-#define brcmf_dma_flush(addr, len)
-#define brcmf_dma_invalidate_cache(addr, len)
-
-
 static void brcmf_msgbuf_rxbuf_ioctlresp_post(struct brcmf_msgbuf *msgbuf);
 
 
@@ -462,7 +454,6 @@ static int brcmf_msgbuf_tx_ioctl(struct brcmf_pub *drvr, int ifidx,
 		memcpy(msgbuf->ioctbuf, buf, buf_len);
 	else
 		memset(msgbuf->ioctbuf, 0, buf_len);
-	brcmf_dma_flush(ioctl_buf, buf_len);
 
 	err = brcmf_commonring_write_complete(commonring);
 	brcmf_commonring_unlock(commonring);
@@ -531,7 +522,7 @@ static int brcmf_msgbuf_set_dcmd(struct brcmf_pub *drvr, int ifidx,
 
 
 static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
-				u8 *ifidx, struct sk_buff *skb)
+				struct sk_buff *skb, struct brcmf_if **ifp)
 {
 	return -ENODEV;
 }
@@ -795,6 +786,8 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
 	struct brcmf_flowring *flow = msgbuf->flow;
 	struct ethhdr *eh = (struct ethhdr *)(skb->data);
 	u32 flowid;
+	u32 queue_count;
+	bool force;
 
 	flowid = brcmf_flowring_lookup(flow, eh->h_dest, skb->priority, ifidx);
 	if (flowid == BRCMF_FLOWRING_INVALID_ID) {
@@ -802,8 +795,9 @@ static int brcmf_msgbuf_txdata(struct brcmf_pub *drvr, int ifidx,
 		if (flowid == BRCMF_FLOWRING_INVALID_ID)
 			return -ENOMEM;
 	}
-	brcmf_flowring_enqueue(flow, flowid, skb);
-	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, false);
+	queue_count = brcmf_flowring_enqueue(flow, flowid, skb);
+	force = ((queue_count % BRCMF_MSGBUF_TRICKLE_TXWORKER_THRS) == 0);
+	brcmf_msgbuf_schedule_txdata(msgbuf, flowid, force);
 
 	return 0;
 }
@@ -879,7 +873,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
 	commonring = msgbuf->flowrings[flowid];
 	atomic_dec(&commonring->outstanding_tx);
 
-	brcmf_txfinalize(msgbuf->drvr, skb, tx_status->msg.ifidx, true);
+	brcmf_txfinalize(brcmf_get_ifp(msgbuf->drvr, tx_status->msg.ifidx),
+			 skb, true);
 }
 
 
@@ -1087,15 +1082,7 @@ brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb,
 {
 	struct brcmf_if *ifp;
 
-	/* The ifidx is the idx to map to matching netdev/ifp. When receiving
-	 * events this is easy because it contains the bssidx which maps
-	 * 1-on-1 to the netdev/ifp. But for data frames the ifidx is rcvd.
-	 * bssidx 1 is used for p2p0 and no data can be received or
-	 * transmitted on it. Therefor bssidx is ifidx + 1 if ifidx > 0
-	 */
-	if (ifidx)
-		(ifidx)++;
-	ifp = msgbuf->drvr->iflist[ifidx];
+	ifp = brcmf_get_ifp(msgbuf->drvr, ifidx);
 	if (!ifp || !ifp->ndev) {
 		brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
 		brcmu_pkt_buf_free_skb(skb);
@@ -1265,19 +1252,27 @@ static void brcmf_msgbuf_process_rx(struct brcmf_msgbuf *msgbuf,
 {
 	void *buf;
 	u16 count;
+	u16 processed;
 
 again:
 	buf = brcmf_commonring_get_read_ptr(commonring, &count);
 	if (buf == NULL)
 		return;
 
+	processed = 0;
 	while (count) {
 		brcmf_msgbuf_process_msgtype(msgbuf,
 					     buf + msgbuf->rx_dataoffset);
 		buf += brcmf_commonring_len_item(commonring);
+		processed++;
+		if (processed == BRCMF_MSGBUF_UPDATE_RX_PTR_THRS) {
+			brcmf_commonring_read_complete(commonring, processed);
+			processed = 0;
+		}
 		count--;
 	}
-	brcmf_commonring_read_complete(commonring);
+	if (processed)
+		brcmf_commonring_read_complete(commonring, processed);
 
 	if (commonring->r_ptr == 0)
 		goto again;
@@ -1358,6 +1353,60 @@ void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid)
 	}
 }
 
+#ifdef DEBUG
+static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
+{
+	struct brcmf_bus *bus_if = dev_get_drvdata(seq->private);
+	struct brcmf_pub *drvr = bus_if->drvr;
+	struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd;
+	struct brcmf_commonring *commonring;
+	u16 i;
+	struct brcmf_flowring_ring *ring;
+	struct brcmf_flowring_hash *hash;
+
+	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_CONTROL_SUBMIT];
+	seq_printf(seq, "h2d_ctl_submit: rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+	commonring = msgbuf->commonrings[BRCMF_H2D_MSGRING_RXPOST_SUBMIT];
+	seq_printf(seq, "h2d_rx_submit:  rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_CONTROL_COMPLETE];
+	seq_printf(seq, "d2h_ctl_cmplt:  rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_TX_COMPLETE];
+	seq_printf(seq, "d2h_tx_cmplt:   rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+	commonring = msgbuf->commonrings[BRCMF_D2H_MSGRING_RX_COMPLETE];
+	seq_printf(seq, "d2h_rx_cmplt:   rp %4u, wp %4u, depth %4u\n",
+		   commonring->r_ptr, commonring->w_ptr, commonring->depth);
+
+	seq_printf(seq, "\nh2d_flowrings: depth %u\n",
+		   BRCMF_H2D_TXFLOWRING_MAX_ITEM);
+	seq_puts(seq, "Active flowrings:\n");
+	hash = msgbuf->flow->hash;
+	for (i = 0; i < msgbuf->flow->nrofrings; i++) {
+		if (!msgbuf->flow->rings[i])
+			continue;
+		ring = msgbuf->flow->rings[i];
+		if (ring->status != RING_OPEN)
+			continue;
+		commonring = msgbuf->flowrings[i];
+		hash = &msgbuf->flow->hash[ring->hash_id];
+		seq_printf(seq, "id %3u: rp %4u, wp %4u, qlen %4u, blocked %u\n"
+				"        ifidx %u, fifo %u, da %pM\n",
+				i, commonring->r_ptr, commonring->w_ptr,
+				skb_queue_len(&ring->skblist), ring->blocked,
+				hash->ifidx, hash->fifo, hash->mac);
+	}
+
+	return 0;
+}
+#else
+static int brcmf_msgbuf_stats_read(struct seq_file *seq, void *data)
+{
+	return 0;
+}
+#endif
 
 int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
 {
@@ -1458,6 +1507,8 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
 	spin_lock_init(&msgbuf->flowring_work_lock);
 	INIT_LIST_HEAD(&msgbuf->work_queue);
 
+	brcmf_debugfs_add_entry(drvr, "msgbuf_stats", brcmf_msgbuf_stats_read);
+
 	return 0;
 
 fail: