X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=kernel%2Fdrivers%2Fnet%2Fwireless%2Fiwlwifi%2Fpcie%2Ftx.c;h=a8c8a4a7420b53d02a798bcc261f408f77440ace;hb=e09b41010ba33a20a87472ee821fa407a5b8da36;hp=5ef8044c2ea3ed7317870902168c71be936cd8df;hpb=9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00;p=kvmfornfv.git

diff --git a/kernel/drivers/net/wireless/iwlwifi/pcie/tx.c b/kernel/drivers/net/wireless/iwlwifi/pcie/tx.c
index 5ef8044c2..a8c8a4a74 100644
--- a/kernel/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/kernel/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -219,8 +219,6 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 
 	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
 
-	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
 	sta_id = tx_cmd->sta_id;
 	sec_ctl = tx_cmd->sec_ctl;
 
@@ -239,6 +237,9 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 	if (trans_pcie->bc_table_dword)
 		len = DIV_ROUND_UP(len, 4);
 
+	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+		return;
+
 	bc_ent = cpu_to_le16(len | (sta_id << 12));
 
 	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
@@ -387,11 +388,18 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 
 	/* first TB is never freed - it's the scratchbuf data */
 
-	for (i = 1; i < num_tbs; i++)
-		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
-				 iwl_pcie_tfd_tb_get_len(tfd, i),
-				 DMA_TO_DEVICE);
-
+	for (i = 1; i < num_tbs; i++) {
+		if (meta->flags & BIT(i + CMD_TB_BITMAP_POS))
+			dma_unmap_page(trans->dev,
+				       iwl_pcie_tfd_tb_get_addr(tfd, i),
+				       iwl_pcie_tfd_tb_get_len(tfd, i),
+				       DMA_TO_DEVICE);
+		else
+			dma_unmap_single(trans->dev,
+					 iwl_pcie_tfd_tb_get_addr(tfd, i),
+					 iwl_pcie_tfd_tb_get_len(tfd, i),
+					 DMA_TO_DEVICE);
+	}
 	tfd->num_tbs = 0;
 }
 
@@ -467,7 +475,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 
 	iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
 
-	return 0;
+	return num_tbs;
 }
 
 static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
@@ -915,6 +923,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
 		}
 	}
 
+	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
 	if (trans->cfg->base_params->num_of_queues > 20)
 		iwl_set_bits_prph(trans, SCD_GP_CTRL,
 				  SCD_GP_CTRL_ENABLE_31_QUEUES);
@@ -1049,8 +1058,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
 	    !trans_pcie->cmd_hold_nic_awake) {
 		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
 					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
-			udelay(2);
 
 		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
 				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
@@ -1322,13 +1329,24 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	int idx;
 	u16 copy_size, cmd_size, scratch_size;
 	bool had_nocopy = false;
+	u8 group_id = iwl_cmd_groupid(cmd->id);
 	int i, ret;
 	u32 cmd_pos;
 	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
 	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
 
-	copy_size = sizeof(out_cmd->hdr);
-	cmd_size = sizeof(out_cmd->hdr);
+	if (WARN(!trans_pcie->wide_cmd_header &&
+		 group_id > IWL_ALWAYS_LONG_GROUP,
+		 "unsupported wide command %#x\n", cmd->id))
+		return -EINVAL;
+
+	if (group_id != 0) {
+		copy_size = sizeof(struct iwl_cmd_header_wide);
+		cmd_size = sizeof(struct iwl_cmd_header_wide);
+	} else {
+		copy_size = sizeof(struct iwl_cmd_header);
+		cmd_size = sizeof(struct iwl_cmd_header);
+	}
 
 	/* need one for the header if the first is NOCOPY */
 	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
@@ -1418,16 +1436,32 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		out_meta->source = cmd;
 
 	/* set up the header */
-
-	out_cmd->hdr.cmd = cmd->id;
-	out_cmd->hdr.flags = 0;
-	out_cmd->hdr.sequence =
-		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
-			    INDEX_TO_SEQ(q->write_ptr));
+	if (group_id != 0) {
+		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
+		out_cmd->hdr_wide.group_id = group_id;
+		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
+		out_cmd->hdr_wide.length =
+			cpu_to_le16(cmd_size -
+				    sizeof(struct iwl_cmd_header_wide));
+		out_cmd->hdr_wide.reserved = 0;
+		out_cmd->hdr_wide.sequence =
+			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+				    INDEX_TO_SEQ(q->write_ptr));
+
+		cmd_pos = sizeof(struct iwl_cmd_header_wide);
+		copy_size = sizeof(struct iwl_cmd_header_wide);
+	} else {
+		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
+		out_cmd->hdr.sequence =
+			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+				    INDEX_TO_SEQ(q->write_ptr));
+		out_cmd->hdr.group_id = 0;
+
+		cmd_pos = sizeof(struct iwl_cmd_header);
+		copy_size = sizeof(struct iwl_cmd_header);
+	}
 
 	/* and copy the data that needs to be copied */
-	cmd_pos = offsetof(struct iwl_device_cmd, payload);
-	copy_size = sizeof(out_cmd->hdr);
 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
 		int copy;
 
@@ -1466,9 +1500,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 	}
 
 	IWL_DEBUG_HC(trans,
-		     "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
 		     get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
-		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+		     group_id, out_cmd->hdr.cmd,
+		     le16_to_cpu(out_cmd->hdr.sequence),
 		     cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
 
 	/* start the TFD with the scratchbuf */
@@ -1518,12 +1553,14 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
 	}
 
+	BUILD_BUG_ON(IWL_NUM_OF_TBS + CMD_TB_BITMAP_POS >
+		     sizeof(out_meta->flags) * BITS_PER_BYTE);
 	out_meta->flags = cmd->flags;
 	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
 		kzfree(txq->entries[idx].free_buf);
 	txq->entries[idx].free_buf = dup_buf;
 
-	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr);
+	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
 
 	/* start timer if queue currently empty */
 	if (q->read_ptr == q->write_ptr && txq->wd_timeout)
@@ -1554,15 +1591,13 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 /*
  * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
  * @rxb: Rx buffer to reclaim
- * @handler_status: return value of the handler of the command
- *	(put in setup_rx_handlers)
  *
  * If an Rx buffer has an async callback associated with it the callback
  * will be executed.  The attached skb (if present) will only be freed
  * if the callback returns 1
  */
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
-			    struct iwl_rx_cmd_buffer *rxb, int handler_status)
+			    struct iwl_rx_cmd_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -1601,7 +1636,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 		meta->source->resp_pkt = pkt;
 		meta->source->_rx_page_addr = (unsigned long)page_address(p);
 		meta->source->_rx_page_order = trans_pcie->rx_page_order;
-		meta->source->handler_status = handler_status;
 	}
 
 	iwl_pcie_cmdq_reclaim(trans, txq_id, index);
@@ -1764,7 +1798,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		      struct iwl_device_cmd *dev_cmd, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_hdr *hdr;
 	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 	struct iwl_cmd_meta *out_meta;
 	struct iwl_txq *txq;
@@ -1773,9 +1807,10 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	void *tb1_addr;
 	u16 len, tb1_len, tb2_len;
 	bool wait_write_ptr;
-	__le16 fc = hdr->frame_control;
-	u8 hdr_len = ieee80211_hdrlen(fc);
+	__le16 fc;
+	u8 hdr_len;
 	u16 wifi_seq;
+	int i;
 
 	txq = &trans_pcie->txq[txq_id];
 	q = &txq->q;
@@ -1784,6 +1819,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		      "TX on unused queue %d\n", txq_id))
 		return -EINVAL;
 
+	if (skb_is_nonlinear(skb) &&
+	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
+	    __skb_linearize(skb))
+		return -ENOMEM;
+
+	/* mac80211 always puts the full header into the SKB's head,
+	 * so there's no need to check if it's readable there
+	 */
+	hdr = (struct ieee80211_hdr *)skb->data;
+	fc = hdr->frame_control;
+	hdr_len = ieee80211_hdrlen(fc);
+
 	spin_lock(&txq->lock);
 
 	/* In AGG mode, the index in the ring must correspond to the WiFi
@@ -1814,6 +1861,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
 	out_meta = &txq->entries[q->write_ptr].meta;
+	out_meta->flags = 0;
 
 	/*
 	 * The second TB (tb1) points to the remainder of the TX command
@@ -1847,9 +1895,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	/*
 	 * Set up TFD's third entry to point directly to remainder
-	 * of skb, if any (802.11 null frames have no payload).
+	 * of skb's head, if any
 	 */
-	tb2_len = skb->len - hdr_len;
+	tb2_len = skb_headlen(skb) - hdr_len;
 	if (tb2_len > 0) {
 		dma_addr_t tb2_phys = dma_map_single(trans->dev,
 						     skb->data + hdr_len,
@@ -1862,6 +1910,29 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
 	}
 
+	/* set up the remaining entries to point to the data */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		dma_addr_t tb_phys;
+		int tb_idx;
+
+		if (!skb_frag_size(frag))
+			continue;
+
+		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+					   skb_frag_size(frag), DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+			iwl_pcie_tfd_unmap(trans, out_meta,
+					   &txq->tfds[q->write_ptr]);
+			goto out_err;
+		}
+		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+						skb_frag_size(frag), false);
+
+		out_meta->flags |= BIT(tb_idx + CMD_TB_BITMAP_POS);
+	}
+
 	/* Set up entry for this TFD in Tx byte-count array */
 	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
@@ -1871,14 +1942,25 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			     &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
 			     skb->data + hdr_len, tb2_len);
 	trace_iwlwifi_dev_tx_data(trans->dev, skb,
-				  skb->data + hdr_len, tb2_len);
+				  hdr_len, skb->len - hdr_len);
 
 	wait_write_ptr = ieee80211_has_morefrags(fc);
 
 	/* start timer if queue currently empty */
 	if (q->read_ptr == q->write_ptr) {
-		if (txq->wd_timeout)
-			mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+		if (txq->wd_timeout) {
+			/*
+			 * If the TXQ is active, then set the timer, if not,
+			 * set the timer in remainder so that the timer will
+			 * be armed with the right value when the station will
+			 * wake up.
+			 */
+			if (!txq->frozen)
+				mod_timer(&txq->stuck_timer,
+					  jiffies + txq->wd_timeout);
+			else
+				txq->frozen_expiry_remainder = txq->wd_timeout;
+		}
 		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
 		iwl_trans_pcie_ref(trans);
 	}
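
The TX-path hunks above start mapping each paged skb fragment into its own transfer buffer and record which TB indices are page-backed in out_meta->flags, so that iwl_pcie_tfd_unmap() can pick dma_unmap_page() or dma_unmap_single() per TB. The standalone program below is a minimal userspace model of just that bookkeeping, not driver code: MAX_TBS and TB_BITMAP_POS are stand-ins for the driver's constants, and the unmap calls are replaced with prints.

/* Minimal model of the "which TBs are page-mapped" bitmap: add_tb()
 * records page-backed buffers in a flags word, unmap_all() picks the
 * matching unmap call per TB index. Names here are illustrative. */
#include <stdio.h>
#include <stdbool.h>

#define MAX_TBS       20	/* stand-in for IWL_NUM_OF_TBS */
#define TB_BITMAP_POS 2		/* stand-in for CMD_TB_BITMAP_POS */

struct meta {
	unsigned int flags;	/* bit (i + TB_BITMAP_POS) set => TB i is page-mapped */
	int num_tbs;
};

static int add_tb(struct meta *m, bool from_page)
{
	int idx = m->num_tbs++;

	if (from_page)
		m->flags |= 1u << (idx + TB_BITMAP_POS);
	return idx;	/* mirrors iwl_pcie_txq_build_tfd() now returning the TB index */
}

static void unmap_all(const struct meta *m)
{
	/* TB 0 is never freed in the driver (scratch buffer), so start at 1 */
	for (int i = 1; i < m->num_tbs; i++) {
		if (m->flags & (1u << (i + TB_BITMAP_POS)))
			printf("TB %d: dma_unmap_page()\n", i);
		else
			printf("TB %d: dma_unmap_single()\n", i);
	}
}

int main(void)
{
	struct meta m = { 0 };

	add_tb(&m, false);	/* scratch buffer */
	add_tb(&m, false);	/* tb1: rest of the TX command */
	add_tb(&m, false);	/* tb2: skb head past the 802.11 header */
	add_tb(&m, true);	/* one TB per paged skb fragment */
	add_tb(&m, true);

	unmap_all(&m);
	return 0;
}

Keeping the bitmap in the per-entry meta means the unmap path needs no access to the skb itself, which matches the hunks above where iwl_pcie_tfd_unmap() only sees the TFD and its meta.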
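
The enqueue_hcmd hunks choose between the legacy command header and a wide header whenever the command's group id is non-zero, and then derive cmd_pos/copy_size from whichever header was used. The sketch below models only that selection in plain C; the wide-header field order and the group-id/opcode split are assumptions made for illustration, with the driver's iwl_cmd_header_wide definition being authoritative.

/* Sketch of picking the legacy vs. wide host-command header based on
 * the command's group id. Struct layouts and helpers are approximate. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cmd_header {		/* legacy 4-byte header */
	uint8_t  cmd;
	uint8_t  group_id;
	uint16_t sequence;
};

struct cmd_header_wide {	/* 8-byte header, assumed field order */
	uint8_t  cmd;
	uint8_t  group_id;
	uint16_t sequence;
	uint16_t length;	/* payload length, excluding this header */
	uint8_t  reserved;
	uint8_t  version;
};

static uint8_t cmd_groupid(uint32_t id) { return (id >> 8) & 0xff; }
static uint8_t cmd_opcode(uint32_t id)  { return id & 0xff; }

/* Returns how many header bytes were written into buf. */
static size_t build_header(uint8_t *buf, uint32_t id, uint16_t seq,
			   uint16_t total_cmd_size)
{
	uint8_t group = cmd_groupid(id);

	if (group != 0) {
		struct cmd_header_wide h = {
			.cmd      = cmd_opcode(id),
			.group_id = group,
			.sequence = seq,
			.length   = (uint16_t)(total_cmd_size - sizeof(h)),
			.version  = 0,
		};
		memcpy(buf, &h, sizeof(h));
		return sizeof(h);
	} else {
		struct cmd_header h = {
			.cmd      = cmd_opcode(id),
			.group_id = 0,
			.sequence = seq,
		};
		memcpy(buf, &h, sizeof(h));
		return sizeof(h);
	}
}

int main(void)
{
	uint8_t buf[64];

	printf("legacy header: %zu bytes\n", build_header(buf, 0x0017, 0x1234, 32));
	printf("wide header:   %zu bytes\n", build_header(buf, 0x0105, 0x1234, 32));
	return 0;
}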
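
The final hunk arms the stuck-queue watchdog only when the queue is not frozen and otherwise parks the timeout in frozen_expiry_remainder so it can be re-armed when the queue wakes. A toy model of that decision, with illustrative names only, is below.

/* Toy model: arm the watchdog for active queues, remember the timeout
 * for frozen ones so it can be restored later. Not driver code. */
#include <stdio.h>
#include <stdbool.h>

struct txq_model {
	bool frozen;
	unsigned long wd_timeout;		/* configured watchdog, in "jiffies" */
	unsigned long frozen_expiry_remainder;
	unsigned long timer_expires;		/* 0 = not armed */
};

static void first_tx_on_empty_queue(struct txq_model *q, unsigned long now)
{
	if (!q->wd_timeout)
		return;

	if (!q->frozen)
		q->timer_expires = now + q->wd_timeout;		/* mod_timer() */
	else
		q->frozen_expiry_remainder = q->wd_timeout;	/* re-armed on unfreeze */
}

int main(void)
{
	struct txq_model active = { .frozen = false, .wd_timeout = 250 };
	struct txq_model asleep = { .frozen = true,  .wd_timeout = 250 };

	first_tx_on_empty_queue(&active, 1000);
	first_tx_on_empty_queue(&asleep, 1000);

	printf("active: expires=%lu remainder=%lu\n",
	       active.timer_expires, active.frozen_expiry_remainder);
	printf("frozen: expires=%lu remainder=%lu\n",
	       asleep.timer_expires, asleep.frozen_expiry_remainder);
	return 0;
}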