These changes are the raw update to the linux-4.4.6-rt14 kernel sources (from kvmfornfv.git).
diff --git a/kernel/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/kernel/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ec56a9b..c82ab87 100644
@@ -1,6 +1,8 @@
-/* bnx2x_cmn.c: Broadcom Everest network driver.
+/* bnx2x_cmn.c: QLogic Everest network driver.
  *
  * Copyright (c) 2007-2013 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
+ * All rights reserved
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -262,9 +264,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
        if (likely(skb)) {
                (*pkts_compl)++;
                (*bytes_compl) += skb->len;
+               dev_kfree_skb_any(skb);
        }
 
-       dev_kfree_skb_any(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;
 
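A note on the hunk above: the skb is now freed under the existing NULL check
instead of unconditionally. Minimal sketch of the resulting pattern (the
helper name is illustrative, not driver code); on kernels of this vintage the
IRQ-context path taken by dev_kfree_skb_any() dereferences the skb without a
NULL check, so the guard matters:

	/* Free a completed TX skb only when one is actually attached. */
	static inline void free_completed_skb(struct sk_buff *skb)
	{
		if (likely(skb))
			dev_kfree_skb_any(skb);	/* safe in any context */
	}
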
@@ -544,30 +546,46 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                              u16 index, gfp_t gfp_mask)
 {
-       struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+       struct bnx2x_alloc_pool *pool = &fp->page_pool;
        dma_addr_t mapping;
 
-       if (unlikely(page == NULL)) {
-               BNX2X_ERR("Can't alloc sge\n");
-               return -ENOMEM;
+       if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
+
+               /* put page reference used by the memory pool, since we
+                * won't be using this page as the mempool anymore.
+                */
+               if (pool->page)
+                       put_page(pool->page);
+
+               pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
+               if (unlikely(!pool->page)) {
+                       BNX2X_ERR("Can't alloc sge\n");
+                       return -ENOMEM;
+               }
+
+               pool->offset = 0;
        }
 
-       mapping = dma_map_page(&bp->pdev->dev, page, 0,
-                              SGE_PAGES, DMA_FROM_DEVICE);
+       mapping = dma_map_page(&bp->pdev->dev, pool->page,
+                              pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-               __free_pages(page, PAGES_PER_SGE_SHIFT);
                BNX2X_ERR("Can't map sge\n");
                return -ENOMEM;
        }
 
-       sw_buf->page = page;
+       get_page(pool->page);
+       sw_buf->page = pool->page;
+       sw_buf->offset = pool->offset;
+
        dma_unmap_addr_set(sw_buf, mapping, mapping);
 
        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));
 
+       pool->offset += SGE_PAGE_SIZE;
+
        return 0;
 }
 
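A note on the hunk above: instead of dedicating a full page allocation to
every SGE, the fastpath now keeps a pool page per queue and hands out
SGE_PAGE_SIZE chunks from it, taking one page reference per consumer.
Condensed sketch of the refcounting scheme (structure and names reduced for
illustration; the driver's pool lives in struct bnx2x_fastpath):

	struct frag_pool {
		struct page *page;	/* current backing page */
		unsigned int offset;	/* next free chunk in that page */
	};

	static int pool_get_chunk(struct frag_pool *pool, unsigned int size,
				  struct page **page, unsigned int *off,
				  gfp_t gfp)
	{
		if (!pool->page || PAGE_SIZE - pool->offset < size) {
			if (pool->page)
				put_page(pool->page);	/* drop pool's ref */
			pool->page = alloc_pages(gfp, 0);
			if (unlikely(!pool->page))
				return -ENOMEM;
			pool->offset = 0;
		}

		get_page(pool->page);		/* ref now owned by caller */
		*page = pool->page;
		*off = pool->offset;
		pool->offset += size;
		return 0;
	}

The pool's own reference (from alloc_pages()) is dropped once the page can no
longer supply a whole chunk; each consumer reference is dropped when the
stack releases the corresponding fragment.
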
@@ -629,20 +647,22 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                        return err;
                }
 
-               /* Unmap the page as we're going to pass it to the stack */
                dma_unmap_page(&bp->pdev->dev,
                               dma_unmap_addr(&old_rx_pg, mapping),
-                              SGE_PAGES, DMA_FROM_DEVICE);
+                              SGE_PAGE_SIZE, DMA_FROM_DEVICE);
                /* Add one frag and update the appropriate fields in the skb */
                if (fp->mode == TPA_MODE_LRO)
-                       skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+                       skb_fill_page_desc(skb, j, old_rx_pg.page,
+                                          old_rx_pg.offset, frag_len);
                else { /* GRO */
                        int rem;
                        int offset = 0;
                        for (rem = frag_len; rem > 0; rem -= gro_size) {
                                int len = rem > gro_size ? gro_size : rem;
                                skb_fill_page_desc(skb, frag_id++,
-                                                  old_rx_pg.page, offset, len);
+                                                  old_rx_pg.page,
+                                                  old_rx_pg.offset + offset,
+                                                  len);
                                if (offset)
                                        get_page(old_rx_pg.page);
                                offset += len;
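A note on the hunk above: since an SGE buffer now starts at an offset inside
a shared page, that offset has to travel with the page when the fragment is
attached to the skb. Sketch of the attach step (illustrative helper; the
driver folds frag_len into its own length accounting):

	/* Attach a sub-page chunk as fragment 'i' of an skb. The caller
	 * must already hold the page reference being handed over.
	 */
	static void attach_sub_page(struct sk_buff *skb, int i,
				    struct page *page, unsigned int offset,
				    unsigned int len)
	{
		skb_fill_page_desc(skb, i, page, offset, len);
		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
	}
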
@@ -662,7 +682,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
 {
        if (fp->rx_frag_size)
-               put_page(virt_to_head_page(data));
+               skb_free_frag(data);
        else
                kfree(data);
 }
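A note on the hunk above: skb_free_frag() is the dedicated free for memory
obtained from the page-fragment allocators, replacing the open-coded
put_page(virt_to_head_page(data)). Pairing sketch (the size is illustrative):

	void *buf = netdev_alloc_frag(2048);	/* frag-allocator memory */

	if (buf)
		skb_free_frag(buf);		/* its matching free */
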
@@ -671,7 +691,7 @@ static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
 {
        if (fp->rx_frag_size) {
                /* GFP_KERNEL allocations are used only during initialization */
-               if (unlikely(gfp_mask & __GFP_WAIT))
+               if (unlikely(gfpflags_allow_blocking(gfp_mask)))
                        return (void *)__get_free_page(gfp_mask);
 
                return netdev_alloc_frag(fp->rx_frag_size);
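A note on the hunk above: __GFP_WAIT was retired in the v4.4 timeframe in
favour of __GFP_DIRECT_RECLAIM, and gfpflags_allow_blocking() is the
idiomatic test for "may this allocation sleep?". Sketch of the pattern
(illustrative helper; like the driver, the blocking path just takes a whole
page):

	static void *rx_frag_alloc(unsigned int size, gfp_t gfp)
	{
		/* GFP_KERNEL on the init path allows blocking. */
		if (gfpflags_allow_blocking(gfp))
			return (void *)__get_free_page(gfp);	/* may sleep */

		return netdev_alloc_frag(size);	/* atomic-safe path */
	}
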
@@ -1170,7 +1190,7 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
                /* Calculate the current MAX line speed limit for the MF
                 * devices
                 */
-               if (IS_MF_SI(bp))
+               if (IS_MF_PERCENT_BW(bp))
                        line_speed = (line_speed * maxCfg) / 100;
                else { /* SD mode */
                        u16 vn_max_rate = maxCfg * 100;
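A note on the hunk above: IS_MF_PERCENT_BW() generalizes the old IS_MF_SI()
test to every multi-function mode in which maxCfg expresses a percentage of
the physical link, while SD mode keeps treating maxCfg as hundreds of Mbps.
Worked example with illustrative numbers:

	u16 line_speed = 10000;			/* 10G link, in Mbps */
	u16 maxCfg = 40;			/* MF max bandwidth: 40% */

	line_speed = (line_speed * maxCfg) / 100;	/* -> 4000 Mbps */
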
@@ -2085,9 +2105,14 @@ int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
                if (rss_obj->udp_rss_v6)
                        __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
 
-               if (!CHIP_IS_E1x(bp))
+               if (!CHIP_IS_E1x(bp)) {
+                       /* valid only for TUNN_MODE_VXLAN tunnel mode */
+                       __set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
+                       __set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
+
                        /* valid only for TUNN_MODE_GRE tunnel mode */
-                       __set_bit(BNX2X_RSS_GRE_INNER_HDRS, &params.rss_flags);
+                       __set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
+               }
        } else {
                __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
        }
@@ -2492,6 +2517,20 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
                fp->mode = TPA_MODE_DISABLED;
 }
 
+void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
+{
+       u32 cur;
+
+       if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
+               return;
+
+       cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
+       DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
+          cur, state);
+
+       SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
+}
+
 int bnx2x_load_cnic(struct bnx2x *bp)
 {
        int i, rc, port = BP_PORT(bp);
@@ -2809,6 +2848,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 
        /* Start fast path */
 
+       /* Re-configure vlan filters */
+       rc = bnx2x_vlan_reconfigure_vid(bp);
+       if (rc)
+               LOAD_ERROR_EXIT(bp, load_error3);
+
        /* Initialize Rx filter. */
        bnx2x_set_rx_mode_inner(bp);
 
@@ -2855,6 +2899,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                /* mark driver is loaded in shmem2 */
                u32 val;
                val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+               val &= ~DRV_FLAGS_MTU_MASK;
+               val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
                SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
                          val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
                          DRV_FLAGS_CAPABILITIES_LOADED_L2);
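A note on the hunk above: the current MTU is now published to the management
firmware inside the same shmem2 capabilities word, as a masked bit-field.
Read-modify-write sketch (the mask/shift values below are placeholders; the
real ones come from the bnx2x headers):

	#define DRV_FLAGS_MTU_SHIFT	16			/* placeholder */
	#define DRV_FLAGS_MTU_MASK	(0xffffU << DRV_FLAGS_MTU_SHIFT)

	static u32 pack_mtu(u32 val, u32 mtu)
	{
		val &= ~DRV_FLAGS_MTU_MASK;	/* clear the stale MTU */
		val |= (mtu << DRV_FLAGS_MTU_SHIFT) & DRV_FLAGS_MTU_MASK;
		return val;
	}
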
@@ -2867,10 +2913,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                return -EBUSY;
        }
 
+       /* Update driver data for On-Chip MFW dump. */
+       if (IS_PF(bp))
+               bnx2x_update_mfw_dump(bp);
+
        /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
        if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
                bnx2x_dcbx_init(bp, false);
 
+       if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+               bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
+
        DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
 
        return 0;
@@ -2938,6 +2991,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 
        DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
 
+       if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
+               bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
+
        /* mark driver is unloaded in shmem2 */
        if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
                u32 val;
@@ -3374,25 +3430,29 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
        return rc;
 }
 
-#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBDs and last BD) */
+#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS         4
+
+/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
+#define BNX2X_NUM_TSO_WIN_SUB_BDS               3
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
 /* check if packet requires linearization (packet is too fragmented)
    no need to check fragmentation if page size > 8K (there will be no
    violation to FW restrictions) */
 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
                             u32 xmit_type)
 {
-       int to_copy = 0;
-       int hlen = 0;
-       int first_bd_sz = 0;
+       int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
+       int to_copy = 0, hlen = 0;
 
-       /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
-       if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
+       if (xmit_type & XMIT_GSO_ENC)
+               num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
 
+       if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
                if (xmit_type & XMIT_GSO) {
                        unsigned short lso_mss = skb_shinfo(skb)->gso_size;
-                       /* Check if LSO packet needs to be copied:
-                          3 = 1 (for headers BD) + 2 (for PBD and last BD) */
-                       int wnd_size = MAX_FETCH_BD - 3;
+                       int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
                        /* Number of windows to check */
                        int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
                        int wnd_idx = 0;
@@ -3400,8 +3460,13 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
                        u32 wnd_sum = 0;
 
                        /* Headers length */
-                       hlen = (int)(skb_transport_header(skb) - skb->data) +
-                               tcp_hdrlen(skb);
+                       if (xmit_type & XMIT_GSO_ENC)
+                               hlen = (int)(skb_inner_transport_header(skb) -
+                                            skb->data) +
+                                            inner_tcp_hdrlen(skb);
+                       else
+                               hlen = (int)(skb_transport_header(skb) -
+                                            skb->data) + tcp_hdrlen(skb);
 
                        /* Amount of data (w/o headers) on linear part of SKB*/
                        first_bd_sz = skb_headlen(skb) - hlen;
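A note on the hunk above: the windowed check in this function (partly visible
here) is unchanged in substance. It slides a window of wnd_size fragments
across the skb and linearizes if any window sums to less than one MSS, since
a single TSO segment would then span more BDs than the firmware can fetch
(MAX_FETCH_BD); the change is that encapsulated GSO reserves one extra BD and
measures the headers from the inner TCP header. Standalone sketch of the
sliding-window idea (simplified, illustrative):

	/* Return true if any wnd_size consecutive frags sum below one MSS. */
	static bool needs_linearization(const unsigned int *frag_len,
					int nr_frags, int wnd_size,
					unsigned int mss)
	{
		unsigned int wnd_sum = 0;
		int i;

		if (nr_frags < wnd_size)
			return false;

		for (i = 0; i < wnd_size; i++)
			wnd_sum += frag_len[i];
		if (wnd_sum < mss)
			return true;

		for (i = wnd_size; i < nr_frags; i++) {
			wnd_sum += frag_len[i] - frag_len[i - wnd_size];
			if (wnd_sum < mss)
				return true;
		}
		return false;
	}
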
@@ -3654,7 +3719,7 @@ static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
                pbd2->fw_ip_hdr_to_payload_w =
                        hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
                pbd_e2->data.tunnel_data.flags |=
-                       ETH_TUNNEL_DATA_IP_HDR_TYPE_OUTER;
+                       ETH_TUNNEL_DATA_IPV6_OUTER;
        }
 
        pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
@@ -4161,6 +4226,41 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 }
 
+void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
+{
+       int mfw_vn = BP_FW_MB_IDX(bp);
+       u32 tmp;
+
+       /* If the shmem shouldn't affect configuration, use an identity mapping */
+       if (!IS_MF_BD(bp)) {
+               int i;
+
+               for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
+                       c2s_map[i] = i;
+               *c2s_default = 0;
+
+               return;
+       }
+
+       tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
+       tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+       c2s_map[0] = tmp & 0xff;
+       c2s_map[1] = (tmp >> 8) & 0xff;
+       c2s_map[2] = (tmp >> 16) & 0xff;
+       c2s_map[3] = (tmp >> 24) & 0xff;
+
+       tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
+       tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+       c2s_map[4] = tmp & 0xff;
+       c2s_map[5] = (tmp >> 8) & 0xff;
+       c2s_map[6] = (tmp >> 16) & 0xff;
+       c2s_map[7] = (tmp >> 24) & 0xff;
+
+       tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
+       tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
+       *c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
+}
+
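A note on the function above: c2s_pcp_map_lower/upper pack eight per-priority
bytes into two 32-bit shmem words (byte-swapped first, since the MFW stores
them big-endian), and c2s_pcp_map_default carries one default byte per
function. The open-coded extraction is plain byte unpacking; an equivalent
loop form, shown only as a sketch:

	/* Unpack two 32-bit words into an 8-entry priority map. */
	static void unpack_c2s_map(u32 lower, u32 upper, u8 map[8])
	{
		int i;

		for (i = 0; i < 4; i++) {
			map[i]     = (lower >> (8 * i)) & 0xff;
			map[i + 4] = (upper >> (8 * i)) & 0xff;
		}
	}
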
 /**
  * bnx2x_setup_tc - routine to configure net_device for multi tc
  *
@@ -4171,8 +4271,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
  */
 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
 {
-       int cos, prio, count, offset;
        struct bnx2x *bp = netdev_priv(dev);
+       u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
+       int cos, prio, count, offset;
 
        /* setup tc must be called under rtnl lock */
        ASSERT_RTNL();
@@ -4196,12 +4297,16 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
                return -EINVAL;
        }
 
+       bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
+
        /* configure priority to traffic class mapping */
        for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
-               netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
+               int outer_prio = c2s_map[prio];
+
+               netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
                DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
                   "mapping priority %d to tc %d\n",
-                  prio, bp->prio_to_cos[prio]);
+                  outer_prio, bp->prio_to_cos[outer_prio]);
        }
 
        /* Use this configuration to differentiate tc0 from other COSes
@@ -4255,6 +4360,9 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
        if (netif_running(dev))
                rc = bnx2x_set_eth_mac(bp, true);
 
+       if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+               SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
        return rc;
 }
 
@@ -4808,6 +4916,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
         */
        dev->mtu = new_mtu;
 
+       if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
+               SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
+
        return bnx2x_reload_if_running(dev);
 }