kvmfornfv.git: these changes are the raw update to the linux-4.4.6-rt14 kernel sources.
diff --git a/kernel/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/kernel/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 21d9497..f6a7161 100644
--- a/kernel/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/kernel/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -710,7 +710,8 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
        if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
                return 0;
 
-       DBGPR("  %s promiscuous mode\n", enable ? "entering" : "leaving");
+       netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
+                 enable ? "entering" : "leaving");
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
 
        return 0;
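
The DBGPR()-to-netif_dbg() conversions that make up much of this update move the driver's debug output onto the standard netdev message-level machinery, so each message class (drv, tx_queued and rx_status are the ones used in this file) can be toggled on a running system with "ethtool -s <dev> msglvl ..." instead of requiring a debug build. A minimal sketch of the gating, assuming a conventional driver-private struct; the foo_* names are illustrative, not from this patch:

#include <linux/module.h>
#include <linux/netdevice.h>

/* Hypothetical private data; netif_dbg() only requires msg_enable. */
struct foo_prv_data {
	struct net_device *netdev;
	u32 msg_enable;
};

static int debug = -1;			/* -1 selects the default bitmap */
module_param(debug, int, 0644);

static void foo_set_promiscuous(struct foo_prv_data *pdata, bool enable)
{
	/* Normally done once at probe time. */
	pdata->msg_enable = netif_msg_init(debug,
					   NETIF_MSG_DRV | NETIF_MSG_LINK);

	/* Emits only when NETIF_MSG_DRV is set in msg_enable (and, as
	 * with netdev_dbg(), only on DEBUG or dynamic-debug builds).
	 */
	netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
		  enable ? "entering" : "leaving");
}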
@@ -724,7 +725,8 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
        if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
                return 0;
 
-       DBGPR("  %s allmulti mode\n", enable ? "entering" : "leaving");
+       netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
+                 enable ? "entering" : "leaving");
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
 
        return 0;
@@ -749,8 +751,9 @@ static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
                mac_addr[0] = ha->addr[4];
                mac_addr[1] = ha->addr[5];
 
-               DBGPR("  adding mac address %pM at 0x%04x\n", ha->addr,
-                     *mac_reg);
+               netif_dbg(pdata, drv, pdata->netdev,
+                         "adding mac address %pM at %#x\n",
+                         ha->addr, *mac_reg);
 
                XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
        }
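
For context, the two bytes stored above land in the high half of an address-register pair; the start of xgbe_set_mac_reg() (outside this hunk) packs address bytes 0-3 into the low register the same way. A hedged sketch of that hi/lo packing, with explicit shifts standing in for the driver's byte-pointer writes (which assume a little-endian layout); illustrative only:

#include <linux/types.h>

/* Pack a 6-byte MAC into a MACA1HR/MACA1LR-style register pair:
 * bytes 0-3 in the low word, bytes 4-5 in the high word, matching
 * the byte assignments visible in the hunk above.
 */
static void mac_addr_pack(const u8 *addr, u32 *mac_hi, u32 *mac_lo)
{
	*mac_lo = addr[0] | ((u32)addr[1] << 8) |
		  ((u32)addr[2] << 16) | ((u32)addr[3] << 24);
	*mac_hi = addr[4] | ((u32)addr[5] << 8);
}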
@@ -907,23 +910,6 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
        else
                mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
 
-       /* If the PCS is changing modes, match the MAC speed to it */
-       if (((mmd_address >> 16) == MDIO_MMD_PCS) &&
-           ((mmd_address & 0xffff) == MDIO_CTRL2)) {
-               struct phy_device *phydev = pdata->phydev;
-
-               if (mmd_data & MDIO_PCS_CTRL2_TYPE) {
-                       /* KX mode */
-                       if (phydev->supported & SUPPORTED_1000baseKX_Full)
-                               xgbe_set_gmii_speed(pdata);
-                       else
-                               xgbe_set_gmii_2500_speed(pdata);
-               } else {
-                       /* KR mode */
-                       xgbe_set_xgmii_speed(pdata);
-               }
-       }
-
        /* The PCS registers are accessed using mmio. The underlying APB3
         * management interface uses indirect addressing to access the MMD
         * register sets. This requires accessing of the PCS register in two
@@ -1124,6 +1110,7 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
        unsigned int rx_usecs = pdata->rx_usecs;
        unsigned int rx_frames = pdata->rx_frames;
        unsigned int inte;
+       dma_addr_t hdr_dma, buf_dma;
 
        if (!rx_usecs && !rx_frames) {
                /* No coalescing, interrupt for every descriptor */
@@ -1143,10 +1130,12 @@ static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
         *   Set buffer 2 (hi) address to buffer dma address (hi) and
         *     set control bits OWN and INTE
         */
-       rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma));
-       rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma));
-       rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
-       rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
+       hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
+       buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
+       rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
+       rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
+       rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
+       rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
 
        XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
 
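The descriptor reset now derives each address from a dma_base/dma_off pair (the field names suggest a shared mapping's base plus an offset within it, which fits Rx buffers carved out of DMA-mapped pages) and then splits the 64-bit result across two 32-bit little-endian descriptor words. A sketch of that split, using the real lower_32_bits()/upper_32_bits()/cpu_to_le32() helpers on a made-up descriptor layout:

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical 16-byte hardware descriptor: four little-endian words. */
struct rx_desc {
	__le32 desc0;	/* header address, low 32 bits  */
	__le32 desc1;	/* header address, high 32 bits */
	__le32 desc2;	/* buffer address, low 32 bits  */
	__le32 desc3;	/* buffer address, high 32 bits plus control bits */
};

static void rx_desc_set_addrs(struct rx_desc *rdesc,
			      dma_addr_t hdr_base, unsigned int hdr_off,
			      dma_addr_t buf_base, unsigned int buf_off)
{
	dma_addr_t hdr_dma = hdr_base + hdr_off;
	dma_addr_t buf_dma = buf_base + buf_off;

	rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
	rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
	rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
	rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
}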
@@ -1322,7 +1311,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
        for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
                switch (ets->tc_tsa[i]) {
                case IEEE_8021QAZ_TSA_STRICT:
-                       DBGPR("  TC%u using SP\n", i);
+                       netif_dbg(pdata, drv, pdata->netdev,
+                                 "TC%u using SP\n", i);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
                                               MTL_TSA_SP);
                        break;
@@ -1330,7 +1320,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
                        weight = total_weight * ets->tc_tx_bw[i] / 100;
                        weight = clamp(weight, min_weight, total_weight);
 
-                       DBGPR("  TC%u using DWRR (weight %u)\n", i, weight);
+                       netif_dbg(pdata, drv, pdata->netdev,
+                                 "TC%u using DWRR (weight %u)\n", i, weight);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
                                               MTL_TSA_ETS);
                        XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
@@ -1359,7 +1350,8 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
                }
                mask &= 0xff;
 
-               DBGPR("  TC%u PFC mask=%#x\n", tc, mask);
+               netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n",
+                         tc, mask);
                reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
                reg_val = XGMAC_IOREAD(pdata, reg);
 
@@ -1457,8 +1449,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
        /* Create a context descriptor if this is a TSO packet */
        if (tso_context || vlan_context) {
                if (tso_context) {
-                       DBGPR("  TSO context descriptor, mss=%u\n",
-                             packet->mss);
+                       netif_dbg(pdata, tx_queued, pdata->netdev,
+                                 "TSO context descriptor, mss=%u\n",
+                                 packet->mss);
 
                        /* Set the MSS size */
                        XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
@@ -1476,8 +1469,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
                }
 
                if (vlan_context) {
-                       DBGPR("  VLAN context descriptor, ctag=%u\n",
-                             packet->vlan_ctag);
+                       netif_dbg(pdata, tx_queued, pdata->netdev,
+                                 "VLAN context descriptor, ctag=%u\n",
+                                 packet->vlan_ctag);
 
                        /* Mark it as a CONTEXT descriptor */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
@@ -1533,6 +1527,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
                                  packet->tcp_payload_len);
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
                                  packet->tcp_header_len / 4);
+
+               pdata->ext_stats.tx_tso_packets++;
        } else {
                /* Enable CRC and Pad Insertion */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
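
The descriptor writes throughout this function go through XGMAC_SET_BITS_LE(), which read-modify-writes a named bit field inside a little-endian descriptor word. A generic sketch of that pattern, assuming the macro boils down to an index/width pair per field (the real driver derives those from its register definitions; this helper is illustrative):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Set a 'width'-bit field at bit 'index' inside a little-endian
 * descriptor word, preserving the other bits. This is the pattern
 * behind XGMAC_SET_BITS_LE(); field positions here are made up.
 */
static inline void set_bits_le32(__le32 *word, unsigned int index,
				 unsigned int width, u32 val)
{
	u32 v = le32_to_cpu(*word);
	u32 mask = ((1U << width) - 1) << index;

	v = (v & ~mask) | ((val << index) & mask);
	*word = cpu_to_le32(v);
}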
@@ -1594,12 +1590,12 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
        rdesc = rdata->rdesc;
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
 
-#ifdef XGMAC_ENABLE_TX_DESC_DUMP
-       xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
-#endif
+       if (netif_msg_tx_queued(pdata))
+               xgbe_dump_tx_desc(pdata, ring, start_index,
+                                 packet->rdesc_count, 1);
 
        /* Make sure ownership is written to the descriptor */
-       dma_wmb();
+       smp_wmb();
 
        ring->cur = cur_index + 1;
        if (!packet->skb->xmit_more ||
@@ -1618,11 +1614,12 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
 
 static int xgbe_dev_read(struct xgbe_channel *channel)
 {
+       struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_packet_data *packet = &ring->packet_data;
-       struct net_device *netdev = channel->pdata->netdev;
+       struct net_device *netdev = pdata->netdev;
        unsigned int err, etlt, l34t;
 
        DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
@@ -1637,9 +1634,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
        /* Make sure descriptor fields are read after reading the OWN bit */
        dma_rmb();
 
-#ifdef XGMAC_ENABLE_RX_DESC_DUMP
-       xgbe_dump_rx_desc(ring, rdesc, ring->cur);
-#endif
+       if (netif_msg_rx_status(pdata))
+               xgbe_dump_rx_desc(pdata, ring, ring->cur);
 
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
                /* Timestamp Context Descriptor */
@@ -1661,9 +1657,12 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
                               CONTEXT_NEXT, 1);
 
        /* Get the header length */
-       if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+       if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
                rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
                                                      RX_NORMAL_DESC2, HL);
+               if (rdata->rx.hdr_len)
+                       pdata->ext_stats.rx_split_header_packets++;
+       }
 
        /* Get the RSS hash */
        if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
@@ -1700,14 +1699,14 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
                       INCOMPLETE, 0);
 
        /* Set checksum done indicator as appropriate */
-       if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
+       if (netdev->features & NETIF_F_RXCSUM)
                XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                               CSUM_DONE, 1);
 
        /* Check for errors (only valid in last descriptor) */
        err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
        etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
-       DBGPR("  err=%u, etlt=%#x\n", err, etlt);
+       netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
 
        if (!err || !etlt) {
                /* No error if err is 0 or etlt is 0 */
@@ -1718,7 +1717,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
                        packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
                                                              RX_NORMAL_DESC0,
                                                              OVT);
-                       DBGPR("  vlan-ctag=0x%04x\n", packet->vlan_ctag);
+                       netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
+                                 packet->vlan_ctag);
                }
        } else {
                if ((etlt == 0x05) || (etlt == 0x06))
@@ -1849,7 +1849,7 @@ static int xgbe_exit(struct xgbe_prv_data *pdata)
        usleep_range(10, 15);
 
        /* Poll Until Poll Condition */
-       while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
+       while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
                usleep_range(500, 600);
 
        if (!count)
@@ -1873,7 +1873,7 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
        /* Poll Until Poll Condition */
        for (i = 0; i < pdata->tx_q_count; i++) {
                count = 2000;
-               while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
+               while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
                                                        MTL_Q_TQOMR, FTQ))
                        usleep_range(500, 600);
 
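The switch from count-- to --count in the poll loops of xgbe_exit() and xgbe_flush_tx_queues() above is a real timeout-detection fix, not a style change: with a post-decrement, the final failing test leaves count at -1 (or wrapped around, if unsigned), so the following !count check can never report the timeout. A compact standalone illustration, with the hardware poll replaced by a stand-in that never succeeds:

#include <stdio.h>

static int still_busy(void)
{
	return 1;	/* stand-in for a poll bit that never clears */
}

int main(void)
{
	int count;

	/* Post-decrement: polls 3 times, then the test evaluates 0
	 * (loop exits) but count is left at -1, so !count is false
	 * and the timeout goes unreported.
	 */
	count = 3;
	while (count-- && still_busy())
		;
	printf("post-decrement: count=%d, timeout %sdetected\n",
	       count, !count ? "" : "NOT ");

	/* Pre-decrement: count reaches exactly 0 on timeout. */
	count = 3;
	while (--count && still_busy())
		;
	printf("pre-decrement:  count=%d, timeout %sdetected\n",
	       count, !count ? "" : "NOT ");

	return 0;
}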
@@ -1940,84 +1940,31 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
 static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
                                                  unsigned int queue_count)
 {
-       unsigned int q_fifo_size = 0;
-       enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+       unsigned int q_fifo_size;
+       unsigned int p_fifo;
 
-       /* Calculate Tx/Rx fifo share per queue */
-       switch (fifo_size) {
-       case 0:
-               q_fifo_size = XGBE_FIFO_SIZE_B(128);
-               break;
-       case 1:
-               q_fifo_size = XGBE_FIFO_SIZE_B(256);
-               break;
-       case 2:
-               q_fifo_size = XGBE_FIFO_SIZE_B(512);
-               break;
-       case 3:
-               q_fifo_size = XGBE_FIFO_SIZE_KB(1);
-               break;
-       case 4:
-               q_fifo_size = XGBE_FIFO_SIZE_KB(2);
-               break;
-       case 5:
-               q_fifo_size = XGBE_FIFO_SIZE_KB(4);
-               break;
-       case 6:
-               q_fifo_size = XGBE_FIFO_SIZE_KB(8);
-               break;
-       case 7:
-               q_fifo_size = XGBE_FIFO_SIZE_KB(16);
-               break;
-       case 8:
-               q_fifo_size = XGBE_FIFO_SIZE_KB(32);
-               break;
-       case 9:
-               q_fifo_size = XGBE_FIFO_SIZE_KB(64);
-               break;
-       case 10:
-               q_fifo_size = XGBE_FIFO_SIZE_KB(128);
-               break;
-       case 11:
-               q_fifo_size = XGBE_FIFO_SIZE_KB(256);
-               break;
-       }
+       /* Calculate the configured fifo size */
+       q_fifo_size = 1 << (fifo_size + 7);
 
-       /* The configured value is not the actual amount of fifo RAM */
+       /* The configured value may not be the actual amount of fifo RAM */
        q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
 
        q_fifo_size = q_fifo_size / queue_count;
 
-       /* Set the queue fifo size programmable value */
-       if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
-               p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
-       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
-               p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
-       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
-               p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
-       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
-               p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
-       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
-               p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
-       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
-               p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
-       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
-               p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
-       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
-               p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
-       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
-               p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
-       else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
-               p_fifo = XGMAC_MTL_FIFO_SIZE_512;
-       else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
-               p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+       /* Each increment in the queue fifo size represents 256 bytes of
+        * fifo, with 0 representing 256 bytes. Distribute the fifo equally
+        * between the queues.
+        */
+       p_fifo = q_fifo_size / 256;
+       if (p_fifo)
+               p_fifo--;
 
        return p_fifo;
 }
 
 static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
 {
-       enum xgbe_mtl_fifo_size fifo_size;
+       unsigned int fifo_size;
        unsigned int i;
 
        fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
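
The rewritten xgbe_calculate_per_queue_fifo() collapses the two lookup tables (and the enum xgbe_mtl_fifo_size type) into arithmetic: the hardware feature field encodes a total fifo of 2^(n+7) bytes (n=0 is 128 bytes, n=9 is 64KB), and the TQS/RQS programmable value counts 256-byte units minus one, which the netif_info() in the next hunk decodes as (fifo_size + 1) * 256. A worked standalone check of the new math; 80KB is assumed for the XGBE_FIFO_MAX cap here, see xgbe.h for the real value:

#include <stdio.h>

#define FIFO_MAX	81920U	/* stands in for XGBE_FIFO_MAX (assumed) */

static unsigned int per_queue_fifo(unsigned int fifo_size,
				   unsigned int queue_count)
{
	/* Hardware encodes the total fifo as 2^(fifo_size + 7) bytes. */
	unsigned int q_fifo_size = 1U << (fifo_size + 7);
	unsigned int p_fifo;

	if (q_fifo_size > FIFO_MAX)
		q_fifo_size = FIFO_MAX;
	q_fifo_size /= queue_count;

	/* TQS/RQS count 256-byte units, with 0 meaning 256 bytes. */
	p_fifo = q_fifo_size / 256;
	if (p_fifo)
		p_fifo--;

	return p_fifo;
}

int main(void)
{
	/* A reported fifo_size of 9 (64KB) split over 4 queues:
	 * 65536 / 4 = 16384 bytes -> TQS value 63 -> (63+1)*256 = 16KB.
	 */
	unsigned int v = per_queue_fifo(9, 4);

	printf("TQS=%u -> %u bytes per queue\n", v, (v + 1) * 256);
	return 0;
}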
@@ -2026,14 +1973,14 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
 
-       netdev_notice(pdata->netdev,
-                     "%d Tx hardware queues, %d byte fifo per queue\n",
-                     pdata->tx_q_count, ((fifo_size + 1) * 256));
+       netif_info(pdata, drv, pdata->netdev,
+                  "%d Tx hardware queues, %d byte fifo per queue\n",
+                  pdata->tx_q_count, ((fifo_size + 1) * 256));
 }
 
 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
 {
-       enum xgbe_mtl_fifo_size fifo_size;
+       unsigned int fifo_size;
        unsigned int i;
 
        fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
@@ -2042,9 +1989,9 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
 
-       netdev_notice(pdata->netdev,
-                     "%d Rx hardware queues, %d byte fifo per queue\n",
-                     pdata->rx_q_count, ((fifo_size + 1) * 256));
+       netif_info(pdata, drv, pdata->netdev,
+                  "%d Rx hardware queues, %d byte fifo per queue\n",
+                  pdata->rx_q_count, ((fifo_size + 1) * 256));
 }
 
 static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
@@ -2063,14 +2010,16 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
 
        for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
                for (j = 0; j < qptc; j++) {
-                       DBGPR("  TXq%u mapped to TC%u\n", queue, i);
+                       netif_dbg(pdata, drv, pdata->netdev,
+                                 "TXq%u mapped to TC%u\n", queue, i);
                        XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
                                               Q2TCMAP, i);
                        pdata->q2tc_map[queue++] = i;
                }
 
                if (i < qptc_extra) {
-                       DBGPR("  TXq%u mapped to TC%u\n", queue, i);
+                       netif_dbg(pdata, drv, pdata->netdev,
+                                 "TXq%u mapped to TC%u\n", queue, i);
                        XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
                                               Q2TCMAP, i);
                        pdata->q2tc_map[queue++] = i;
@@ -2088,13 +2037,15 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
        for (i = 0, prio = 0; i < prio_queues;) {
                mask = 0;
                for (j = 0; j < ppq; j++) {
-                       DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
+                       netif_dbg(pdata, drv, pdata->netdev,
+                                 "PRIO%u mapped to RXq%u\n", prio, i);
                        mask |= (1 << prio);
                        pdata->prio2q_map[prio++] = i;
                }
 
                if (i < ppq_extra) {
-                       DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
+                       netif_dbg(pdata, drv, pdata->netdev,
+                                 "PRIO%u mapped to RXq%u\n", prio, i);
                        mask |= (1 << prio);
                        pdata->prio2q_map[prio++] = i;
                }
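
Both mapping passes above implement the same even-split-plus-remainder distribution: qptc = tx_q_count / tc_cnt Tx queues per traffic class, with the first qptc_extra classes taking one more, and likewise ppq priorities per Rx queue. A standalone sketch of that distribution (names simplified; the driver additionally records each assignment in the Q2TCMAP field or the prio2q_map table):

#include <stdio.h>

/* Distribute 'nitems' items over 'nbuckets' buckets the way
 * xgbe_config_queue_mapping() spreads Tx queues over traffic classes
 * (and priorities over Rx queues): an even share per bucket, with the
 * first (nitems % nbuckets) buckets taking one extra.
 */
static void distribute(unsigned int nitems, unsigned int nbuckets)
{
	unsigned int per = nitems / nbuckets;
	unsigned int extra = nitems % nbuckets;
	unsigned int i, j, item = 0;

	for (i = 0; i < nbuckets; i++) {
		unsigned int share = per + (i < extra ? 1 : 0);

		for (j = 0; j < share; j++)
			printf("item %u -> bucket %u\n", item++, i);
	}
}

int main(void)
{
	distribute(6, 4);	/* e.g. 6 Tx queues over 4 traffic classes */
	return 0;
}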
@@ -2220,7 +2171,7 @@ static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
 
        default:
                read_hi = false;
-       };
+       }
 
        val = XGMAC_IOREAD(pdata, reg_lo);