These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / drivers / net / xen-netfront.c
index e031c94..d6abf19 100644 (file)
@@ -45,7 +45,6 @@
 #include <linux/slab.h>
 #include <net/ip.h>
 
-#include <asm/xen/page.h>
 #include <xen/xen.h>
 #include <xen/xenbus.h>
 #include <xen/events.h>
@@ -75,8 +74,8 @@ struct netfront_cb {
 
 #define GRANT_INVALID_REF      0
 
-#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
-#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
+#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
+#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
 
 /* Minimum number of Rx slots (includes slot for GSO metadata). */
 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
@@ -292,7 +291,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
                struct sk_buff *skb;
                unsigned short id;
                grant_ref_t ref;
-               unsigned long pfn;
+               struct page *page;
                struct xen_netif_rx_request *req;
 
                skb = xennet_alloc_one_rx_buffer(queue);
@@ -308,14 +307,13 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
                BUG_ON((signed short)ref < 0);
                queue->grant_rx_ref[id] = ref;
 
-               pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
+               page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
 
                req = RING_GET_REQUEST(&queue->rx, req_prod);
-               gnttab_grant_foreign_access_ref(ref,
-                                               queue->info->xbdev->otherend_id,
-                                               pfn_to_mfn(pfn),
-                                               0);
-
+               gnttab_page_grant_foreign_access_ref_one(ref,
+                                                        queue->info->xbdev->otherend_id,
+                                                        page,
+                                                        0);
                req->id = id;
                req->gref = ref;
        }
@@ -416,15 +414,25 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
        xennet_maybe_wake_tx(queue);
 }
 
-static struct xen_netif_tx_request *xennet_make_one_txreq(
-       struct netfront_queue *queue, struct sk_buff *skb,
-       struct page *page, unsigned int offset, unsigned int len)
+struct xennet_gnttab_make_txreq {
+       struct netfront_queue *queue;
+       struct sk_buff *skb;
+       struct page *page;
+       struct xen_netif_tx_request *tx; /* Last request */
+       unsigned int size;
+};
+
+static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
+                                 unsigned int len, void *data)
 {
+       struct xennet_gnttab_make_txreq *info = data;
        unsigned int id;
        struct xen_netif_tx_request *tx;
        grant_ref_t ref;
-
-       len = min_t(unsigned int, PAGE_SIZE - offset, len);
+       /* convenient aliases */
+       struct page *page = info->page;
+       struct netfront_queue *queue = info->queue;
+       struct sk_buff *skb = info->skb;
 
        id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
        tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
@@ -432,7 +440,7 @@ static struct xen_netif_tx_request *xennet_make_one_txreq(
        BUG_ON((signed short)ref < 0);
 
        gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
-                                       page_to_mfn(page), GNTMAP_readonly);
+                                       gfn, GNTMAP_readonly);
 
        queue->tx_skbs[id].skb = skb;
        queue->grant_tx_page[id] = page;
@@ -444,7 +452,34 @@ static struct xen_netif_tx_request *xennet_make_one_txreq(
        tx->size = len;
        tx->flags = 0;
 
-       return tx;
+       info->tx = tx;
+       info->size += tx->size;
+}
+
+static struct xen_netif_tx_request *xennet_make_first_txreq(
+       struct netfront_queue *queue, struct sk_buff *skb,
+       struct page *page, unsigned int offset, unsigned int len)
+{
+       struct xennet_gnttab_make_txreq info = {
+               .queue = queue,
+               .skb = skb,
+               .page = page,
+               .size = 0,
+       };
+
+       gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
+
+       return info.tx;
+}
+
+static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
+                                 unsigned int len, void *data)
+{
+       struct xennet_gnttab_make_txreq *info = data;
+
+       info->tx->flags |= XEN_NETTXF_more_data;
+       skb_get(info->skb);
+       xennet_tx_setup_grant(gfn, offset, len, data);
 }
 
 static struct xen_netif_tx_request *xennet_make_txreqs(
@@ -452,20 +487,30 @@ static struct xen_netif_tx_request *xennet_make_txreqs(
        struct sk_buff *skb, struct page *page,
        unsigned int offset, unsigned int len)
 {
+       struct xennet_gnttab_make_txreq info = {
+               .queue = queue,
+               .skb = skb,
+               .tx = tx,
+       };
+
        /* Skip unused frames from start of page */
        page += offset >> PAGE_SHIFT;
        offset &= ~PAGE_MASK;
 
        while (len) {
-               tx->flags |= XEN_NETTXF_more_data;
-               tx = xennet_make_one_txreq(queue, skb_get(skb),
-                                          page, offset, len);
+               info.page = page;
+               info.size = 0;
+
+               gnttab_foreach_grant_in_range(page, offset, len,
+                                             xennet_make_one_txreq,
+                                             &info);
+
                page++;
                offset = 0;
-               len -= tx->size;
+               len -= info.size;
        }
 
-       return tx;
+       return info.tx;
 }
 
 /*
@@ -475,9 +520,10 @@ static struct xen_netif_tx_request *xennet_make_txreqs(
 static int xennet_count_skb_slots(struct sk_buff *skb)
 {
        int i, frags = skb_shinfo(skb)->nr_frags;
-       int pages;
+       int slots;
 
-       pages = PFN_UP(offset_in_page(skb->data) + skb_headlen(skb));
+       slots = gnttab_count_grant(offset_in_page(skb->data),
+                                  skb_headlen(skb));
 
        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
@@ -487,10 +533,10 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
                /* Skip unused frames from start of page */
                offset &= ~PAGE_MASK;
 
-               pages += PFN_UP(offset + size);
+               slots += gnttab_count_grant(offset, size);
        }
 
-       return pages;
+       return slots;
 }
 
 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -511,6 +557,8 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
        return queue_idx;
 }
 
+#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
+
 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct netfront_info *np = netdev_priv(dev);
@@ -545,7 +593,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        slots = xennet_count_skb_slots(skb);
-       if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
+       if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
                net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
                                    slots, skb->len);
                if (skb_linearize(skb))
@@ -566,10 +614,13 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        /* First request for the linear area. */
-       first_tx = tx = xennet_make_one_txreq(queue, skb,
-                                             page, offset, len);
-       page++;
-       offset = 0;
+       first_tx = tx = xennet_make_first_txreq(queue, skb,
+                                               page, offset, len);
+       offset += tx->size;
+       if (offset == PAGE_SIZE) {
+               page++;
+               offset = 0;
+       }
        len -= tx->size;
 
        if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -731,9 +782,9 @@ static int xennet_get_responses(struct netfront_queue *queue,
 
        for (;;) {
                if (unlikely(rx->status < 0 ||
-                            rx->offset + rx->status > PAGE_SIZE)) {
+                            rx->offset + rx->status > XEN_PAGE_SIZE)) {
                        if (net_ratelimit())
-                               dev_warn(dev, "rx->offset: %x, size: %u\n",
+                               dev_warn(dev, "rx->offset: %u, size: %d\n",
                                         rx->offset, rx->status);
                        xennet_move_rx_slot(queue, skb, ref);
                        err = -EINVAL;
@@ -1245,10 +1296,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        np                   = netdev_priv(netdev);
        np->xbdev            = dev;
 
-       /* No need to use rtnl_lock() before the call below as it
-        * happens before register_netdev().
-        */
-       netif_set_real_num_tx_queues(netdev, 0);
        np->queues = NULL;
 
        err = -ENOMEM;
@@ -1341,7 +1388,7 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 
        netif_carrier_off(info->netdev);
 
-       for (i = 0; i < num_queues; ++i) {
+       for (i = 0; i < num_queues && info->queues; ++i) {
                struct netfront_queue *queue = &info->queues[i];
 
                if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
@@ -1353,7 +1400,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
                queue->tx_evtchn = queue->rx_evtchn = 0;
                queue->tx_irq = queue->rx_irq = 0;
 
-               napi_synchronize(&queue->napi);
+               if (netif_running(info->netdev))
+                       napi_synchronize(&queue->napi);
 
                xennet_release_tx_bufs(queue);
                xennet_release_rx_bufs(queue);
@@ -1498,7 +1546,7 @@ static int setup_netfront(struct xenbus_device *dev,
                goto fail;
        }
        SHARED_RING_INIT(txs);
-       FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE);
+       FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
 
        err = xenbus_grant_ring(dev, txs, 1, &gref);
        if (err < 0)
@@ -1512,7 +1560,7 @@ static int setup_netfront(struct xenbus_device *dev,
                goto alloc_rx_ring_fail;
        }
        SHARED_RING_INIT(rxs);
-       FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
+       FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
 
        err = xenbus_grant_ring(dev, rxs, 1, &gref);
        if (err < 0)
@@ -1560,9 +1608,8 @@ static int xennet_init_queue(struct netfront_queue *queue)
        spin_lock_init(&queue->tx_lock);
        spin_lock_init(&queue->rx_lock);
 
-       init_timer(&queue->rx_refill_timer);
-       queue->rx_refill_timer.data = (unsigned long)queue;
-       queue->rx_refill_timer.function = rx_refill_timeout;
+       setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
+                   (unsigned long)queue);
 
        snprintf(queue->name, sizeof(queue->name), "%s-q%u",
                 queue->info->netdev->name, queue->id);
@@ -1709,19 +1756,19 @@ static void xennet_destroy_queues(struct netfront_info *info)
 }
 
 static int xennet_create_queues(struct netfront_info *info,
-                               unsigned int num_queues)
+                               unsigned int *num_queues)
 {
        unsigned int i;
        int ret;
 
-       info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+       info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
                               GFP_KERNEL);
        if (!info->queues)
                return -ENOMEM;
 
        rtnl_lock();
 
-       for (i = 0; i < num_queues; i++) {
+       for (i = 0; i < *num_queues; i++) {
                struct netfront_queue *queue = &info->queues[i];
 
                queue->id = i;
@@ -1731,7 +1778,7 @@ static int xennet_create_queues(struct netfront_info *info,
                if (ret < 0) {
                        dev_warn(&info->netdev->dev,
                                 "only created %d queues\n", i);
-                       num_queues = i;
+                       *num_queues = i;
                        break;
                }
 
@@ -1741,11 +1788,11 @@ static int xennet_create_queues(struct netfront_info *info,
                        napi_enable(&queue->napi);
        }
 
-       netif_set_real_num_tx_queues(info->netdev, num_queues);
+       netif_set_real_num_tx_queues(info->netdev, *num_queues);
 
        rtnl_unlock();
 
-       if (num_queues == 0) {
+       if (*num_queues == 0) {
                dev_err(&info->netdev->dev, "no queues\n");
                return -EINVAL;
        }
@@ -1791,7 +1838,7 @@ static int talk_to_netback(struct xenbus_device *dev,
        if (info->queues)
                xennet_destroy_queues(info);
 
-       err = xennet_create_queues(info, num_queues);
+       err = xennet_create_queues(info, &num_queues);
        if (err < 0)
                goto destroy_ring;
 
@@ -1822,19 +1869,22 @@ again:
                goto destroy_ring;
        }
 
-       if (num_queues == 1) {
-               err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
-               if (err)
-                       goto abort_transaction_no_dev_fatal;
-       } else {
+       if (xenbus_exists(XBT_NIL,
+                         info->xbdev->otherend, "multi-queue-max-queues")) {
                /* Write the number of queues */
-               err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues",
-                                   "%u", num_queues);
+               err = xenbus_printf(xbt, dev->nodename,
+                                   "multi-queue-num-queues", "%u", num_queues);
                if (err) {
                        message = "writing multi-queue-num-queues";
                        goto abort_transaction_no_dev_fatal;
                }
+       }
 
+       if (num_queues == 1) {
+               err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
+               if (err)
+                       goto abort_transaction_no_dev_fatal;
+       } else {
                /* Write the keys for each queue */
                for (i = 0; i < num_queues; ++i) {
                        queue = &info->queues[i];
@@ -1901,9 +1951,6 @@ abort_transaction_no_dev_fatal:
        xennet_disconnect_backend(info);
        kfree(info->queues);
        info->queues = NULL;
-       rtnl_lock();
-       netif_set_real_num_tx_queues(info->netdev, 0);
-       rtnl_unlock();
  out:
        return err;
 }
@@ -2110,7 +2157,8 @@ static int xennet_remove(struct xenbus_device *dev)
 
        unregister_netdev(info->netdev);
 
-       xennet_destroy_queues(info);
+       if (info->queues)
+               xennet_destroy_queues(info);
        xennet_free_netdev(info->netdev);
 
        return 0;
@@ -2139,8 +2187,11 @@ static int __init netif_init(void)
 
        pr_info("Initialising Xen virtual ethernet driver\n");
 
-       /* Allow as many queues as there are CPUs, by default */
-       xennet_max_queues = num_online_cpus();
+       /* Allow as many queues as there are CPUs if user has not
+        * specified a value.
+        */
+       if (xennet_max_queues == 0)
+               xennet_max_queues = num_online_cpus();
 
        return xenbus_register_frontend(&netfront_driver);
 }