These changes are the raw update of the kvmfornfv.git kernel sources to linux-4.4.6-rt14.
diff --git a/kernel/mm/ksm.c b/kernel/mm/ksm.c
index 7ee101e..b5cd647 100644
--- a/kernel/mm/ksm.c
+++ b/kernel/mm/ksm.c
@@ -475,7 +475,8 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
                flush_dcache_page(page);
        } else {
                put_page(page);
-out:           page = NULL;
+out:
+               page = NULL;
        }
        up_read(&mm->mmap_sem);
        return page;
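
The hunk above is stylistic: the out: label moves onto its own line per kernel coding style, with no change in behaviour. It is worth seeing in context, because after this series get_mergeable_page() returns either a referenced page or NULL, never an ERR_PTR, which is what later lets a caller drop IS_ERR_OR_NULL(). A sketch of the function tail as it reads after this hunk, reconstructed from the 4.4 sources (the surrounding lines are an assumption and may differ slightly in this tree):

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (PageAnon(page) || page_trans_compound_anon(page)) {
		flush_dcache_page(page);
	} else {
		put_page(page);
out:
		page = NULL;
	}
	up_read(&mm->mmap_sem);
	return page;	/* a referenced page, or NULL on any failure */
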
@@ -625,7 +626,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
                unlock_page(page);
                put_page(page);
 
-               if (stable_node->hlist.first)
+               if (!hlist_empty(&stable_node->hlist))
                        ksm_pages_sharing--;
                else
                        ksm_pages_shared--;
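
hlist_empty() is the standard helper from include/linux/list.h; in 4.4 it is simply:

static inline int hlist_empty(const struct hlist_head *h)
{
	return !h->first;
}

so the old test stable_node->hlist.first and the new !hlist_empty(&stable_node->hlist) are equivalent (note the inversion: both check for a non-empty hlist). The helper only states the intent more clearly.
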
@@ -1021,8 +1022,6 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
        if (page == kpage)                      /* ksm page forked */
                return 0;
 
-       if (!(vma->vm_flags & VM_MERGEABLE))
-               goto out;
        if (PageTransCompound(page) && page_trans_compound_anon_split(page))
                goto out;
        BUG_ON(PageTransCompound(page));
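
The VM_MERGEABLE test is not dropped here: it moves into the callers, which now look the VMA up through find_mergeable_vma() (next hunk; a sketch of the helper follows that hunk). try_to_merge_one_page() may then assume it is only ever handed mergeable VMAs.
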
@@ -1087,10 +1086,8 @@ static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
        int err = -EFAULT;
 
        down_read(&mm->mmap_sem);
-       if (ksm_test_exit(mm))
-               goto out;
-       vma = find_vma(mm, rmap_item->address);
-       if (!vma || vma->vm_start > rmap_item->address)
+       vma = find_mergeable_vma(mm, rmap_item->address);
+       if (!vma)
                goto out;
 
        err = try_to_merge_one_page(vma, page, kpage);
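
find_mergeable_vma() folds the exit check, the VMA lookup and the mergeability test into one helper. A sketch consistent with these two hunks, modelled on the mainline helper of the same name (the !vma->anon_vma check is carried over from mainline and is an assumption about this tree):

static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
						 unsigned long addr)
{
	struct vm_area_struct *vma;

	if (ksm_test_exit(mm))
		return NULL;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		return NULL;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}
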
@@ -1177,8 +1174,18 @@ again:
                cond_resched();
                stable_node = rb_entry(*new, struct stable_node, node);
                tree_page = get_ksm_page(stable_node, false);
-               if (!tree_page)
-                       return NULL;
+               if (!tree_page) {
+                       /*
+                        * If we walked over a stale stable_node,
+                        * get_ksm_page() will call rb_erase() and it
+                        * may rebalance the tree from under us. So
+                        * restart the search from scratch. Returning
+                        * NULL would be safe too, but we'd generate
+                        * false negative insertions just because some
+                        * stable_node was stale.
+                        */
+                       goto again;
+               }
 
                ret = memcmp_pages(page, tree_page);
                put_page(tree_page);
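
The restart is needed because get_ksm_page() is not a pure lookup: when it finds a stale node it unlinks it from the stable tree, and rb_erase() can rotate exactly the nodes that the new/parent walk is standing on. Condensed from the 4.4 sources, the stale path inside get_ksm_page() is roughly:

	if (READ_ONCE(page->mapping) != expected_mapping) {
		/*
		 * The stable_node no longer points at a live KSM page:
		 * remove_node_from_stable_tree() rb_erase()s it, which
		 * may rebalance the tree under the caller's walk.
		 */
		remove_node_from_stable_tree(stable_node);
		return NULL;
	}
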
@@ -1254,12 +1261,14 @@ static struct stable_node *stable_tree_insert(struct page *kpage)
        unsigned long kpfn;
        struct rb_root *root;
        struct rb_node **new;
-       struct rb_node *parent = NULL;
+       struct rb_node *parent;
        struct stable_node *stable_node;
 
        kpfn = page_to_pfn(kpage);
        nid = get_kpfn_nid(kpfn);
        root = root_stable_tree + nid;
+again:
+       parent = NULL;
        new = &root->rb_node;
 
        while (*new) {
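
Moving parent = NULL below the new again: label is essential rather than cosmetic: after a restart the saved parent may point into a rotated or freed node, and rb_link_node() consumes the parent/new pair when the new stable_node is finally linked in (see the sketch after the next hunk).
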
@@ -1269,8 +1278,18 @@ static struct stable_node *stable_tree_insert(struct page *kpage)
                cond_resched();
                stable_node = rb_entry(*new, struct stable_node, node);
                tree_page = get_ksm_page(stable_node, false);
-               if (!tree_page)
-                       return NULL;
+               if (!tree_page) {
+                       /*
+                        * If we walked over a stale stable_node,
+                        * get_ksm_page() will call rb_erase() and it
+                        * may rebalance the tree from under us. So
+                        * restart the search from scratch. Returning
+                        * NULL would be safe too, but we'd generate
+                        * false negative insertions just because some
+                        * stable_node was stale.
+                        */
+                       goto again;
+               }
 
                ret = memcmp_pages(kpage, tree_page);
                put_page(tree_page);
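
For reference, the insertion tail of stable_tree_insert() that consumes the parent/new pair once the walk falls off the tree, reconstructed from the 4.4 sources (details such as DO_NUMA() may vary in this tree):

	stable_node = alloc_stable_node();
	if (!stable_node)
		return NULL;

	INIT_HLIST_HEAD(&stable_node->hlist);
	stable_node->kpfn = kpfn;
	set_page_stable_node(kpage, stable_node);
	DO_NUMA(stable_node->nid = nid);
	rb_link_node(&stable_node->node, parent, new);
	rb_insert_color(&stable_node->node, root);

	return stable_node;
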
@@ -1340,7 +1359,7 @@ struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
                cond_resched();
                tree_rmap_item = rb_entry(*new, struct rmap_item, node);
                tree_page = get_mergeable_page(tree_rmap_item);
-               if (IS_ERR_OR_NULL(tree_page))
+               if (!tree_page)
                        return NULL;
 
                /*
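
This follows from the first hunk's contract: get_mergeable_page() now returns either a referenced page or NULL, swallowing follow_page() errors internally (see the sketch after the first hunk), so IS_ERR_OR_NULL() was testing for a case that can no longer reach this caller.
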
@@ -1914,9 +1933,11 @@ again:
                struct anon_vma_chain *vmac;
                struct vm_area_struct *vma;
 
+               cond_resched();
                anon_vma_lock_read(anon_vma);
                anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
                                               0, ULONG_MAX) {
+                       cond_resched();
                        vma = vmac->vma;
                        if (rmap_item->address < vma->vm_start ||
                            rmap_item->address >= vma->vm_end)
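
A single KSM page can be mapped by a great many VMAs hanging off many anon_vmas, so both loop levels in rmap_walk_ksm() can run for a long time; the added cond_resched() calls bound the stretch of work done without rescheduling, which matters all the more on the -rt tree this repository tracks. The anon_vma lock taken here is a sleeping rw_semaphore, so rescheduling while holding it for read is legal. The shape of the walk, condensed:

	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		cond_resched();			/* one rmap_item per sharing mm */
		anon_vma_lock_read(anon_vma);	/* rwsem read side: may sleep */
		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
					       0, ULONG_MAX) {
			cond_resched();		/* one vmac per mapping VMA */
			/* ... range check, then rwc->rmap_one() ... */
		}
		anon_vma_unlock_read(anon_vma);
	}
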