Merge "cleanup: remove fuel plugin since fuel@opnfv uses mcp"
diff --git a/kernel/mm/swap.c b/kernel/mm/swap.c
index 1785ac6..ad16649 100644
--- a/kernel/mm/swap.c
+++ b/kernel/mm/swap.c
@@ -31,8 +31,9 @@
 #include <linux/memcontrol.h>
 #include <linux/gfp.h>
 #include <linux/uio.h>
-#include <linux/hugetlb.h>
 #include <linux/locallock.h>
+#include <linux/hugetlb.h>
+#include <linux/page_idle.h>
 
 #include "internal.h"
 
@@ -135,7 +136,6 @@ void put_unrefcounted_compound_page(struct page *page_head, struct page *page)
                 * here, see the comment above this function.
                 */
                VM_BUG_ON_PAGE(!PageHead(page_head), page_head);
-               VM_BUG_ON_PAGE(page_mapcount(page) != 0, page);
                if (put_page_testzero(page_head)) {
                        /*
                         * If this is the tail of a slab THP page,
@@ -205,7 +205,7 @@ out_put_single:
                                __put_single_page(page);
                        return;
                }
-               VM_BUG_ON_PAGE(page_head != page->first_page, page);
+               VM_BUG_ON_PAGE(page_head != compound_head(page), page);
                /*
                 * We can release the refcount taken by
                 * get_page_unless_zero() now that
@@ -266,7 +266,7 @@ static void put_compound_page(struct page *page)
         *  Case 3 is possible, as we may race with
         *  __split_huge_page_refcount tearing down a THP page.
         */
-       page_head = compound_head_by_tail(page);
+       page_head = compound_head(page);
        if (!__compound_tail_refcounted(page_head))
                put_unrefcounted_compound_page(page_head, page);
        else
@@ -628,6 +628,8 @@ void mark_page_accessed(struct page *page)
        } else if (!PageReferenced(page)) {
                SetPageReferenced(page);
        }
+       if (page_is_idle(page))
+               clear_page_idle(page);
 }
 EXPORT_SYMBOL(mark_page_accessed);
 
@@ -819,9 +821,15 @@ void lru_add_drain_cpu(int cpu)
                unsigned long flags;
 
                /* No harm done if a racing interrupt already did this */
+#ifdef CONFIG_PREEMPT_RT_BASE
+               local_lock_irqsave_on(rotate_lock, flags, cpu);
+               pagevec_move_tail(pvec);
+               local_unlock_irqrestore_on(rotate_lock, flags, cpu);
+#else
                local_lock_irqsave(rotate_lock, flags);
                pagevec_move_tail(pvec);
                local_unlock_irqrestore(rotate_lock, flags);
+#endif
        }
 
        pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
@@ -864,12 +872,32 @@ void lru_add_drain(void)
        local_unlock_cpu(swapvec_lock);
 }
 
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+       local_lock_on(swapvec_lock, cpu);
+       lru_add_drain_cpu(cpu);
+       local_unlock_on(swapvec_lock, cpu);
+}
+
+#else
+
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
        lru_add_drain();
 }
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+       struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+       INIT_WORK(work, lru_add_drain_per_cpu);
+       schedule_work_on(cpu, work);
+       cpumask_set_cpu(cpu, has_work);
+}
+#endif
 
 void lru_add_drain_all(void)
 {
@@ -882,20 +910,17 @@ void lru_add_drain_all(void)
        cpumask_clear(&has_work);
 
        for_each_online_cpu(cpu) {
-               struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
-
                if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
                    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
                    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
-                   need_activate_page_drain(cpu)) {
-                       INIT_WORK(work, lru_add_drain_per_cpu);
-                       schedule_work_on(cpu, work);
-                       cpumask_set_cpu(cpu, &has_work);
-               }
+                   need_activate_page_drain(cpu))
+                       remote_lru_add_drain(cpu, &has_work);
        }
 
+#ifndef CONFIG_PREEMPT_RT_BASE
        for_each_cpu(cpu, &has_work)
                flush_work(&per_cpu(lru_add_drain_work, cpu));
+#endif
 
        put_online_cpus();
        mutex_unlock(&lock);
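
The PREEMPT_RT hunks hinge on one property: on RT, the per-CPU local
locks here (swapvec_lock, rotate_lock) are sleeping locks that may be
taken from another CPU, which is why lru_add_drain_all() can drain a
remote CPU's pagevecs in place via local_lock_on() instead of
scheduling lru_add_drain_per_cpu() on that CPU and flushing the work
item. Below is a minimal userspace sketch of that locking shape, not
kernel code: the queue and function names are hypothetical, and
ordinary pthread mutexes stand in for the RT local locks.

#include <pthread.h>
#include <stdio.h>

#define NQUEUES   4   /* stands in for the number of CPUs */
#define QUEUE_CAP 8   /* stands in for PAGEVEC_SIZE */

struct pcpu_queue {
	pthread_mutex_t lock;   /* per-queue sleeping lock, cf. swapvec_lock */
	int items[QUEUE_CAP];
	int count;
};

static struct pcpu_queue queues[NQUEUES];

/* Add to one queue under its lock, as a CPU fills its own pagevec. */
static void enqueue(int cpu, int val)
{
	struct pcpu_queue *q = &queues[cpu];

	pthread_mutex_lock(&q->lock);
	if (q->count < QUEUE_CAP)
		q->items[q->count++] = val;
	pthread_mutex_unlock(&q->lock);
}

/* Drain one queue while holding its lock, cf. lru_add_drain_cpu(). */
static void drain_queue(int cpu)
{
	struct pcpu_queue *q = &queues[cpu];

	pthread_mutex_lock(&q->lock);
	while (q->count > 0)
		printf("queue %d: flushed %d\n", cpu, q->items[--q->count]);
	pthread_mutex_unlock(&q->lock);
}

/*
 * The RT-style drain-all: every queue's lock can be taken from the
 * calling thread, so the caller walks the queues itself. The non-RT
 * kernel cannot do this, because its pagevecs are protected by
 * disabling interrupts on the owning CPU, which has no cross-CPU
 * equivalent; it must schedule_work_on() each CPU and flush instead.
 */
static void drain_all(void)
{
	for (int cpu = 0; cpu < NQUEUES; cpu++)
		drain_queue(cpu);
}

int main(void)
{
	for (int cpu = 0; cpu < NQUEUES; cpu++)
		pthread_mutex_init(&queues[cpu].lock, NULL);

	enqueue(0, 10);
	enqueue(2, 20);
	drain_all();
	return 0;
}

Compile with cc -pthread. The point is only the shape of drain_all():
take each remote lock directly, which is exactly what local_lock_on()
permits on PREEMPT_RT and what the #else branch must emulate with
per-CPU work items.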