These changes are the raw update of the kvmfornfv.git kernel sources to linux-4.4.6-rt14.
diff --git a/kernel/include/linux/mm_types.h b/kernel/include/linux/mm_types.h
index 89c0471..b238ebf 100644
@@ -29,8 +29,6 @@ struct mem_cgroup;
                IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
 #define ALLOC_SPLIT_PTLOCKS    (SPINLOCK_SIZE > BITS_PER_LONG/8)
 
-typedef void compound_page_dtor(struct page *);
-
 /*
  * Each physical page in the system has a struct page associated with
  * it to keep track of whatever it is we are using the page for at the
@@ -114,7 +112,13 @@ struct page {
                };
        };
 
-       /* Third double word block */
+       /*
+        * Third double word block
+        *
+        * WARNING: bit 0 of the first word encodes PageTail(). That means
+        * the other users of this storage space MUST NOT use bit 0, to
+        * avoid collisions and false-positive PageTail() results.
+        */
        union {
                struct list_head lru;   /* Pageout list, eg. active_list
                                         * protected by zone->lru_lock !
@@ -132,18 +136,37 @@ struct page {
 #endif
                };
 
-               struct slab *slab_page; /* slab fields */
                struct rcu_head rcu_head;       /* Used by SLAB
                                                 * when destroying via RCU
                                                 */
-               /* First tail page of compound page */
+               /* Tail pages of compound page */
                struct {
-                       compound_page_dtor *compound_dtor;
-                       unsigned long compound_order;
+                       unsigned long compound_head; /* If bit zero is set */
+
+                       /* First tail page only */
+#ifdef CONFIG_64BIT
+                       /*
+                        * On 64-bit systems we have enough space in struct page
+                        * to encode compound_dtor and compound_order as
+                        * unsigned ints. This can help the compiler generate
+                        * better or smaller code on some architectures.
+                        */
+                       unsigned int compound_dtor;
+                       unsigned int compound_order;
+#else
+                       unsigned short int compound_dtor;
+                       unsigned short int compound_order;
+#endif
                };
 
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
-               pgtable_t pmd_huge_pte; /* protected by page->ptl */
+               struct {
+                       unsigned long __pad;    /* do not overlay pmd_huge_pte
+                                                * with compound_head to avoid
+                                                * possible bit 0 collision.
+                                                */
+                       pgtable_t pmd_huge_pte; /* protected by page->ptl */
+               };
 #endif
        };
 
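With bit 0 of compound_head doing double duty as the tail-page flag, finding the head page of a compound page becomes a single load and mask instead of a first_page dereference. The matching helpers from include/linux/page-flags.h in this kernel series look roughly like this (condensed; see the real header for the full set):

static inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static inline struct page *compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	/*
	 * Bit 0 set means this is a tail page and the rest of the word
	 * is the head page pointer; struct page pointers are at least
	 * word-aligned, so bit 0 is always free to act as the tag.
	 */
	if (unlikely(head & 1))
		return (struct page *)(head - 1);
	return page;
}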
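The compound_page_dtor typedef removed above does not vanish from the kernel; it moves to include/linux/mm.h, and compound_dtor in struct page shrinks to an index into a small destructor table, which is why an unsigned int (or short) is now enough. Condensed and slightly simplified from the mm.h side of this series:

/* Keep in sync with the compound_page_dtors[] table in mm/page_alloc.c. */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;	/* stored in the first tail page */
}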
@@ -164,7 +187,6 @@ struct page {
 #endif
 #endif
                struct kmem_cache *slab_cache;  /* SL[AU]B: Pointer to slab */
-               struct page *first_page;        /* Compound tail pages */
        };
 
 #ifdef CONFIG_MEMCG
@@ -218,7 +240,25 @@ struct page_frag {
 #endif
 };
 
-typedef unsigned long __nocast vm_flags_t;
+#define PAGE_FRAG_CACHE_MAX_SIZE       __ALIGN_MASK(32768, ~PAGE_MASK)
+#define PAGE_FRAG_CACHE_MAX_ORDER      get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+
+struct page_frag_cache {
+       void *va;
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+       __u16 offset;
+       __u16 size;
+#else
+       __u32 offset;
+#endif
+       /* We maintain a pagecount bias, so that we don't dirty the cache
+        * line containing page->_count every time we allocate a fragment.
+        */
+       unsigned int            pagecnt_bias;
+       bool pfmemalloc;
+};
+
+typedef unsigned long vm_flags_t;
 
 /*
  * A region containing a mapping of a non-memory backed file under NOMMU
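__ALIGN_MASK(32768, ~PAGE_MASK) rounds 32KB up to a whole number of pages, so on 4K-page systems the cache refills with order-3 pages. The allocator then carves fragments out of that page by walking offset downward, and pagecnt_bias is the trick that keeps page->_count off the fast path: a batch of references is taken once at refill time and the bias is decremented per fragment. A simplified sketch of the fast path (the real code is __alloc_page_frag() in mm/page_alloc.c; refill, page recycling and pfmemalloc handling are omitted, and frag_alloc_sketch is an illustrative name):

/*
 * Simplified sketch of the fragment allocation fast path; not the
 * real __alloc_page_frag(), which also refills and recycles pages.
 */
static void *frag_alloc_sketch(struct page_frag_cache *nc, unsigned int fragsz)
{
	int offset = nc->offset - fragsz;

	if (unlikely(offset < 0))
		return NULL;	/* the real code refills or recycles here */

	/*
	 * Consume one of the page references taken in bulk at refill
	 * time rather than touching the atomic page->_count per fragment.
	 */
	nc->pagecnt_bias--;
	nc->offset = offset;

	return nc->va + offset;
}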
@@ -239,6 +279,16 @@ struct vm_region {
                                                * this region */
 };
 
+#ifdef CONFIG_USERFAULTFD
+#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
+struct vm_userfaultfd_ctx {
+       struct userfaultfd_ctx *ctx;
+};
+#else /* CONFIG_USERFAULTFD */
+#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
+struct vm_userfaultfd_ctx {};
+#endif /* CONFIG_USERFAULTFD */
+
 /*
 * This struct defines a VM memory area. There is one of these
  * per VM-area/task.  A VM area is any part of the process virtual memory
@@ -305,6 +355,7 @@ struct vm_area_struct {
 #ifdef CONFIG_NUMA
        struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
 #endif
+       struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
 };
 
 struct core_thread {
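Wrapping the userfaultfd pointer in a struct (empty when the feature is off) lets the vm_area_struct member below and generic VMA-manipulation code compile unconditionally, with NULL_VM_UFFD_CTX as the "not registered" value in both configurations. A hypothetical helper to illustrate the intended test (vma_has_uffd() is an illustrative name, not a kernel function):

/* Hypothetical helper, for illustration only. */
static inline bool vma_has_uffd(struct vm_area_struct *vma)
{
#ifdef CONFIG_USERFAULTFD
	return vma->vm_userfaultfd_ctx.ctx != NULL;
#else
	return false;	/* empty struct: nothing can ever be registered */
#endif
}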
@@ -461,6 +512,9 @@ struct mm_struct {
        /* address of the bounds directory */
        void __user *bd_addr;
 #endif
+#ifdef CONFIG_HUGETLB_PAGE
+       atomic_long_t hugetlb_usage;
+#endif
 };
 
 static inline void mm_init_cpumask(struct mm_struct *mm)
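The new hugetlb_usage counter backs the HugetlbPages field in /proc/PID/status. The accompanying helpers in include/linux/hugetlb.h are plain atomic wrappers, roughly:

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}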
@@ -529,6 +583,7 @@ enum tlb_flush_reason {
        TLB_REMOTE_SHOOTDOWN,
        TLB_LOCAL_SHOOTDOWN,
        TLB_LOCAL_MM_SHOOTDOWN,
+       TLB_REMOTE_SEND_IPI,
        NR_TLB_FLUSH_REASONS,
 };
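TLB_REMOTE_SEND_IPI lets the tlb_flush tracepoint record a remote shootdown at the moment the IPI is sent, not just when it is received. The x86 call site in arch/x86/mm/tlb.c looks roughly like:

/* Report the flush size (in pages, or TLB_FLUSH_ALL) before sending the IPI. */
if (end == TLB_FLUSH_ALL)
	trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
else
	trace_tlb_flush(TLB_REMOTE_SEND_IPI,
			(end - start) >> PAGE_SHIFT);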