IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
-typedef void compound_page_dtor(struct page *);
-
/*
* Each physical page in the system has a struct page associated with
* it to keep track of whatever it is we are using the page for at the
};
};
- /* Third double word block */
+ /*
+ * Third double word block
+ *
+	 * WARNING: bit 0 of the first word encodes PageTail(). That means
+	 * the other users of this storage space MUST NOT use bit 0, to
+	 * avoid collisions and false-positive PageTail() results.
+ */
union {
struct list_head lru; /* Pageout list, eg. active_list
* protected by zone->lru_lock !
#endif
};
- struct slab *slab_page; /* slab fields */
struct rcu_head rcu_head; /* Used by SLAB
* when destroying via RCU
*/
- /* First tail page of compound page */
+ /* Tail pages of compound page */
struct {
- compound_page_dtor *compound_dtor;
- unsigned long compound_order;
+ unsigned long compound_head; /* If bit zero is set */
+
+ /* First tail page only */
+#ifdef CONFIG_64BIT
+ /*
+	 * On 64 bit systems we have enough space in struct page
+	 * to encode compound_dtor and compound_order as
+	 * unsigned int. This can help the compiler generate
+	 * better or smaller code on some architectures.
+ */
+ unsigned int compound_dtor;
+ unsigned int compound_order;
+#else
+ unsigned short int compound_dtor;
+ unsigned short int compound_order;
+#endif
};
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
- pgtable_t pmd_huge_pte; /* protected by page->ptl */
+ struct {
+		unsigned long __pad;	/* do not overlay pmd_huge_pte
+					 * with compound_head, to avoid
+					 * a possible bit 0 collision.
+ */
+ pgtable_t pmd_huge_pte; /* protected by page->ptl */
+ };
#endif
};
#endif
#endif
struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
- struct page *first_page; /* Compound tail pages */
};
#ifdef CONFIG_MEMCG
#endif
};
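Since struct page is at least word-aligned, any real pointer stored in compound_head has bit 0 clear, which is what frees that bit to mark tail pages (and why the padded pmd_huge_pte overlay above is needed). With the compound_page_dtor typedef removed, compound_dtor presumably becomes a small index into a table of destructors rather than a raw function pointer. A minimal sketch of decoding the encoding, assuming only the bit-0 rule stated in the WARNING above (illustrative helpers, not the exact upstream ones):

	#include <linux/compiler.h>
	#include <linux/mm_types.h>

	static inline int page_is_tail(struct page *page)
	{
		/* bit 0 set means "this is a tail page" */
		return READ_ONCE(page->compound_head) & 1;
	}

	static inline struct page *page_head(struct page *page)
	{
		unsigned long head = READ_ONCE(page->compound_head);

		if (head & 1)
			return (struct page *)(head - 1); /* strip the marker bit */
		return page;	/* not a tail page: page is its own head */
	}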
-typedef unsigned long __nocast vm_flags_t;
+#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK)
+#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+
+struct page_frag_cache {
+	void *va;
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+ __u16 offset;
+ __u16 size;
+#else
+ __u32 offset;
+#endif
+	/* We maintain a pagecount bias, so that we don't dirty the cache
+	 * line containing page->_count every time we allocate a fragment.
+ */
+ unsigned int pagecnt_bias;
+ bool pfmemalloc;
+};
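PAGE_FRAG_CACHE_MAX_SIZE rounds 32768 up to a multiple of PAGE_SIZE, so the __u16 offset/size pair is used only when PAGE_SIZE is smaller than the cache (on 64K-page systems a single page backs the cache and a __u32 offset suffices). A simplified sketch of the pagecnt_bias trick on the allocation fast path, assuming behaviour roughly like the kernel's page-fragment allocator (frag_alloc_sketch is a hypothetical name; the refill and recycle paths are omitted):

	#include <linux/gfp.h>
	#include <linux/mm.h>

	static void *frag_alloc_sketch(struct page_frag_cache *nc,
				       unsigned int fragsz)
	{
		if (!nc->va) {
			struct page *page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
							PAGE_FRAG_CACHE_MAX_ORDER);
			if (!page)
				return NULL;
			nc->va = page_address(page);
			/*
			 * Take all the references we will ever hand out in one
			 * atomic add; afterwards each allocation decrements only
			 * the private pagecnt_bias, never page->_count.
			 */
			atomic_add(PAGE_FRAG_CACHE_MAX_SIZE - 1, &page->_count);
			nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE;
			nc->offset = PAGE_FRAG_CACHE_MAX_SIZE;
		}

		if (nc->offset < fragsz)
			return NULL;	/* refill path omitted in this sketch */

		nc->offset -= fragsz;
		nc->pagecnt_bias--;	/* no shared cache line dirtied here */
		return nc->va + nc->offset;
	}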
+
+typedef unsigned long __nocast vm_flags_t;
/*
* A region containing a mapping of a non-memory backed file under NOMMU
* this region */
};
+#ifdef CONFIG_USERFAULTFD
+#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
+struct vm_userfaultfd_ctx {
+ struct userfaultfd_ctx *ctx;
+};
+#else /* CONFIG_USERFAULTFD */
+#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
+struct vm_userfaultfd_ctx {};
+#endif /* CONFIG_USERFAULTFD */
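Providing an empty struct and a matching NULL_VM_UFFD_CTX initializer for the CONFIG_USERFAULTFD=n case lets callers reset the per-VMA context without an #ifdef at the call site; the assignment compiles away when the struct is empty. A hedged example of the pattern (vma_reset_uffd_ctx is a hypothetical helper, not part of this patch):

	#include <linux/mm_types.h>

	/* Forget any userfaultfd registration carried by this VMA, e.g.
	 * for a newly created mapping that must not inherit the context.
	 */
	static inline void vma_reset_uffd_ctx(struct vm_area_struct *vma)
	{
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
	}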
+
/*
* This struct defines a memory VMM memory area. There is one of these
* per VM-area/task. A VM area is any part of the process virtual memory
#ifdef CONFIG_NUMA
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
+ struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
};
struct core_thread {
/* address of the bounds directory */
void __user *bd_addr;
#endif
+#ifdef CONFIG_HUGETLB_PAGE
+ atomic_long_t hugetlb_usage;
+#endif
};
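Making hugetlb_usage an atomic_long_t means per-mm hugetlb accounting can be updated and sampled without taking any mm-wide lock. A sketch of the accessors such a counter implies, assuming it tracks huge pages mapped into the mm (hypothetical helper names):

	#include <linux/atomic.h>
	#include <linux/mm_types.h>

	static inline void mm_hugetlb_add(struct mm_struct *mm, long npages)
	{
	#ifdef CONFIG_HUGETLB_PAGE
		atomic_long_add(npages, &mm->hugetlb_usage);
	#endif
	}

	static inline long mm_hugetlb_read(struct mm_struct *mm)
	{
	#ifdef CONFIG_HUGETLB_PAGE
		return atomic_long_read(&mm->hugetlb_usage);
	#else
		return 0;
	#endif
	}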
static inline void mm_init_cpumask(struct mm_struct *mm)
TLB_REMOTE_SHOOTDOWN,
TLB_LOCAL_SHOOTDOWN,
TLB_LOCAL_MM_SHOOTDOWN,
+ TLB_REMOTE_SEND_IPI,
NR_TLB_FLUSH_REASONS,
};
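TLB_REMOTE_SEND_IPI lets the CPU that initiates a remote shootdown be traced separately from the CPUs that receive it. A sketch of a call site, assuming the existing tlb_flush tracepoint signature of (reason, pages):

	#include <trace/events/tlb.h>

	/* Hypothetical call site: record that we are about to send flush
	 * IPIs to other CPUs, before the remote handlers run.
	 */
	static void note_remote_tlb_ipi(unsigned long nr_pages)
	{
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, nr_pages);
	}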