These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
diff --git a/kernel/mm/nommu.c b/kernel/mm/nommu.c
index e544508..92be862 100644
--- a/kernel/mm/nommu.c
+++ b/kernel/mm/nommu.c
 #include <asm/mmu_context.h>
 #include "internal.h"
 
-#if 0
-#define kenter(FMT, ...) \
-       printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
-#define kleave(FMT, ...) \
-       printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
-#define kdebug(FMT, ...) \
-       printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
-#else
-#define kenter(FMT, ...) \
-       no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
-#define kleave(FMT, ...) \
-       no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
-#define kdebug(FMT, ...) \
-       no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
-#endif
-
 void *high_memory;
 EXPORT_SYMBOL(high_memory);
 struct page *mem_map;
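
Note on the hunk above: the file-local kenter()/kleave()/kdebug() wrappers are
removed outright. Their disabled variants were already compiled away via
no_printk(), which keeps printf-style format checking but emits nothing; as a
rough 4.4-era sketch (include/linux/printk.h, quoted from memory, not part of
this diff):

static inline __printf(1, 2)
int no_printk(const char *fmt, ...)
{
        /* __printf() preserves format-string checking; the empty body
         * lets the compiler discard the call entirely. */
        return 0;
}
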
@@ -340,12 +324,12 @@ long vwrite(char *buf, char *addr, unsigned long count)
 }
 
 /*
- *     vmalloc  -  allocate virtually continguos memory
+ *     vmalloc  -  allocate virtually contiguous memory
  *
  *     @size:          allocation size
  *
  *     Allocate enough pages to cover @size from the page level
- *     allocator and map them into continguos kernel virtual space.
+ *     allocator and map them into contiguous kernel virtual space.
  *
  *     For tight control over page level allocator and protection flags
  *     use __vmalloc() instead.
@@ -357,12 +341,12 @@ void *vmalloc(unsigned long size)
 EXPORT_SYMBOL(vmalloc);
 
 /*
- *     vzalloc - allocate virtually continguos memory with zero fill
+ *     vzalloc - allocate virtually contiguous memory with zero fill
  *
  *     @size:          allocation size
  *
  *     Allocate enough pages to cover @size from the page level
- *     allocator and map them into continguos kernel virtual space.
+ *     allocator and map them into contiguous kernel virtual space.
  *     The memory allocated is set to zero.
  *
  *     For tight control over page level allocator and protection flags
@@ -436,7 +420,7 @@ void *vmalloc_exec(unsigned long size)
  *     @size:          allocation size
  *
  *     Allocate enough 32bit PA addressable pages to cover @size from the
- *     page level allocator and map them into continguos kernel virtual space.
+ *     page level allocator and map them into contiguous kernel virtual space.
  */
 void *vmalloc_32(unsigned long size)
 {
@@ -594,16 +578,16 @@ static noinline void validate_nommu_regions(void)
                return;
 
        last = rb_entry(lastp, struct vm_region, vm_rb);
-       BUG_ON(unlikely(last->vm_end <= last->vm_start));
-       BUG_ON(unlikely(last->vm_top < last->vm_end));
+       BUG_ON(last->vm_end <= last->vm_start);
+       BUG_ON(last->vm_top < last->vm_end);
 
        while ((p = rb_next(lastp))) {
                region = rb_entry(p, struct vm_region, vm_rb);
                last = rb_entry(lastp, struct vm_region, vm_rb);
 
-               BUG_ON(unlikely(region->vm_end <= region->vm_start));
-               BUG_ON(unlikely(region->vm_top < region->vm_end));
-               BUG_ON(unlikely(region->vm_start < last->vm_top));
+               BUG_ON(region->vm_end <= region->vm_start);
+               BUG_ON(region->vm_top < region->vm_end);
+               BUG_ON(region->vm_start < last->vm_top);
 
                lastp = p;
        }
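
Note: the unlikely() annotations dropped above were redundant, since the
generic BUG_ON() already applies the branch hint itself; roughly
(include/asm-generic/bug.h, from memory):

#define BUG_ON(condition)       do { if (unlikely(condition)) BUG(); } while (0)
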
@@ -665,11 +649,7 @@ static void free_page_series(unsigned long from, unsigned long to)
        for (; from < to; from += PAGE_SIZE) {
                struct page *page = virt_to_page(from);
 
-               kdebug("- free %lx", from);
                atomic_long_dec(&mmap_pages_allocated);
-               if (page_count(page) != 1)
-                       kdebug("free page %p: refcount not one: %d",
-                              page, page_count(page));
                put_page(page);
        }
 }
@@ -683,8 +663,6 @@ static void free_page_series(unsigned long from, unsigned long to)
 static void __put_nommu_region(struct vm_region *region)
        __releases(nommu_region_sem)
 {
-       kenter("%p{%d}", region, region->vm_usage);
-
        BUG_ON(!nommu_region_tree.rb_node);
 
        if (--region->vm_usage == 0) {
@@ -697,10 +675,8 @@ static void __put_nommu_region(struct vm_region *region)
 
                /* IO memory and memory shared directly out of the pagecache
                 * from ramfs/tmpfs mustn't be released here */
-               if (region->vm_flags & VM_MAPPED_COPY) {
-                       kdebug("free series");
+               if (region->vm_flags & VM_MAPPED_COPY)
                        free_page_series(region->vm_start, region->vm_top);
-               }
                kmem_cache_free(vm_region_jar, region);
        } else {
                up_write(&nommu_region_sem);
@@ -744,8 +720,6 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
        struct address_space *mapping;
        struct rb_node **p, *parent, *rb_prev;
 
-       kenter(",%p", vma);
-
        BUG_ON(!vma->vm_region);
 
        mm->map_count++;
@@ -813,8 +787,6 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
        struct mm_struct *mm = vma->vm_mm;
        struct task_struct *curr = current;
 
-       kenter("%p", vma);
-
        protect_vma(vma, 0);
 
        mm->map_count--;
@@ -854,7 +826,6 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
  */
 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 {
-       kenter("%p", vma);
        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);
        if (vma->vm_file)
@@ -957,12 +928,8 @@ static int validate_mmap_request(struct file *file,
        int ret;
 
        /* do the simple checks first */
-       if (flags & MAP_FIXED) {
-               printk(KERN_DEBUG
-                      "%d: Can't do fixed-address/overlay mmap of RAM\n",
-                      current->pid);
+       if (flags & MAP_FIXED)
                return -EINVAL;
-       }
 
        if ((flags & MAP_TYPE) != MAP_PRIVATE &&
            (flags & MAP_TYPE) != MAP_SHARED)
@@ -1060,8 +1027,7 @@ static int validate_mmap_request(struct file *file,
                            ) {
                                capabilities &= ~NOMMU_MAP_DIRECT;
                                if (flags & MAP_SHARED) {
-                                       printk(KERN_WARNING
-                                              "MAP_SHARED not completely supported on !MMU\n");
+                                       pr_warn("MAP_SHARED not completely supported on !MMU\n");
                                        return -EINVAL;
                                }
                        }
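
Note: this hunk, like the later warning hunks, converts bare
printk(KERN_WARNING ...) calls to pr_warn(), which is shorthand that also picks
up any pr_fmt() prefix; roughly (include/linux/printk.h, from memory):

#define pr_warn(fmt, ...) \
        printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
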
@@ -1069,7 +1035,7 @@ static int validate_mmap_request(struct file *file,
 
                /* handle executable mappings and implied executable
                 * mappings */
-               if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
+               if (path_noexec(&file->f_path)) {
                        if (prot & PROT_EXEC)
                                return -EPERM;
                } else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
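
Note: the switch to path_noexec() above is slightly more than a cleanup:
besides the mount flag MNT_NOEXEC, it also honours the superblock-wide
SB_I_NOEXEC flag. Its 4.4-era definition is roughly (include/linux/fs.h, from
memory):

static inline bool path_noexec(const struct path *path)
{
        /* noexec if either the mount or the filesystem forbids exec */
        return (path->mnt->mnt_flags & MNT_NOEXEC) ||
               (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
}
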
@@ -1205,16 +1171,12 @@ static int do_mmap_private(struct vm_area_struct *vma,
         *   we're allocating is smaller than a page
         */
        order = get_order(len);
-       kdebug("alloc order %d for %lx", order, len);
-
        total = 1 << order;
        point = len >> PAGE_SHIFT;
 
        /* we don't want to allocate a power-of-2 sized page set */
-       if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
+       if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
                total = point;
-               kdebug("try to alloc exact %lu pages", total);
-       }
 
        base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
        if (!base)
@@ -1271,32 +1233,29 @@ enomem:
 /*
  * handle mapping creation for uClinux
  */
-unsigned long do_mmap_pgoff(struct file *file,
-                           unsigned long addr,
-                           unsigned long len,
-                           unsigned long prot,
-                           unsigned long flags,
-                           unsigned long pgoff,
-                           unsigned long *populate)
+unsigned long do_mmap(struct file *file,
+                       unsigned long addr,
+                       unsigned long len,
+                       unsigned long prot,
+                       unsigned long flags,
+                       vm_flags_t vm_flags,
+                       unsigned long pgoff,
+                       unsigned long *populate)
 {
        struct vm_area_struct *vma;
        struct vm_region *region;
        struct rb_node *rb;
-       unsigned long capabilities, vm_flags, result;
+       unsigned long capabilities, result;
        int ret;
 
-       kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);
-
        *populate = 0;
 
        /* decide whether we should attempt the mapping, and if so what sort of
         * mapping */
        ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
                                    &capabilities);
-       if (ret < 0) {
-               kleave(" = %d [val]", ret);
+       if (ret < 0)
                return ret;
-       }
 
        /* we ignore the address hint */
        addr = 0;
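
Note: the rename above gives do_mmap() an explicit vm_flags argument (hence the
corresponding change from '=' to '|=' when merging in determine_vm_flags()
below). Existing callers of the old entry point keep working because
include/linux/mm.h gains a thin wrapper, roughly (sketch from memory, not shown
in this diff):

static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
              unsigned long len, unsigned long prot, unsigned long flags,
              unsigned long pgoff, unsigned long *populate)
{
        /* legacy entry point: same mapping request, empty vm_flags */
        return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate);
}
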
@@ -1304,7 +1263,7 @@ unsigned long do_mmap_pgoff(struct file *file,
 
        /* we've determined that we can make the mapping, now translate what we
         * now know into VMA flags */
-       vm_flags = determine_vm_flags(file, prot, flags, capabilities);
+       vm_flags |= determine_vm_flags(file, prot, flags, capabilities);
 
        /* we're going to need to record the mapping */
        region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
@@ -1383,11 +1342,9 @@ unsigned long do_mmap_pgoff(struct file *file,
                        vma->vm_start = start;
                        vma->vm_end = start + len;
 
-                       if (pregion->vm_flags & VM_MAPPED_COPY) {
-                               kdebug("share copy");
+                       if (pregion->vm_flags & VM_MAPPED_COPY)
                                vma->vm_flags |= VM_MAPPED_COPY;
-                       } else {
-                               kdebug("share mmap");
+                       else {
                                ret = do_mmap_shared_file(vma);
                                if (ret < 0) {
                                        vma->vm_region = NULL;
@@ -1467,7 +1424,6 @@ share:
 
        up_write(&nommu_region_sem);
 
-       kleave(" = %lx", result);
        return result;
 
 error_just_free:
@@ -1479,27 +1435,24 @@ error:
        if (vma->vm_file)
                fput(vma->vm_file);
        kmem_cache_free(vm_area_cachep, vma);
-       kleave(" = %d", ret);
        return ret;
 
 sharing_violation:
        up_write(&nommu_region_sem);
-       printk(KERN_WARNING "Attempt to share mismatched mappings\n");
+       pr_warn("Attempt to share mismatched mappings\n");
        ret = -EINVAL;
        goto error;
 
 error_getting_vma:
        kmem_cache_free(vm_region_jar, region);
-       printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
-              " from process %d failed\n",
-              len, current->pid);
+       pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
+                       len, current->pid);
        show_free_areas(0);
        return -ENOMEM;
 
 error_getting_region:
-       printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
-              " from process %d failed\n",
-              len, current->pid);
+       pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
+                       len, current->pid);
        show_free_areas(0);
        return -ENOMEM;
 }
@@ -1544,7 +1497,7 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
 
        if (copy_from_user(&a, arg, sizeof(a)))
                return -EFAULT;
-       if (a.offset & ~PAGE_MASK)
+       if (offset_in_page(a.offset))
                return -EINVAL;
 
        return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
@@ -1563,8 +1516,6 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        struct vm_region *region;
        unsigned long npages;
 
-       kenter("");
-
        /* we're only permitted to split anonymous regions (these should have
         * only a single usage on the region) */
        if (vma->vm_file)
@@ -1628,8 +1579,6 @@ static int shrink_vma(struct mm_struct *mm,
 {
        struct vm_region *region;
 
-       kenter("");
-
        /* adjust the VMA's pointers, which may reposition it in the MM's tree
         * and list */
        delete_vma_from_mm(vma);
@@ -1669,8 +1618,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
        unsigned long end;
        int ret;
 
-       kenter(",%lx,%zx", start, len);
-
        len = PAGE_ALIGN(len);
        if (len == 0)
                return -EINVAL;
@@ -1682,11 +1629,9 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
        if (!vma) {
                static int limit;
                if (limit < 5) {
-                       printk(KERN_WARNING
-                              "munmap of memory not mmapped by process %d"
-                              " (%s): 0x%lx-0x%lx\n",
-                              current->pid, current->comm,
-                              start, start + len - 1);
+                       pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
+                                       current->pid, current->comm,
+                                       start, start + len - 1);
                        limit++;
                }
                return -EINVAL;
@@ -1695,38 +1640,27 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
        /* we're allowed to split an anonymous VMA but not a file-backed one */
        if (vma->vm_file) {
                do {
-                       if (start > vma->vm_start) {
-                               kleave(" = -EINVAL [miss]");
+                       if (start > vma->vm_start)
                                return -EINVAL;
-                       }
                        if (end == vma->vm_end)
                                goto erase_whole_vma;
                        vma = vma->vm_next;
                } while (vma);
-               kleave(" = -EINVAL [split file]");
                return -EINVAL;
        } else {
                /* the chunk must be a subset of the VMA found */
                if (start == vma->vm_start && end == vma->vm_end)
                        goto erase_whole_vma;
-               if (start < vma->vm_start || end > vma->vm_end) {
-                       kleave(" = -EINVAL [superset]");
+               if (start < vma->vm_start || end > vma->vm_end)
                        return -EINVAL;
-               }
-               if (start & ~PAGE_MASK) {
-                       kleave(" = -EINVAL [unaligned start]");
+               if (offset_in_page(start))
                        return -EINVAL;
-               }
-               if (end != vma->vm_end && end & ~PAGE_MASK) {
-                       kleave(" = -EINVAL [unaligned split]");
+               if (end != vma->vm_end && offset_in_page(end))
                        return -EINVAL;
-               }
                if (start != vma->vm_start && end != vma->vm_end) {
                        ret = split_vma(mm, vma, start, 1);
-                       if (ret < 0) {
-                               kleave(" = %d [split]", ret);
+                       if (ret < 0)
                                return ret;
-                       }
                }
                return shrink_vma(mm, vma, start, end);
        }
@@ -1734,7 +1668,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
 erase_whole_vma:
        delete_vma_from_mm(vma);
        delete_vma(mm, vma);
-       kleave(" = 0");
        return 0;
 }
 EXPORT_SYMBOL(do_munmap);
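
Note: do_munmap() above, and several other hunks in this diff, replace
open-coded 'addr & ~PAGE_MASK' tests with offset_in_page(). The two are
equivalent; the helper is roughly (include/linux/mm.h, from memory):

/* nonzero iff the value is not page-aligned */
#define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
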
@@ -1766,8 +1699,6 @@ void exit_mmap(struct mm_struct *mm)
        if (!mm)
                return;
 
-       kenter("");
-
        mm->total_vm = 0;
 
        while ((vma = mm->mmap)) {
@@ -1776,8 +1707,6 @@ void exit_mmap(struct mm_struct *mm)
                delete_vma(mm, vma);
                cond_resched();
        }
-
-       kleave("");
 }
 
 unsigned long vm_brk(unsigned long addr, unsigned long len)
@@ -1807,7 +1736,7 @@ static unsigned long do_mremap(unsigned long addr,
        if (old_len == 0 || new_len == 0)
                return (unsigned long) -EINVAL;
 
-       if (addr & ~PAGE_MASK)
+       if (offset_in_page(addr))
                return -EINVAL;
 
        if (flags & MREMAP_FIXED && new_addr != addr)
@@ -2157,7 +2086,7 @@ static int __meminit init_user_reserve(void)
        sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
        return 0;
 }
-module_init(init_user_reserve)
+subsys_initcall(init_user_reserve);
 
 /*
  * Initialise sysctl_admin_reserve_kbytes.
@@ -2178,4 +2107,4 @@ static int __meminit init_admin_reserve(void)
        sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
        return 0;
 }
-module_init(init_admin_reserve)
+subsys_initcall(init_admin_reserve);
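
Note: the final two hunks promote the reserve-initialisation routines from
module_init() to subsys_initcall(), so sysctl_user_reserve_kbytes and
sysctl_admin_reserve_kbytes are computed earlier in boot. For reference, the
relevant 4.4-era initcall levels are roughly (include/linux/init.h, from
memory; lower levels run earlier):

#define subsys_initcall(fn)     __define_initcall(fn, 4)
#define device_initcall(fn)     __define_initcall(fn, 6)
#define __initcall(fn)          device_initcall(fn)
#define module_init(x)          __initcall(x);      /* when built-in */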