These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] kernel/mm/kasan/kasan.c
index 6c513a6..bc0a8d8 100644
@@ -2,9 +2,9 @@
  * This file contains shadow memory manipulation code.
  *
  * Copyright (c) 2014 Samsung Electronics Co., Ltd.
- * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
  *
- * Some of code borrowed from https://github.com/xairy/linux by
+ * Some code borrowed from https://github.com/xairy/kasan-prototype by
  *        Andrey Konovalov <adech.fo@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
@@ -19,6 +19,7 @@
 #include <linux/export.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/kmemleak.h>
 #include <linux/memblock.h>
 #include <linux/memory.h>
 #include <linux/mm.h>
@@ -86,6 +87,11 @@ static __always_inline bool memory_is_poisoned_2(unsigned long addr)
                if (memory_is_poisoned_1(addr + 1))
                        return true;
 
+               /*
+                * If a single shadow byte covers the 2-byte access, we don't
+                * need to do anything more. Otherwise, test the first
+                * shadow byte.
+                */
                if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
                        return false;
 
@@ -103,6 +109,11 @@ static __always_inline bool memory_is_poisoned_4(unsigned long addr)
                if (memory_is_poisoned_1(addr + 3))
                        return true;
 
+               /*
+                * If a single shadow byte covers the 4-byte access, we don't
+                * need to do anything more. Otherwise, test the first
+                * shadow byte.
+                */
                if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
                        return false;
 
@@ -120,7 +131,12 @@ static __always_inline bool memory_is_poisoned_8(unsigned long addr)
                if (memory_is_poisoned_1(addr + 7))
                        return true;
 
-               if (likely(((addr + 7) & KASAN_SHADOW_MASK) >= 7))
+               /*
+                * If a single shadow byte covers the 8-byte access, we don't
+                * need to do anything more. Otherwise, test the first
+                * shadow byte.
+                */
+               if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                        return false;
 
                return unlikely(*(u8 *)shadow_addr);
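This hunk is a readability cleanup, not a behaviour change: with KASAN_SHADOW_MASK equal to 7, ((addr + 7) & 7) >= 7 can only hold when addr is 8-byte aligned, which IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE) states directly. A standalone check of the equivalence (macros redefined locally with the kernel's values):

    #include <assert.h>
    #include <stdbool.h>

    #define KASAN_SHADOW_SCALE_SIZE 8UL
    #define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
            /* Exhaustive over all residues mod 8: old and new tests agree. */
            for (unsigned long addr = 0; addr < 64; addr++) {
                    bool old_test = ((addr + 7) & KASAN_SHADOW_MASK) >= 7;
                    bool new_test = IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE);
                    assert(old_test == new_test);
            }
            return 0;
    }
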
@@ -135,12 +151,16 @@ static __always_inline bool memory_is_poisoned_16(unsigned long addr)
 
        if (unlikely(*shadow_addr)) {
                u16 shadow_first_bytes = *(u16 *)shadow_addr;
-               s8 last_byte = (addr + 15) & KASAN_SHADOW_MASK;
 
                if (unlikely(shadow_first_bytes))
                        return true;
 
-               if (likely(!last_byte))
+               /*
+                * If two shadow bytes cover the 16-byte access, we don't
+                * need to do anything more. Otherwise, test the last
+                * shadow byte.
+                */
+               if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                        return false;
 
                return memory_is_poisoned_1(addr + 15);
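Unlike the 8-byte hunk above, this one fixes behaviour. The removed test skipped the final check when (addr + 15) & 7 == 0, which holds for addr % 8 == 1, precisely the misaligned case where the access spills into a third shadow byte that the u16 read does not cover, while an aligned access, already fully covered by two shadow bytes, fell through to a redundant memory_is_poisoned_1() call. A sketch contrasting the two predicates (the addresses are arbitrary examples):

    #include <stdio.h>

    #define KASAN_SHADOW_MASK 7UL
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
            unsigned long aligned = 0x1000, unaligned = 0x1001;

            /* Old predicate: skip the last-byte check when (addr + 15) & 7 == 0. */
            printf("old skips 0x%lx: %d\n", unaligned,
                   ((unaligned + 15) & KASAN_SHADOW_MASK) == 0); /* 1: wrongly skipped */
            printf("old skips 0x%lx: %d\n", aligned,
                   ((aligned + 15) & KASAN_SHADOW_MASK) == 0);   /* 0: redundant recheck */
            /* Fixed predicate: skip only when two shadow bytes fully cover it. */
            printf("new skips 0x%lx: %d\n", aligned, IS_ALIGNED(aligned, 8)); /* 1 */
            return 0;
    }
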
@@ -204,7 +224,7 @@ static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);
 
                if (unlikely(ret != (unsigned long)last_shadow ||
-                       ((last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
+                       ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
                        return true;
        }
        return false;
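The added (long) cast fixes a signed/unsigned comparison: last_byte is unsigned long, so a negative shadow value (a poisoned granule) was converted to a huge unsigned number and the >= test could never flag it. A minimal userspace demonstration of the conversion trap:

    #include <stdio.h>

    int main(void)
    {
            signed char shadow = -1;  /* a poisoned shadow byte */
            unsigned long offset = 3; /* last_byte & KASAN_SHADOW_MASK */

            /* Unsigned comparison: shadow converts to ULONG_MAX, result 0,
             * so the poisoned granule goes unreported. */
            printf("unsigned: %d\n", offset >= shadow);
            /* With the cast both sides are signed: 3 >= -1, result 1. */
            printf("signed:   %d\n", (long)offset >= shadow);
            return 0;
    }
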
@@ -236,18 +256,12 @@ static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
 static __always_inline void check_memory_region(unsigned long addr,
                                                size_t size, bool write)
 {
-       struct kasan_access_info info;
-
        if (unlikely(size == 0))
                return;
 
        if (unlikely((void *)addr <
                kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
-               info.access_addr = (void *)addr;
-               info.access_size = size;
-               info.is_write = write;
-               info.ip = _RET_IP_;
-               kasan_report_user_access(&info);
+               kasan_report(addr, size, write, _RET_IP_);
                return;
        }
 
@@ -431,6 +445,7 @@ int kasan_module_alloc(void *addr, size_t size)
 
        if (ret) {
                find_vm_area(addr)->flags |= VM_KASAN;
+               kmemleak_ignore(ret);
                return 0;
        }
 
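The kmemleak_ignore() call suppresses a false positive: the shadow area allocated here for module space is never stored in any pointer kmemleak can scan (it is recomputed via kasan_mem_to_shadow() when needed), so kmemleak would otherwise report it as leaked. The same pattern applies to any long-lived allocation reachable only indirectly; a hypothetical sketch (names and size invented for illustration):

    #include <linux/kmemleak.h>
    #include <linux/slab.h>

    static void *lifetime_buf;

    static int example_setup(void)
    {
            /* Kept for the lifetime of the system; intentionally not freed. */
            lifetime_buf = kmalloc(4096, GFP_KERNEL);
            if (!lifetime_buf)
                    return -ENOMEM;
            /* Tell kmemleak neither to scan nor to report this object. */
            kmemleak_ignore(lifetime_buf);
            return 0;
    }
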
@@ -525,7 +540,7 @@ static int kasan_mem_notifier(struct notifier_block *nb,
 
 static int __init kasan_memhotplug_init(void)
 {
-       pr_err("WARNING: KASan doesn't support memory hot-add\n");
+       pr_err("WARNING: KASAN doesn't support memory hot-add\n");
        pr_err("Memory hot-add will be disabled\n");
 
        hotplug_memory_notifier(kasan_mem_notifier, 0);