These changes are the raw update to the linux-4.4.6-rt14 kernel sources.

diff --git a/kernel/mm/kmemleak.c b/kernel/mm/kmemleak.c
index 3716cdb..19423a4 100644
--- a/kernel/mm/kmemleak.c
+++ b/kernel/mm/kmemleak.c
  *   modifications to the memory scanning parameters including the scan_thread
  *   pointer
  *
+ * Locks and mutexes are acquired/nested in the following order:
+ *
+ *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
+ *
+ * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
+ * regions.
+ *
  * The kmemleak_object structures have a use_count incremented or decremented
  * using the get_object()/put_object() functions. When the use_count becomes
  * 0, this count can no longer be incremented and put_object() schedules the
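
The new locking rules above are easiest to read as code. Here is a minimal
sketch of the documented acquisition order, assuming the kmemleak internals
named in the comment (scan_mutex, kmemleak_lock, struct kmemleak_object);
it is illustrative only, not part of the patch:

    /* Illustrative only: the nesting order from the comment above. */
    static void locking_order_sketch(struct kmemleak_object *obj,
                                     struct kmemleak_object *other)
    {
            unsigned long flags;

            mutex_lock(&scan_mutex);                      /* outermost */
            spin_lock_irqsave(&obj->lock, flags);         /* optional step */
            read_lock(&kmemleak_lock);                    /* irqs already off */
            /* a second object's lock needs the lockdep nesting annotation */
            spin_lock_nested(&other->lock, SINGLE_DEPTH_NESTING);

            spin_unlock(&other->lock);
            read_unlock(&kmemleak_lock);
            spin_unlock_irqrestore(&obj->lock, flags);
            mutex_unlock(&scan_mutex);
    }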
@@ -295,23 +302,14 @@ static void hex_dump_object(struct seq_file *seq,
                            struct kmemleak_object *object)
 {
        const u8 *ptr = (const u8 *)object->pointer;
-       int i, len, remaining;
-       unsigned char linebuf[HEX_ROW_SIZE * 5];
+       size_t len;
 
        /* limit the number of lines to HEX_MAX_LINES */
-       remaining = len =
-               min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
-
-       seq_printf(seq, "  hex dump (first %d bytes):\n", len);
-       for (i = 0; i < len; i += HEX_ROW_SIZE) {
-               int linelen = min(remaining, HEX_ROW_SIZE);
-
-               remaining -= HEX_ROW_SIZE;
-               hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
-                                  HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
-                                  HEX_ASCII);
-               seq_printf(seq, "    %s\n", linebuf);
-       }
+       len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
+
+       seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
+       seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
+                    HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
 }
 
 /*
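
The hunk above replaces the open-coded hex_dump_to_buffer() loop with
seq_hex_dump(), the seq_file helper introduced in 4.3. A small sketch of the
same call in a hypothetical show() callback (demo_show and its buffer are
illustrative; rowsize 16 and groupsize 1 match HEX_ROW_SIZE/HEX_GROUP_SIZE):

    static int demo_show(struct seq_file *seq, void *v)
    {
            static const u8 buf[] = "kmemleak";   /* illustrative buffer */

            seq_printf(seq, "  hex dump (first %zu bytes):\n", sizeof(buf));
            seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, 16, 1,
                         buf, sizeof(buf), true);
            return 0;
    }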
@@ -481,12 +479,11 @@ static void put_object(struct kmemleak_object *object)
 static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
 {
        unsigned long flags;
-       struct kmemleak_object *object = NULL;
+       struct kmemleak_object *object;
 
        rcu_read_lock();
        read_lock_irqsave(&kmemleak_lock, flags);
-       if (ptr >= min_addr && ptr < max_addr)
-               object = lookup_object(ptr, alias);
+       object = lookup_object(ptr, alias);
        read_unlock_irqrestore(&kmemleak_lock, flags);
 
        /* check whether the object is still available */
@@ -497,6 +494,27 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
        return object;
 }
 
+/*
+ * Look up an object in the object search tree and remove it from both
+ * object_tree_root and object_list. The returned object's use_count should be
+ * at least 1, as initially set by create_object().
+ */
+static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
+{
+       unsigned long flags;
+       struct kmemleak_object *object;
+
+       write_lock_irqsave(&kmemleak_lock, flags);
+       object = lookup_object(ptr, alias);
+       if (object) {
+               rb_erase(&object->rb_node, &object_tree_root);
+               list_del_rcu(&object->object_list);
+       }
+       write_unlock_irqrestore(&kmemleak_lock, flags);
+
+       return object;
+}
+
 /*
  * Save stack trace to the given array of MAX_TRACE size.
  */
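
For context on the new helper, here is a sketch of the intended caller
pattern, mirroring delete_object_full() later in this patch (demo_free_tracked
is a hypothetical name). The returned object is already unlinked but still
carries the initial reference from create_object(), so the only remaining
put_object() is the one inside __delete_object():

    /* Hypothetical caller of find_and_remove_object(). */
    static void demo_free_tracked(unsigned long ptr)
    {
            struct kmemleak_object *object;

            object = find_and_remove_object(ptr, 0);
            if (!object)
                    return;                 /* never tracked */
            __delete_object(object);        /* drops the last reference */
    }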
@@ -582,11 +600,13 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
                        kmemleak_stop("Cannot insert 0x%lx into the object "
                                      "search tree (overlaps existing)\n",
                                      ptr);
+                       /*
+                        * No need for parent->lock here since "parent" cannot
+                        * be freed while the kmemleak_lock is held.
+                        */
+                       dump_object_info(parent);
                        kmem_cache_free(object_cache, object);
-                       object = parent;
-                       spin_lock(&object->lock);
-                       dump_object_info(object);
-                       spin_unlock(&object->lock);
+                       object = NULL;
                        goto out;
                }
        }
@@ -600,20 +620,14 @@ out:
 }
 
 /*
- * Remove the metadata (struct kmemleak_object) for a memory block from the
- * object_list and object_tree_root and decrement its use_count.
+ * Mark the object as not allocated and schedule RCU freeing via put_object().
  */
 static void __delete_object(struct kmemleak_object *object)
 {
        unsigned long flags;
 
-       write_lock_irqsave(&kmemleak_lock, flags);
-       rb_erase(&object->rb_node, &object_tree_root);
-       list_del_rcu(&object->object_list);
-       write_unlock_irqrestore(&kmemleak_lock, flags);
-
        WARN_ON(!(object->flags & OBJECT_ALLOCATED));
-       WARN_ON(atomic_read(&object->use_count) < 2);
+       WARN_ON(atomic_read(&object->use_count) < 1);
 
        /*
         * Locking here also ensures that the corresponding memory block
@@ -633,7 +647,7 @@ static void delete_object_full(unsigned long ptr)
 {
        struct kmemleak_object *object;
 
-       object = find_and_get_object(ptr, 0);
+       object = find_and_remove_object(ptr, 0);
        if (!object) {
 #ifdef DEBUG
                kmemleak_warn("Freeing unknown object at 0x%08lx\n",
@@ -642,7 +656,6 @@ static void delete_object_full(unsigned long ptr)
                return;
        }
        __delete_object(object);
-       put_object(object);
 }
 
 /*
@@ -655,7 +668,7 @@ static void delete_object_part(unsigned long ptr, size_t size)
        struct kmemleak_object *object;
        unsigned long start, end;
 
-       object = find_and_get_object(ptr, 1);
+       object = find_and_remove_object(ptr, 1);
        if (!object) {
 #ifdef DEBUG
                kmemleak_warn("Partially freeing unknown object at 0x%08lx "
@@ -663,7 +676,6 @@ static void delete_object_part(unsigned long ptr, size_t size)
 #endif
                return;
        }
-       __delete_object(object);
 
        /*
         * Create one or two objects that may result from the memory block
@@ -681,7 +693,7 @@ static void delete_object_part(unsigned long ptr, size_t size)
                create_object(ptr + size, end - ptr - size, object->min_count,
                              GFP_KERNEL);
 
-       put_object(object);
+       __delete_object(object);
 }
 
 static void __paint_it(struct kmemleak_object *object, int color)
@@ -817,6 +829,7 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
        }
 
        if (crt_early_log >= ARRAY_SIZE(early_log)) {
+               crt_early_log++;
                kmemleak_disable();
                return;
        }
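
The extra increment pairs with the relaxed check in the final hunk of this
patch: crt_early_log now also counts the attempt that overflowed, so
kmemleak_init() warns only when the buffer was genuinely exceeded rather than
exactly filled. A standalone sketch of the counting pattern (userspace C,
hypothetical names):

    #include <stdio.h>

    #define LOG_SIZE 4

    static int stored[LOG_SIZE];
    static int crt;                 /* counts attempts, not just stores */

    static void log_entry(int v)
    {
            if (crt >= LOG_SIZE) {
                    crt++;          /* record the failed attempt */
                    return;
            }
            stored[crt++] = v;
    }

    int main(void)
    {
            for (int i = 0; i < 6; i++)
                    log_entry(i);
            if (crt > LOG_SIZE)     /* '>' not '>=': exactly full is fine */
                    printf("log exceeded: %d entries attempted\n", crt);
            return 0;
    }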
@@ -1151,19 +1164,18 @@ static int scan_should_stop(void)
  * found to the gray list.
  */
 static void scan_block(void *_start, void *_end,
-                      struct kmemleak_object *scanned, int allow_resched)
+                      struct kmemleak_object *scanned)
 {
        unsigned long *ptr;
        unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
        unsigned long *end = _end - (BYTES_PER_POINTER - 1);
+       unsigned long flags;
 
+       read_lock_irqsave(&kmemleak_lock, flags);
        for (ptr = start; ptr < end; ptr++) {
                struct kmemleak_object *object;
-               unsigned long flags;
                unsigned long pointer;
 
-               if (allow_resched)
-                       cond_resched();
                if (scan_should_stop())
                        break;
 
@@ -1176,26 +1188,31 @@ static void scan_block(void *_start, void *_end,
                pointer = *ptr;
                kasan_enable_current();
 
-               object = find_and_get_object(pointer, 1);
+               if (pointer < min_addr || pointer >= max_addr)
+                       continue;
+
+               /*
+                * No need for get_object() here since we hold kmemleak_lock.
+                * object->use_count cannot be dropped to 0 while the object
+                * is still present in object_tree_root and object_list
+                * (with updates protected by kmemleak_lock).
+                */
+               object = lookup_object(pointer, 1);
                if (!object)
                        continue;
-               if (object == scanned) {
+               if (object == scanned)
                        /* self referenced, ignore */
-                       put_object(object);
                        continue;
-               }
 
                /*
                 * Avoid the lockdep recursive warning on object->lock being
                 * previously acquired in scan_object(). These locks are
                 * enclosed by scan_mutex.
                 */
-               spin_lock_irqsave_nested(&object->lock, flags,
-                                        SINGLE_DEPTH_NESTING);
+               spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
                if (!color_white(object)) {
                        /* non-orphan, ignored or new */
-                       spin_unlock_irqrestore(&object->lock, flags);
-                       put_object(object);
+                       spin_unlock(&object->lock);
                        continue;
                }
 
@@ -1207,13 +1224,27 @@ static void scan_block(void *_start, void *_end,
                 */
                object->count++;
                if (color_gray(object)) {
+                       /* put_object() called when removing from gray_list */
+                       WARN_ON(!get_object(object));
                        list_add_tail(&object->gray_list, &gray_list);
-                       spin_unlock_irqrestore(&object->lock, flags);
-                       continue;
                }
+               spin_unlock(&object->lock);
+       }
+       read_unlock_irqrestore(&kmemleak_lock, flags);
+}
 
-               spin_unlock_irqrestore(&object->lock, flags);
-               put_object(object);
+/*
+ * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
+ */
+static void scan_large_block(void *start, void *end)
+{
+       void *next;
+
+       while (start < end) {
+               next = min(start + MAX_SCAN_SIZE, end);
+               scan_block(start, next, NULL);
+               start = next;
+               cond_resched();
        }
 }
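
A distilled sketch of the lifetime argument in the new comment (demo_reader is
a hypothetical name): the read side pins the object simply by holding
kmemleak_lock, because every unlink path, such as find_and_remove_object()
above, must take the same lock as a writer before the final reference can be
dropped:

    /* Illustrative reader; relies on the kmemleak internals above. */
    static void demo_reader(unsigned long pointer)
    {
            unsigned long flags;
            struct kmemleak_object *object;

            read_lock_irqsave(&kmemleak_lock, flags);
            object = lookup_object(pointer, 1);
            if (object) {
                    /* no get_object(): unlinking requires the write
                     * lock, excluded while the read lock is held */
                    spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
                    /* ... inspect the object ... */
                    spin_unlock(&object->lock);
            }
            read_unlock_irqrestore(&kmemleak_lock, flags);
    }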
 
@@ -1239,22 +1270,25 @@ static void scan_object(struct kmemleak_object *object)
        if (hlist_empty(&object->area_list)) {
                void *start = (void *)object->pointer;
                void *end = (void *)(object->pointer + object->size);
+               void *next;
+
+               do {
+                       next = min(start + MAX_SCAN_SIZE, end);
+                       scan_block(start, next, object);
 
-               while (start < end && (object->flags & OBJECT_ALLOCATED) &&
-                      !(object->flags & OBJECT_NO_SCAN)) {
-                       scan_block(start, min(start + MAX_SCAN_SIZE, end),
-                                  object, 0);
-                       start += MAX_SCAN_SIZE;
+                       start = next;
+                       if (start >= end)
+                               break;
 
                        spin_unlock_irqrestore(&object->lock, flags);
                        cond_resched();
                        spin_lock_irqsave(&object->lock, flags);
-               }
+               } while (object->flags & OBJECT_ALLOCATED);
        } else
                hlist_for_each_entry(area, &object->area_list, node)
                        scan_block((void *)area->start,
                                   (void *)(area->start + area->size),
-                                  object, 0);
+                                  object);
 out:
        spin_unlock_irqrestore(&object->lock, flags);
 }
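
The rewritten loop is the classic lock-break idiom, distilled below with
hypothetical demo names: drop the lock before yielding, since cond_resched()
may sleep, then revalidate the state the lock protected once it is
reacquired:

    /* Illustrative lock-break loop; "chunks" stands in for start/end. */
    static void demo_chunked_scan(struct kmemleak_object *object, int chunks)
    {
            unsigned long flags;

            spin_lock_irqsave(&object->lock, flags);
            do {
                    /* ... process one MAX_SCAN_SIZE chunk ... */
                    if (--chunks == 0)
                            break;

                    spin_unlock_irqrestore(&object->lock, flags);
                    cond_resched();
                    spin_lock_irqsave(&object->lock, flags);
                    /* the block may have been freed while unlocked */
            } while (object->flags & OBJECT_ALLOCATED);
            spin_unlock_irqrestore(&object->lock, flags);
    }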
@@ -1331,14 +1365,14 @@ static void kmemleak_scan(void)
        rcu_read_unlock();
 
        /* data/bss scanning */
-       scan_block(_sdata, _edata, NULL, 1);
-       scan_block(__bss_start, __bss_stop, NULL, 1);
+       scan_large_block(_sdata, _edata);
+       scan_large_block(__bss_start, __bss_stop);
 
 #ifdef CONFIG_SMP
        /* per-cpu sections scanning */
        for_each_possible_cpu(i)
-               scan_block(__per_cpu_start + per_cpu_offset(i),
-                          __per_cpu_end + per_cpu_offset(i), NULL, 1);
+               scan_large_block(__per_cpu_start + per_cpu_offset(i),
+                                __per_cpu_end + per_cpu_offset(i));
 #endif
 
        /*
@@ -1359,7 +1393,7 @@ static void kmemleak_scan(void)
                        /* only scan if page is in use */
                        if (page_count(page) == 0)
                                continue;
-                       scan_block(page, page + 1, NULL, 1);
+                       scan_block(page, page + 1, NULL);
                }
        }
        put_online_mems();
@@ -1373,7 +1407,7 @@ static void kmemleak_scan(void)
                read_lock(&tasklist_lock);
                do_each_thread(g, p) {
                        scan_block(task_stack_page(p), task_stack_page(p) +
-                                  THREAD_SIZE, NULL, 0);
+                                  THREAD_SIZE, NULL);
                } while_each_thread(g, p);
                read_unlock(&tasklist_lock);
        }
@@ -1750,7 +1784,6 @@ static void __kmemleak_do_cleanup(void)
  */
 static void kmemleak_do_cleanup(struct work_struct *work)
 {
-       mutex_lock(&scan_mutex);
        stop_scan_thread();
 
        /*
@@ -1765,7 +1798,6 @@ static void kmemleak_do_cleanup(struct work_struct *work)
        else
                pr_info("Kmemleak disabled without freeing internal data. "
                        "Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\"\n");
-       mutex_unlock(&scan_mutex);
 }
 
 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
@@ -1842,7 +1874,7 @@ void __init kmemleak_init(void)
        object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
        scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
 
-       if (crt_early_log >= ARRAY_SIZE(early_log))
+       if (crt_early_log > ARRAY_SIZE(early_log))
                pr_warning("Early log buffer exceeded (%d), please increase "
                           "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);