These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
diff --git a/kernel/kernel/locking/lockdep.c b/kernel/kernel/locking/lockdep.c
index 577f026..e98ee95 100644
@@ -6,7 +6,7 @@
  * Started by Ingo Molnar:
  *
  *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
- *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  *
  * this code maps all the lock dependencies as they occur in a live kernel
  * and will warn about the following classes of locking bugs:
@@ -2738,7 +2738,7 @@ static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
                return;
 
        /* no reclaim without waiting on it */
-       if (!(gfp_mask & __GFP_WAIT))
+       if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
                return;
 
        /* this guy won't enter reclaim */
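
The hunk above follows the mainline gfp-flags rework in which __GFP_WAIT was split up: lockdep's "may this allocation enter reclaim?" check now keys off __GFP_DIRECT_RECLAIM instead. A minimal sketch of the intent, assuming the usual composite flag definitions (GFP_KERNEL includes __GFP_DIRECT_RECLAIM, GFP_ATOMIC does not); it is illustrative only and not part of the patch:

#include <linux/slab.h>
#include <linux/gfp.h>

static void lockdep_trace_alloc_examples(size_t sz)
{
	void *a, *b;

	a = kmalloc(sz, GFP_KERNEL);	/* may enter direct reclaim: lockdep keeps tracking */
	b = kmalloc(sz, GFP_ATOMIC);	/* no __GFP_DIRECT_RECLAIM: __lockdep_trace_alloc() returns early */

	kfree(a);
	kfree(b);
}
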
@@ -3068,7 +3068,7 @@ static int __lock_is_held(struct lockdep_map *lock);
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                          int trylock, int read, int check, int hardirqs_off,
                          struct lockdep_map *nest_lock, unsigned long ip,
-                         int references)
+                         int references, int pin_count)
 {
        struct task_struct *curr = current;
        struct lock_class *class = NULL;
@@ -3157,6 +3157,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        hlock->waittime_stamp = 0;
        hlock->holdtime_stamp = lockstat_clock();
 #endif
+       hlock->pin_count = pin_count;
 
        if (check && !mark_irqflags(curr, hlock))
                return 0;
@@ -3260,26 +3261,6 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
        return 0;
 }
 
-/*
- * Common debugging checks for both nested and non-nested unlock:
- */
-static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
-                       unsigned long ip)
-{
-       if (unlikely(!debug_locks))
-               return 0;
-       /*
-        * Lockdep should run with IRQs disabled, recursion, head-ache, etc..
-        */
-       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-               return 0;
-
-       if (curr->lockdep_depth <= 0)
-               return print_unlock_imbalance_bug(curr, lock, ip);
-
-       return 1;
-}
-
 static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 {
        if (hlock->instance == lock)
@@ -3362,7 +3343,7 @@ found_it:
                        hlock_class(hlock)->subclass, hlock->trylock,
                                hlock->read, hlock->check, hlock->hardirqs_off,
                                hlock->nest_lock, hlock->acquire_ip,
-                               hlock->references))
+                               hlock->references, hlock->pin_count))
                        return 0;
        }
 
@@ -3376,31 +3357,35 @@ found_it:
 }
 
 /*
- * Remove the lock to the list of currently held locks in a
- * potentially non-nested (out of order) manner. This is a
- * relatively rare operation, as all the unlock APIs default
- * to nested mode (which uses lock_release()):
+ * Remove the lock to the list of currently held locks - this gets
+ * called on mutex_unlock()/spin_unlock*() (or on a failed
+ * mutex_lock_interruptible()).
+ *
+ * @nested is an hysterical artifact, needs a tree wide cleanup.
  */
 static int
-lock_release_non_nested(struct task_struct *curr,
-                       struct lockdep_map *lock, unsigned long ip)
+__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 {
+       struct task_struct *curr = current;
        struct held_lock *hlock, *prev_hlock;
        unsigned int depth;
        int i;
 
-       /*
-        * Check whether the lock exists in the current stack
-        * of held locks:
-        */
+       if (unlikely(!debug_locks))
+               return 0;
+
        depth = curr->lockdep_depth;
        /*
         * So we're all set to release this lock.. wait what lock? We don't
         * own any locks, you've been drinking again?
         */
-       if (DEBUG_LOCKS_WARN_ON(!depth))
-               return 0;
+       if (DEBUG_LOCKS_WARN_ON(depth <= 0))
+                return print_unlock_imbalance_bug(curr, lock, ip);
 
+       /*
+        * Check whether the lock exists in the current stack
+        * of held locks:
+        */
        prev_hlock = NULL;
        for (i = depth-1; i >= 0; i--) {
                hlock = curr->held_locks + i;
@@ -3419,6 +3404,8 @@ found_it:
        if (hlock->instance == lock)
                lock_release_holdtime(hlock);
 
+       WARN(hlock->pin_count, "releasing a pinned lock\n");
+
        if (hlock->references) {
                hlock->references--;
                if (hlock->references) {
@@ -3446,7 +3433,7 @@ found_it:
                        hlock_class(hlock)->subclass, hlock->trylock,
                                hlock->read, hlock->check, hlock->hardirqs_off,
                                hlock->nest_lock, hlock->acquire_ip,
-                               hlock->references))
+                               hlock->references, hlock->pin_count))
                        return 0;
        }
 
@@ -3456,91 +3443,66 @@ found_it:
         */
        if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
                return 0;
+
        return 1;
 }
 
-/*
- * Remove the lock to the list of currently held locks - this gets
- * called on mutex_unlock()/spin_unlock*() (or on a failed
- * mutex_lock_interruptible()). This is done for unlocks that nest
- * perfectly. (i.e. the current top of the lock-stack is unlocked)
- */
-static int lock_release_nested(struct task_struct *curr,
-                              struct lockdep_map *lock, unsigned long ip)
+static int __lock_is_held(struct lockdep_map *lock)
 {
-       struct held_lock *hlock;
-       unsigned int depth;
-
-       /*
-        * Pop off the top of the lock stack:
-        */
-       depth = curr->lockdep_depth - 1;
-       hlock = curr->held_locks + depth;
-
-       /*
-        * Is the unlock non-nested:
-        */
-       if (hlock->instance != lock || hlock->references)
-               return lock_release_non_nested(curr, lock, ip);
-       curr->lockdep_depth--;
-
-       /*
-        * No more locks, but somehow we've got hash left over, who left it?
-        */
-       if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
-               return 0;
+       struct task_struct *curr = current;
+       int i;
 
-       curr->curr_chain_key = hlock->prev_chain_key;
+       for (i = 0; i < curr->lockdep_depth; i++) {
+               struct held_lock *hlock = curr->held_locks + i;
 
-       lock_release_holdtime(hlock);
+               if (match_held_lock(hlock, lock))
+                       return 1;
+       }
 
-#ifdef CONFIG_DEBUG_LOCKDEP
-       hlock->prev_chain_key = 0;
-       hlock->class_idx = 0;
-       hlock->acquire_ip = 0;
-       hlock->irq_context = 0;
-#endif
-       return 1;
+       return 0;
 }
 
-/*
- * Remove the lock to the list of currently held locks - this gets
- * called on mutex_unlock()/spin_unlock*() (or on a failed
- * mutex_lock_interruptible()). This is done for unlocks that nest
- * perfectly. (i.e. the current top of the lock-stack is unlocked)
- */
-static void
-__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
+static void __lock_pin_lock(struct lockdep_map *lock)
 {
        struct task_struct *curr = current;
+       int i;
 
-       if (!check_unlock(curr, lock, ip))
+       if (unlikely(!debug_locks))
                return;
 
-       if (nested) {
-               if (!lock_release_nested(curr, lock, ip))
-                       return;
-       } else {
-               if (!lock_release_non_nested(curr, lock, ip))
+       for (i = 0; i < curr->lockdep_depth; i++) {
+               struct held_lock *hlock = curr->held_locks + i;
+
+               if (match_held_lock(hlock, lock)) {
+                       hlock->pin_count++;
                        return;
+               }
        }
 
-       check_chain_key(curr);
+       WARN(1, "pinning an unheld lock\n");
 }
 
-static int __lock_is_held(struct lockdep_map *lock)
+static void __lock_unpin_lock(struct lockdep_map *lock)
 {
        struct task_struct *curr = current;
        int i;
 
+       if (unlikely(!debug_locks))
+               return;
+
        for (i = 0; i < curr->lockdep_depth; i++) {
                struct held_lock *hlock = curr->held_locks + i;
 
-               if (match_held_lock(hlock, lock))
-                       return 1;
+               if (match_held_lock(hlock, lock)) {
+                       if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
+                               return;
+
+                       hlock->pin_count--;
+                       return;
+               }
        }
 
-       return 0;
+       WARN(1, "unpinning an unheld lock\n");
 }
 
 /*
@@ -3623,7 +3585,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        current->lockdep_recursion = 1;
        trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
        __lock_acquire(lock, subclass, trylock, read, check,
-                      irqs_disabled_flags(flags), nest_lock, ip, 0);
+                      irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
@@ -3641,7 +3603,8 @@ void lock_release(struct lockdep_map *lock, int nested,
        check_flags(flags);
        current->lockdep_recursion = 1;
        trace_lock_release(lock, ip);
-       __lock_release(lock, nested, ip);
+       if (__lock_release(lock, nested, ip))
+               check_chain_key(current);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
 }
@@ -3667,6 +3630,40 @@ int lock_is_held(struct lockdep_map *lock)
 }
 EXPORT_SYMBOL_GPL(lock_is_held);
 
+void lock_pin_lock(struct lockdep_map *lock)
+{
+       unsigned long flags;
+
+       if (unlikely(current->lockdep_recursion))
+               return;
+
+       raw_local_irq_save(flags);
+       check_flags(flags);
+
+       current->lockdep_recursion = 1;
+       __lock_pin_lock(lock);
+       current->lockdep_recursion = 0;
+       raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_pin_lock);
+
+void lock_unpin_lock(struct lockdep_map *lock)
+{
+       unsigned long flags;
+
+       if (unlikely(current->lockdep_recursion))
+               return;
+
+       raw_local_irq_save(flags);
+       check_flags(flags);
+
+       current->lockdep_recursion = 1;
+       __lock_unpin_lock(lock);
+       current->lockdep_recursion = 0;
+       raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_unpin_lock);
+
 void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
 {
        current->lockdep_reclaim_gfp = gfp_mask;
@@ -4069,8 +4066,7 @@ void __init lockdep_info(void)
 
 #ifdef CONFIG_DEBUG_LOCKDEP
        if (lockdep_init_error) {
-               printk("WARNING: lockdep init error! lock-%s was acquired"
-                       "before lockdep_init\n", lock_init_error);
+               printk("WARNING: lockdep init error: lock '%s' was acquired before lockdep_init().\n", lock_init_error);
                printk("Call stack leading to lockdep invocation was:\n");
                print_stack_trace(&lockdep_init_trace, 0);
        }
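
For context, here is a minimal sketch (not part of this patch) of how the new pinning hooks added above might be used. It assumes a lock-debugging build (CONFIG_DEBUG_LOCK_ALLOC) so that raw_spinlock_t carries a dep_map; the lock and helper names are invented for the example, and in mainline these entry points are normally reached through lockdep_pin_lock()/lockdep_unpin_lock()-style wrappers rather than called directly.

#include <linux/spinlock.h>
#include <linux/lockdep.h>

static DEFINE_RAW_SPINLOCK(my_lock);

static void work_that_must_not_drop_the_lock(void)
{
	/*
	 * Anything in here that released my_lock would now trigger the
	 * "releasing a pinned lock" WARN in __lock_release().
	 */
}

static void my_critical_section(void)
{
	raw_spin_lock(&my_lock);

	/* Pin: from here on, lockdep WARNs if my_lock is released. */
	lock_pin_lock(&my_lock.dep_map);

	work_that_must_not_drop_the_lock();

	/* Unpin before the one legitimate release below. */
	lock_unpin_lock(&my_lock.dep_map);

	raw_spin_unlock(&my_lock);
}

With this pattern, an unlock imbalance inside the pinned region is reported at the offending release rather than surfacing later as a corrupted lock chain.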