X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=kernel%2Finclude%2Flinux%2Fmutex.h;fp=kernel%2Finclude%2Flinux%2Fmutex.h;h=b3fdfc8202168868c78f2519d4ce7cfff15d23d5;hb=9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00;hp=0000000000000000000000000000000000000000;hpb=98260f3884f4a202f9ca5eabed40b1354c489b29;p=kvmfornfv.git

diff --git a/kernel/include/linux/mutex.h b/kernel/include/linux/mutex.h
new file mode 100644
index 000000000..b3fdfc820
--- /dev/null
+++ b/kernel/include/linux/mutex.h
@@ -0,0 +1,184 @@
+/*
+ * Mutexes: blocking mutual exclusion locks
+ *
+ * started by Ingo Molnar:
+ *
+ *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * This file contains the main data structure and API definitions.
+ */
+#ifndef __LINUX_MUTEX_H
+#define __LINUX_MUTEX_H
+
+#include <asm/current.h>
+#include <linux/list.h>
+#include <linux/spinlock_types.h>
+#include <linux/linkage.h>
+#include <linux/lockdep.h>
+#include <linux/atomic.h>
+#include <asm/processor.h>
+#include <linux/osq_lock.h>
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+		, .dep_map = { .name = #lockname }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/mutex_rt.h>
+#else
+
+/*
+ * Simple, straightforward mutexes with strict semantics:
+ *
+ * - only one task can hold the mutex at a time
+ * - only the owner can unlock the mutex
+ * - multiple unlocks are not permitted
+ * - recursive locking is not permitted
+ * - a mutex object must be initialized via the API
+ * - a mutex object must not be initialized via memset or copying
+ * - task may not exit with mutex held
+ * - memory areas where held locks reside must not be freed
+ * - held mutexes must not be reinitialized
+ * - mutexes may not be used in hardware or software interrupt
+ *   contexts such as tasklets and timers
+ *
+ * These semantics are fully enforced when DEBUG_MUTEXES is
+ * enabled. Furthermore, besides enforcing the above rules, the mutex
+ * debugging code also implements a number of additional features
+ * that make lock debugging easier and faster:
+ *
+ * - uses symbolic names of mutexes, whenever they are printed in debug output
+ * - point-of-acquire tracking, symbolic lookup of function names
+ * - list of all locks held in the system, printout of them
+ * - owner tracking
+ * - detects self-recursing locks and prints out all relevant info
+ * - detects multi-task circular deadlocks and prints out all affected
+ *   locks and tasks (and only those tasks)
+ */
+struct mutex {
+	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
+	atomic_t		count;
+	spinlock_t		wait_lock;
+	struct list_head	wait_list;
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
+	struct task_struct	*owner;
+#endif
+#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+	struct optimistic_spin_queue osq; /* Spinner MCS lock */
+#endif
+#ifdef CONFIG_DEBUG_MUTEXES
+	void			*magic;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
+};
+
+/*
+ * This is the control structure for tasks blocked on mutex,
+ * which resides on the blocked task's kernel stack:
+ */
+struct mutex_waiter {
+	struct list_head	list;
+	struct task_struct	*task;
+#ifdef CONFIG_DEBUG_MUTEXES
+	void			*magic;
+#endif
+};
+
+#ifdef CONFIG_DEBUG_MUTEXES
+# include <linux/mutex-debug.h>
+#else
+# define __DEBUG_MUTEX_INITIALIZER(lockname)
+/**
+ * mutex_init - initialize the mutex
+ * @mutex: the mutex to be initialized
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
+# define mutex_init(mutex) \
+do { \
+	static struct lock_class_key __key; \
+	\
+	__mutex_init((mutex), #mutex, &__key); \
+} while (0)
+static inline void mutex_destroy(struct mutex *lock) {}
+#endif
+
+#define __MUTEX_INITIALIZER(lockname) \
+		{ .count = ATOMIC_INIT(1) \
+		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
+		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
+		__DEBUG_MUTEX_INITIALIZER(lockname) \
+		__DEP_MAP_MUTEX_INITIALIZER(lockname) }
+
+#define DEFINE_MUTEX(mutexname) \
+	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+extern void __mutex_init(struct mutex *lock, const char *name,
+			 struct lock_class_key *key);
+
+/**
+ * mutex_is_locked - is the mutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns 1 if the mutex is locked, 0 if unlocked.
+ */
+static inline int mutex_is_locked(struct mutex *lock)
+{
+	return atomic_read(&lock->count) != 1;
+}
+
+/*
+ * See kernel/locking/mutex.c for detailed documentation of these APIs.
+ * Also see Documentation/locking/mutex-design.txt.
+ */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
+extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+
+extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
+					unsigned int subclass);
+extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
+					unsigned int subclass);
+
+#define mutex_lock(lock) mutex_lock_nested(lock, 0)
+#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
+#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
+
+#define mutex_lock_nest_lock(lock, nest_lock) \
+do { \
+	typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
+	_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+} while (0)
+
+#else
+extern void mutex_lock(struct mutex *lock);
+extern int __must_check mutex_lock_interruptible(struct mutex *lock);
+extern int __must_check mutex_lock_killable(struct mutex *lock);
+
+# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
+# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
+# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
+# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
+#endif
+
+/*
+ * NOTE: mutex_trylock() follows the spin_trylock() convention,
+ *       not the down_trylock() convention!
+ *
+ * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
+ */
+extern int mutex_trylock(struct mutex *lock);
+extern void mutex_unlock(struct mutex *lock);
+
+#endif /* !PREEMPT_RT_FULL */
+
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+
+#endif /* __LINUX_MUTEX_H */
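For context only (not part of the patch above): a minimal, hypothetical kernel-module sketch exercising the API this header declares, i.e. DEFINE_MUTEX(), mutex_init()/mutex_destroy(), mutex_lock()/mutex_unlock(), mutex_lock_interruptible() and mutex_trylock(). The module name and symbols (demo_lock, demo_counter, demo_init, demo_exit) are illustrative assumptions, not taken from the kvmfornfv tree.

/* Hypothetical usage sketch of the mutex API declared in linux/mutex.h. */
#include <linux/module.h>
#include <linux/mutex.h>

/* Statically defined mutex; starts in the unlocked state. */
static DEFINE_MUTEX(demo_lock);

/* A dynamically initialized mutex, e.g. embedded in a driver structure. */
static struct mutex demo_dyn_lock;

static int demo_counter;	/* protected by demo_lock */

static int __init demo_init(void)
{
	/* Runtime initialization; must happen before first use. */
	mutex_init(&demo_dyn_lock);

	/* Blocking acquire; only the owner may unlock, no recursion. */
	mutex_lock(&demo_lock);
	demo_counter++;
	mutex_unlock(&demo_lock);

	/* Interruptible variant: returns 0 on success, -EINTR on a signal. */
	if (mutex_lock_interruptible(&demo_lock) == 0) {
		demo_counter++;
		mutex_unlock(&demo_lock);
	}

	/* trylock follows the spin_trylock() convention: 1 on success, 0 on contention. */
	if (mutex_trylock(&demo_dyn_lock)) {
		demo_counter++;
		mutex_unlock(&demo_dyn_lock);
	}

	return 0;
}

static void __exit demo_exit(void)
{
	/* No-op unless DEBUG_MUTEXES is enabled; must not be called on a held mutex. */
	mutex_destroy(&demo_dyn_lock);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");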