1 #ifndef _LINUX_RWSEM_RT_H
2 #define _LINUX_RWSEM_RT_H
5 #error "Include rwsem.h"
/*
 * RW-semaphores are a spinlock plus a reader-depth count.
 *
 * Note that the semantics are different from the usual
 * Linux rw-sems, in PREEMPT_RT mode we do not allow
 * multiple readers to hold the lock at once, we only allow
 * a read-lock owner to read-lock recursively. This is
 * better for latency, makes the implementation inherently
 * fair and makes it simpler as well.
 */
19 #include <linux/rtmutex.h>
24 #ifdef CONFIG_DEBUG_LOCK_ALLOC
25 struct lockdep_map dep_map;
/* Static initializer: underlying rt_mutex plus optional lockdep map. */
#define __RWSEM_INITIALIZER(name) \
	{ .lock = __RT_MUTEX_INITIALIZER(name.lock), \
	  RW_DEP_MAP_INIT(name) }
/* Define and statically initialize a rw_semaphore in one statement. */
#define DECLARE_RWSEM(lockname) \
	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
/* Runtime init backend; @name and @key feed lockdep class setup. */
extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
			    struct lock_class_key *key);
/*
 * Initialize @sem at runtime.  The do { } while (0) wrapper keeps this
 * multi-statement macro safe inside un-braced if/else bodies.
 */
#define __rt_init_rwsem(sem, name, key)			\
	do {						\
		rt_mutex_init(&(sem)->lock);		\
		__rt_rwsem_init((sem), (name), (key));	\
	} while (0)
/* Generic rwsem initialization maps straight onto the RT variant. */
#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key)
/*
 * Per-call-site init: the static lock_class_key gives each invocation
 * site its own lockdep class; do { } while (0) for statement safety.
 */
# define rt_init_rwsem(sem)				\
do {							\
	static struct lock_class_key __key;		\
							\
	__rt_init_rwsem((sem), #sem, &__key);		\
} while (0)
/* RT implementations backing the generic rwsem API wrappers below. */
extern void rt_down_write(struct rw_semaphore *rwsem);
extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
				      struct lockdep_map *nest);
extern void rt_down_read(struct rw_semaphore *rwsem);
extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
extern void __rt_up_read(struct rw_semaphore *rwsem);
extern void rt_up_read(struct rw_semaphore *rwsem);
extern void rt_up_write(struct rw_semaphore *rwsem);
extern void rt_downgrade_write(struct rw_semaphore *rwsem);
#define init_rwsem(sem) rt_init_rwsem(sem)
/* Locked iff the underlying rt_mutex is locked. */
#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
70 static inline int rwsem_is_contended(struct rw_semaphore *sem)
72 /* rt_mutex_has_waiters() */
73 return !RB_EMPTY_ROOT(&sem->lock.waiters);
/* Acquire for reading (RT: may boost/block like a mutex). */
static inline void down_read(struct rw_semaphore *sem)
{
	rt_down_read(sem);
}
/* Try to acquire for reading; returns non-zero on success. */
static inline int down_read_trylock(struct rw_semaphore *sem)
{
	return rt_down_read_trylock(sem);
}
/* Acquire for writing. */
static inline void down_write(struct rw_semaphore *sem)
{
	rt_down_write(sem);
}
/* Try to acquire for writing; returns non-zero on success. */
static inline int down_write_trylock(struct rw_semaphore *sem)
{
	return rt_down_write_trylock(sem);
}
/* Release a read lock without the lockdep release annotation. */
static inline void __up_read(struct rw_semaphore *sem)
{
	__rt_up_read(sem);
}
/* Release a read lock. */
static inline void up_read(struct rw_semaphore *sem)
{
	rt_up_read(sem);
}
/* Release a write lock. */
static inline void up_write(struct rw_semaphore *sem)
{
	rt_up_write(sem);
}
/* Convert a held write lock into a read lock without dropping it. */
static inline void downgrade_write(struct rw_semaphore *sem)
{
	rt_downgrade_write(sem);
}
/* Acquire for reading with a lockdep subclass (nested locking). */
static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
{
	/* No 'return': rt_down_read_nested() is void and so is this wrapper. */
	rt_down_read_nested(sem, subclass);
}
/* Acquire for writing with a lockdep subclass (nested locking). */
static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
{
	rt_down_write_nested(sem, subclass);
}
125 #ifdef CONFIG_DEBUG_LOCK_ALLOC
126 static inline void down_write_nest_lock(struct rw_semaphore *sem,
127 struct rw_semaphore *nest_lock)
129 rt_down_write_nested_lock(sem, &nest_lock->dep_map);
134 static inline void down_write_nest_lock(struct rw_semaphore *sem,
135 struct rw_semaphore *nest_lock)
137 rt_down_write_nested_lock(sem, NULL);