1 #ifndef _LINUX_RWSEM_RT_H
2 #define _LINUX_RWSEM_RT_H
5 #error "Include rwsem.h"
9 * RW-semaphores are a spinlock plus a reader-depth count.
11 * Note that the semantics are different from the usual
12 * Linux rw-sems, in PREEMPT_RT mode we do not allow
13 * multiple readers to hold the lock at once; we only allow
14 * a read-lock owner to read-lock recursively. This is
15 * better for latency, makes the implementation inherently
16 * fair and makes it simpler as well.
19 #include <linux/rtmutex.h>
24 #ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* Lockdep tracking state; present only in CONFIG_DEBUG_LOCK_ALLOC builds. */
25 struct lockdep_map dep_map;
/*
 * Static initializer: set up the embedded rt_mutex and (when lockdep is
 * enabled) the dep_map via RW_DEP_MAP_INIT().
 */
29 #define __RWSEM_INITIALIZER(name) \
30 { .lock = __RT_MUTEX_INITIALIZER(name.lock), \
31 RW_DEP_MAP_INIT(name) }
/* Define and statically initialize a struct rw_semaphore. */
33 #define DECLARE_RWSEM(lockname) \
34 struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
/*
 * Out-of-line init helper; @name/@key look like lockdep class
 * registration parameters — confirm against its definition.
 */
36 extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
37 struct lock_class_key *key);
/*
 * Runtime init: reinitialize the embedded rt_mutex, then hand the
 * semaphore to __rt_rwsem_init() with its name and lockdep class key.
 * (The surrounding do { } while (0) wrapper is elided in this view.)
 */
39 #define __rt_init_rwsem(sem, name, key) \
41 rt_mutex_init(&(sem)->lock); \
42 __rt_rwsem_init((sem), (name), (key));\
45 #define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key)
/*
 * rt_init_rwsem(sem): init with a unique static lockdep class key per
 * call site and the variable's own name (#sem) as the lock name.
 */
47 # define rt_init_rwsem(sem) \
49 static struct lock_class_key __key; \
51 __rt_init_rwsem((sem), #sem, &__key); \
/*
 * Out-of-line RT implementations (defined elsewhere in the kernel).
 * NOTE(review): the rt__* spellings appear to be the inner, non-lockdep
 * variants wrapped by the plain rt_* ones — confirm at their definitions.
 */
54 extern void rt_down_write(struct rw_semaphore *rwsem);
55 extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
56 extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
57 extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
58 struct lockdep_map *nest);
59 extern void rt__down_read(struct rw_semaphore *rwsem);
60 extern void rt_down_read(struct rw_semaphore *rwsem);
61 extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
62 extern int rt__down_read_trylock(struct rw_semaphore *rwsem);
63 extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
64 extern void __rt_up_read(struct rw_semaphore *rwsem);
65 extern void rt_up_read(struct rw_semaphore *rwsem);
66 extern void rt_up_write(struct rw_semaphore *rwsem);
67 extern void rt_downgrade_write(struct rw_semaphore *rwsem);
/* Map the generic rwsem API onto the RT implementation. */
69 #define init_rwsem(sem) rt_init_rwsem(sem)
/* Locked iff the underlying rt_mutex is held. */
70 #define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
/*
 * Nonzero when at least one task is blocked on the semaphore: the
 * rt_mutex waiter rbtree is non-empty (open-coded rt_mutex_has_waiters()).
 */
72 static inline int rwsem_is_contended(struct rw_semaphore *sem)
74 /* rt_mutex_has_waiters() */
75 return !RB_EMPTY_ROOT(&sem->lock.waiters);
/*
 * Read-lock entry points.  NOTE(review): bodies are elided in this view;
 * expected to forward to rt__down_read()/rt_down_read() declared above —
 * confirm against the full file.
 */
78 static inline void __down_read(struct rw_semaphore *sem)
83 static inline void down_read(struct rw_semaphore *sem)
/* Non-lockdep read trylock; result propagated from rt__down_read_trylock(). */
88 static inline int __down_read_trylock(struct rw_semaphore *sem)
90 return rt__down_read_trylock(sem);
/* Try to take the read lock without blocking; forwards to rt_down_read_trylock(). */
93 static inline int down_read_trylock(struct rw_semaphore *sem)
95 return rt_down_read_trylock(sem);
/* Write lock.  NOTE(review): body elided here — expected to call rt_down_write(); confirm. */
98 static inline void down_write(struct rw_semaphore *sem)
/* Try to take the write lock without blocking; result from rt_down_write_trylock(). */
103 static inline int down_write_trylock(struct rw_semaphore *sem)
105 return rt_down_write_trylock(sem);
/*
 * Unlock paths.  NOTE(review): bodies are elided in this view; expected
 * to forward to __rt_up_read()/rt_up_read()/rt_up_write() — confirm.
 */
108 static inline void __up_read(struct rw_semaphore *sem)
113 static inline void up_read(struct rw_semaphore *sem)
118 static inline void up_write(struct rw_semaphore *sem)
/* Downgrade a held write lock to a read lock; forwards to rt_downgrade_write(). */
123 static inline void downgrade_write(struct rw_semaphore *sem)
125 rt_downgrade_write(sem);
/*
 * Read lock with an explicit lockdep subclass; forwards to
 * rt_down_read_nested().
 *
 * Fix: the original wrote "return rt_down_read_nested(...)" — returning
 * a void expression from a void function is an ISO C constraint
 * violation (C11 6.8.6.4) and trips -pedantic; drop the bogus return.
 */
static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
{
	rt_down_read_nested(sem, subclass);
}
/* Write lock with an explicit lockdep subclass; forwards to rt_down_write_nested(). */
133 static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
135 rt_down_write_nested(sem, subclass);
137 #ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Lockdep build: record @nest_lock's dep_map as the nesting lock. */
138 static inline void down_write_nest_lock(struct rw_semaphore *sem,
139 struct rw_semaphore *nest_lock)
141 rt_down_write_nested_lock(sem, &nest_lock->dep_map);
/* Non-lockdep build: no dep_map exists (see #ifdef above), so pass NULL. */
146 static inline void down_write_nest_lock(struct rw_semaphore *sem,
147 struct rw_semaphore *nest_lock)
149 rt_down_write_nested_lock(sem, NULL);