1 /* See include/linux/lglock.h for description */
2 #include <linux/module.h>
3 #include <linux/lglock.h>
5 #include <linux/string.h>
7 #ifndef CONFIG_PREEMPT_RT_FULL
/*
 * !RT: each per-CPU lg lock is a raw arch spinlock, taken and released
 * with the arch_spin_* primitives directly (no lockdep at this level;
 * lockdep is driven explicitly by the callers below).
 */
8 # define lg_lock_ptr arch_spinlock_t
9 # define lg_do_lock(l) arch_spin_lock(l)
10 # define lg_do_unlock(l) arch_spin_unlock(l)
/*
 * RT: each per-CPU lock is a sleeping rt_mutex instead; __no_mg is
 * presumably the no-migrate-disable lock variant -- confirm against
 * the RT patch headers.
 * NOTE(review): the "#else" separating the two branches (orig line 11)
 * and the closing "#endif" are not visible in this listing -- confirm
 * against the full source.
 */
12 # define lg_lock_ptr struct rt_mutex
13 # define lg_do_lock(l) __rt_spin_lock__no_mg(l)
14 # define lg_do_unlock(l) __rt_spin_unlock(l)
17 * Note there is no uninit, so lglocks cannot be defined in
18 * modules (but it is fine to use them from there).
19 * Uninit could be added, though: just undo what lg_lock_init does.
/*
 * lg_lock_init() - initialise @lg's per-CPU locks and register a single
 * lockdep map/key covering the whole set.  @name is the class name
 * lockdep reports.
 * NOTE(review): several body lines (opening brace, !RT branch, loop
 * body, closing braces) are elided from this listing.
 */
22 void lg_lock_init(struct lglock *lg, char *name)
24 #ifdef CONFIG_PREEMPT_RT_FULL
/* RT: every per-CPU slot is an rt_mutex that is initialised one by one. */
27 	for_each_possible_cpu(i) {
28 		struct rt_mutex *lock = per_cpu_ptr(lg->lock, i);
/* One dep_map and one key stand in for all of the per-CPU locks. */
33 	LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
35 	EXPORT_SYMBOL(lg_lock_init);
/*
 * lg_local_lock() - take the current CPU's lock.  Recorded with lockdep
 * as a shared (read-side) acquisition of the common dep_map, matching
 * the exclusive acquisition done by lg_global_lock().
 * NOTE(review): the lg_do_lock() call and the preemption/migration
 * disable lines are elided from this listing.
 */
37 void lg_local_lock(struct lglock *lg)
42 	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
43 	lock = this_cpu_ptr(lg->lock);
46 	EXPORT_SYMBOL(lg_local_lock);
/*
 * lg_local_unlock() - release the current CPU's lock.  The lockdep
 * read-side acquisition is dropped first, mirroring lg_local_lock().
 * NOTE(review): the lg_do_unlock() call and the preemption/migration
 * re-enable lines are elided from this listing.
 */
48 void lg_local_unlock(struct lglock *lg)
52 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
53 	lock = this_cpu_ptr(lg->lock);
57 	EXPORT_SYMBOL(lg_local_unlock);
/*
 * lg_local_lock_cpu() - take the lock belonging to @cpu, which is not
 * necessarily the current CPU.  preempt_disable_nort() presumably
 * disables preemption on !RT only and is a no-op on RT, where the
 * per-CPU lock is a sleeping rt_mutex -- confirm against the RT patch.
 * NOTE(review): the lg_do_lock() call is elided from this listing.
 */
59 void lg_local_lock_cpu(struct lglock *lg, int cpu)
63 	preempt_disable_nort();
64 	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
65 	lock = per_cpu_ptr(lg->lock, cpu);
68 	EXPORT_SYMBOL(lg_local_lock_cpu);
/*
 * lg_local_unlock_cpu() - release the lock belonging to @cpu, undoing
 * lg_local_lock_cpu() in the reverse order: lockdep release, unlock,
 * then re-enable preemption (!RT only).
 * NOTE(review): the lg_do_unlock() call is elided from this listing.
 */
70 void lg_local_unlock_cpu(struct lglock *lg, int cpu)
74 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
75 	lock = per_cpu_ptr(lg->lock, cpu);
77 	preempt_enable_nort();
79 	EXPORT_SYMBOL(lg_local_unlock_cpu);
/*
 * lg_double_lock() - take the locks of two CPUs.  Per the in-body
 * comment, acquisition must follow cpu order so it cannot ABBA-deadlock
 * against lg_global_lock(), which locks CPUs in ascending order.
 * NOTE(review): the code that enforces cpu1 < cpu2 (swap of the two ids
 * and/or a BUG_ON, orig lines 82-88) is elided from this listing --
 * the visible code alone locks cpu1 then cpu2 unconditionally; confirm
 * against the full source.
 */
81 void lg_double_lock(struct lglock *lg, int cpu1, int cpu2)
85 	/* lock in cpu order, just like lg_global_lock */
89 	preempt_disable_nort();
90 	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
91 	lg_do_lock(per_cpu_ptr(lg->lock, cpu1));
92 	lg_do_lock(per_cpu_ptr(lg->lock, cpu2));
/*
 * lg_double_unlock() - drop the two per-CPU locks taken by
 * lg_double_lock().  Unlock order is not deadlock-relevant, so cpu1 is
 * simply released before cpu2; the lockdep acquisition is dropped
 * first, then preemption re-enabled (!RT only).
 */
95 void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2)
97 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
98 	lg_do_unlock(per_cpu_ptr(lg->lock, cpu1));
99 	lg_do_unlock(per_cpu_ptr(lg->lock, cpu2));
100 	preempt_enable_nort();
/*
 * lg_global_lock() - take every possible CPU's lock, in ascending cpu
 * order.  Recorded with lockdep as a single exclusive (write-side)
 * acquisition of the shared dep_map, which pairs with the shared
 * acquisitions done by the lg_local_* functions.
 * NOTE(review): the loop body's lg_do_lock() call and closing braces
 * are elided from this listing.
 */
103 void lg_global_lock(struct lglock *lg)
107 	preempt_disable_nort();
108 	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
109 	for_each_possible_cpu(i) {
111 		lock = per_cpu_ptr(lg->lock, i);
115 	EXPORT_SYMBOL(lg_global_lock);
/*
 * lg_global_unlock() - release every possible CPU's lock, undoing
 * lg_global_lock(): drop the exclusive lockdep acquisition, unlock each
 * per-CPU lock in turn, then re-enable preemption (!RT only).
 * NOTE(review): the loop body's lg_do_unlock() call and closing braces
 * are elided from this listing.
 */
117 void lg_global_unlock(struct lglock *lg)
121 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
122 	for_each_possible_cpu(i) {
124 		lock = per_cpu_ptr(lg->lock, i);
127 	preempt_enable_nort();
129 	EXPORT_SYMBOL(lg_global_unlock);
131 #ifdef CONFIG_PREEMPT_RT_FULL
133  * HACK: If you use this, you get to keep the pieces.
134  * Used in queue_stop_cpus_work() when stop machinery
135  * is called from inactive CPU, so we can't schedule.
/*
 * Repeatedly try-lock the rt_mutex without ever blocking: loop until
 * __rt_spin_trylock() succeeds.
 * NOTE(review): the loop body that relaxes the CPU between attempts
 * (orig lines 138, 140-141) is elided from this listing -- confirm
 * what it does (likely a cpu_chill()-style backoff) against the full
 * source.
 */
137 # define lg_do_trylock_relax(l)						\
139 	while (!__rt_spin_trylock(l))					\
/*
 * lg_global_trylock_relax() - RT-only counterpart of lg_global_lock()
 * for callers that must not schedule: each per-CPU rt_mutex is taken
 * via the non-blocking lg_do_trylock_relax() spin loop instead of a
 * sleeping lock.  Still registers as an exclusive lockdep acquisition.
 * NOTE(review): the function's remainder (loop close, function close,
 * matching unlock helper, #endif) lies beyond the end of this listing.
 */
143 void lg_global_trylock_relax(struct lglock *lg)
147 	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
148 	for_each_possible_cpu(i) {
150 		lock = per_cpu_ptr(lg->lock, i);
151 		lg_do_trylock_relax(lock);