kvmfornfv.git: kernel/kernel/locking/lglock.c (raw update to linux-4.4.6-rt14)
/* See include/linux/lglock.h for description */
#include <linux/module.h>
#include <linux/lglock.h>
#include <linux/cpu.h>
#include <linux/string.h>

#ifndef CONFIG_PREEMPT_RT_FULL
# define lg_lock_ptr            arch_spinlock_t
# define lg_do_lock(l)          arch_spin_lock(l)
# define lg_do_unlock(l)        arch_spin_unlock(l)
#else
# define lg_lock_ptr            struct rt_mutex
# define lg_do_lock(l)          __rt_spin_lock__no_mg(l)
# define lg_do_unlock(l)        __rt_spin_unlock(l)
#endif
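
/*
 * Without PREEMPT_RT_FULL each per-CPU lock is a raw arch_spinlock_t,
 * so the critical sections below run with preemption disabled.  With
 * PREEMPT_RT_FULL the per-CPU locks are rt_mutexes instead, which keeps
 * the critical sections preemptible: lg_local_lock()/lg_local_unlock()
 * then only disable migration, and the preempt_*_nort() calls become
 * no-ops.
 */
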
/*
 * Note there is no uninit, so lglocks cannot be defined in
 * modules (but it's fine to use them from there)
 * Could be added though, just undo lg_lock_init
 */

void lg_lock_init(struct lglock *lg, char *name)
{
#ifdef CONFIG_PREEMPT_RT_FULL
        int i;

        for_each_possible_cpu(i) {
                struct rt_mutex *lock = per_cpu_ptr(lg->lock, i);

                rt_mutex_init(lock);
        }
#endif
        LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
}
EXPORT_SYMBOL(lg_lock_init);
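
/*
 * Minimal usage sketch, kept under "#if 0" as illustration only: an
 * lglock is normally declared with DEFINE_LGLOCK()/DEFINE_STATIC_LGLOCK()
 * from include/linux/lglock.h and initialised once before use.
 * "example_lglock" and example_init() are made-up names, not symbols
 * from this tree.
 */
#if 0
static DEFINE_STATIC_LGLOCK(example_lglock);

static int __init example_init(void)
{
        /* sets up lockdep, and on PREEMPT_RT_FULL the per-CPU rt_mutexes */
        lg_lock_init(&example_lglock, "example_lglock");
        return 0;
}
#endif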

void lg_local_lock(struct lglock *lg)
{
        lg_lock_ptr *lock;

        migrate_disable();
        lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
        lock = this_cpu_ptr(lg->lock);
        lg_do_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock);

void lg_local_unlock(struct lglock *lg)
{
        lg_lock_ptr *lock;

        lock_release(&lg->lock_dep_map, 1, _RET_IP_);
        lock = this_cpu_ptr(lg->lock);
        lg_do_unlock(lock);
        migrate_enable();
}
EXPORT_SYMBOL(lg_local_unlock);
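
/*
 * Usage sketch (illustration only): the local lock/unlock pair protects
 * the current CPU's share of some per-CPU data against holders of
 * lg_global_lock().  "example_pcpu_list" is a made-up per-CPU variable,
 * assumed to be initialised elsewhere, and example_lglock is the
 * made-up lock from the sketch above.
 */
#if 0
static DEFINE_PER_CPU(struct list_head, example_pcpu_list);

static void example_add(struct list_head *entry)
{
        lg_local_lock(&example_lglock);
        /* migration is disabled here, so this_cpu_ptr() stays stable */
        list_add(entry, this_cpu_ptr(&example_pcpu_list));
        lg_local_unlock(&example_lglock);
}
#endif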

void lg_local_lock_cpu(struct lglock *lg, int cpu)
{
        lg_lock_ptr *lock;

        preempt_disable_nort();
        lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
        lock = per_cpu_ptr(lg->lock, cpu);
        lg_do_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock_cpu);

void lg_local_unlock_cpu(struct lglock *lg, int cpu)
{
        lg_lock_ptr *lock;

        lock_release(&lg->lock_dep_map, 1, _RET_IP_);
        lock = per_cpu_ptr(lg->lock, cpu);
        lg_do_unlock(lock);
        preempt_enable_nort();
}
EXPORT_SYMBOL(lg_local_unlock_cpu);

void lg_double_lock(struct lglock *lg, int cpu1, int cpu2)
{
        BUG_ON(cpu1 == cpu2);

        /* lock in cpu order, just like lg_global_lock */
        if (cpu2 < cpu1)
                swap(cpu1, cpu2);

        preempt_disable_nort();
        lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
        lg_do_lock(per_cpu_ptr(lg->lock, cpu1));
        lg_do_lock(per_cpu_ptr(lg->lock, cpu2));
}

void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2)
{
        lock_release(&lg->lock_dep_map, 1, _RET_IP_);
        lg_do_unlock(per_cpu_ptr(lg->lock, cpu1));
        lg_do_unlock(per_cpu_ptr(lg->lock, cpu2));
        preempt_enable_nort();
}
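
/*
 * Usage sketch (illustration only): lg_double_lock() nests exactly two
 * CPUs' locks.  Both it and lg_global_lock() acquire locks in ascending
 * CPU order, so concurrent callers cannot produce an ABBA deadlock.
 * example_move() reuses the made-up names from the sketches above.
 */
#if 0
static void example_move(struct list_head *entry, int src_cpu, int dst_cpu)
{
        lg_double_lock(&example_lglock, src_cpu, dst_cpu);
        list_move(entry, per_cpu_ptr(&example_pcpu_list, dst_cpu));
        lg_double_unlock(&example_lglock, src_cpu, dst_cpu);
}
#endif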

void lg_global_lock(struct lglock *lg)
{
        int i;

        preempt_disable_nort();
        lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
        for_each_possible_cpu(i) {
                lg_lock_ptr *lock;
                lock = per_cpu_ptr(lg->lock, i);
                lg_do_lock(lock);
        }
}
EXPORT_SYMBOL(lg_global_lock);

void lg_global_unlock(struct lglock *lg)
{
        int i;

        lock_release(&lg->lock_dep_map, 1, _RET_IP_);
        for_each_possible_cpu(i) {
                lg_lock_ptr *lock;
                lock = per_cpu_ptr(lg->lock, i);
                lg_do_unlock(lock);
        }
        preempt_enable_nort();
}
EXPORT_SYMBOL(lg_global_unlock);
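
/*
 * Usage sketch (illustration only): the global lock/unlock pair excludes
 * every CPU's local holder at once, e.g. for a walk over all per-CPU
 * lists.  It reuses the made-up names from the sketches above.
 */
#if 0
static void example_walk_all(void)
{
        int cpu;

        lg_global_lock(&example_lglock);
        for_each_possible_cpu(cpu) {
                struct list_head *head = per_cpu_ptr(&example_pcpu_list, cpu);

                /* inspect or modify this CPU's list via 'head' here */
        }
        lg_global_unlock(&example_lglock);
}
#endif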

#ifdef CONFIG_PREEMPT_RT_FULL
/*
 * HACK: If you use this, you get to keep the pieces.
 * Used in queue_stop_cpus_work() when stop machinery
 * is called from inactive CPU, so we can't schedule.
 */
# define lg_do_trylock_relax(l)                 \
        do {                                    \
                while (!__rt_spin_trylock(l))   \
                        cpu_relax();            \
        } while (0)

void lg_global_trylock_relax(struct lglock *lg)
{
        int i;

        lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
        for_each_possible_cpu(i) {
                lg_lock_ptr *lock;
                lock = per_cpu_ptr(lg->lock, i);
                lg_do_trylock_relax(lock);
        }
}
#endif
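
/*
 * Usage sketch (illustration only): a caller that must not sleep on
 * PREEMPT_RT_FULL (the stop-machine path mentioned above) can take all
 * the locks by spinning on trylock and still drop them with the regular
 * lg_global_unlock(); the preempt_enable_nort() in the unlock path is a
 * no-op on RT, so the pairing stays balanced.
 */
#if 0
static void example_nosleep_global(struct lglock *lg)
{
        lg_global_trylock_relax(lg);
        /* ... minimal work that needs every CPU's lock held ... */
        lg_global_unlock(lg);
}
#endif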