kvmfornfv.git: kernel/fs/btrfs/locking.c
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

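/*
 * Extent buffer locks have two modes: a "spinning" mode backed by the
 * eb->lock rwlock, and a "blocking" mode in which the rwlock is dropped
 * and the lock is tracked only by the blocking_readers and
 * blocking_writers counters.  Holders convert between the two so that
 * short critical sections can spin while longer, sleep-prone ones may
 * block.  The spinning_readers, spinning_writers, read_locks and
 * write_locks counters are maintained for the assertions and warnings
 * below.
 */
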
/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
        /*
         * no lock is required.  The lock owner may change if
         * we have a read lock, but it won't change to or away
         * from us.  If we have the write lock, we are the owner
         * and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        if (rw == BTRFS_WRITE_LOCK) {
                if (atomic_read(&eb->blocking_writers) == 0) {
                        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
                        atomic_dec(&eb->spinning_writers);
                        btrfs_assert_tree_locked(eb);
                        atomic_inc(&eb->blocking_writers);
                        write_unlock(&eb->lock);
                }
        } else if (rw == BTRFS_READ_LOCK) {
                btrfs_assert_tree_read_locked(eb);
                atomic_inc(&eb->blocking_readers);
                WARN_ON(atomic_read(&eb->spinning_readers) == 0);
                atomic_dec(&eb->spinning_readers);
                read_unlock(&eb->lock);
        }
        return;
}

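/*
 * Illustrative usage, not part of this file: a holder about to do work
 * that can sleep would typically convert its spinning lock to blocking
 * first.  do_work_that_may_sleep() below is a hypothetical helper:
 *
 *      btrfs_tree_read_lock(eb);
 *      btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
 *      do_work_that_may_sleep(eb);
 *      btrfs_clear_lock_blocking_rw(eb, BTRFS_READ_LOCK_BLOCKING);
 *      btrfs_tree_read_unlock(eb);
 */
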
/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
        /*
         * no lock is required.  The lock owner may change if
         * we have a read lock, but it won't change to or away
         * from us.  If we have the write lock, we are the owner
         * and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;

        if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_writers) != 1);
                write_lock(&eb->lock);
                WARN_ON(atomic_read(&eb->spinning_writers));
                atomic_inc(&eb->spinning_writers);
                if (atomic_dec_and_test(&eb->blocking_writers) &&
                    waitqueue_active(&eb->write_lock_wq))
                        wake_up(&eb->write_lock_wq);
        } else if (rw == BTRFS_READ_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_readers) == 0);
                read_lock(&eb->lock);
                atomic_inc(&eb->spinning_readers);
                if (atomic_dec_and_test(&eb->blocking_readers) &&
                    waitqueue_active(&eb->read_lock_wq))
                        wake_up(&eb->read_lock_wq);
        }
        return;
}

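/*
 * Only the blocking holder itself may convert back, which is why
 * exactly one blocking writer must exist in the write case above.  The
 * wake_up() calls matter because btrfs_tree_lock() and
 * btrfs_tree_read_lock() sleep on these queues until the blocking
 * counts reach zero.
 */
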
/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
        BUG_ON(!atomic_read(&eb->blocking_writers) &&
               current->pid == eb->lock_owner);

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) &&
            current->pid == eb->lock_owner) {
                /*
                 * This extent is already write-locked by our thread. We allow
                 * an additional read lock to be added because it's for the same
                 * thread. btrfs_find_all_roots() depends on this as it may be
                 * called on a partly (write-)locked tree.
                 */
                BUG_ON(eb->lock_nested);
                eb->lock_nested = 1;
                read_unlock(&eb->lock);
                return;
        }
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
}

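/*
 * Illustrative nesting, not part of this file: the same thread may take
 * a read lock while it already holds the write lock, but only after the
 * write lock has been made blocking; with a spinning write lock the
 * read_lock() would deadlock on eb->lock, which the BUG_ON above
 * catches:
 *
 *      btrfs_tree_lock(eb);
 *      btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *      btrfs_tree_read_lock(eb);      (just sets eb->lock_nested)
 *      btrfs_tree_read_unlock(eb);    (clears eb->lock_nested)
 */
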
/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}

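/*
 * Unlike btrfs_tree_read_lock() above, this never sleeps: it fails
 * instead of waiting for blocking writers, although it can still spin
 * on a contended eb->lock.
 */
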
/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        if (!read_trylock(&eb->lock))
                return 0;

        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}

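/*
 * The difference from btrfs_tree_read_lock_atomic() above is the
 * read_trylock(): this variant will not even spin on a contended
 * eb->lock, so it never blocks in any form.
 */
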
/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers))
                return 0;

        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->write_locks);
        atomic_inc(&eb->spinning_writers);
        eb->lock_owner = current->pid;
        return 1;
}

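/*
 * Illustrative trylock pattern, not part of this file: a caller that
 * would rather not sleep can attempt the fast path first and fall back
 * to the full lock:
 *
 *      if (!btrfs_try_tree_write_lock(eb))
 *              btrfs_tree_lock(eb);    (slow path, may sleep)
 */
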
/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
        /*
         * if we're nested, we have the write lock.  No new locking
         * is needed as long as we are the lock owner.
         * The write unlock will do a barrier for us, and the lock_nested
         * field only matters to the lock owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
        atomic_dec(&eb->read_locks);
        read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
        /*
         * if we're nested, we have the write lock.  No new locking
         * is needed as long as we are the lock owner.
         * The write unlock will do a barrier for us, and the lock_nested
         * field only matters to the lock owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
        if (atomic_dec_and_test(&eb->blocking_readers) &&
            waitqueue_active(&eb->read_lock_wq))
                wake_up(&eb->read_lock_wq);
        atomic_dec(&eb->read_locks);
}

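/*
 * Note there is no read_unlock() here: the rwlock was already dropped
 * when this lock was converted to blocking, so only the counters and
 * the wait queue remain to be updated.
 */
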
/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
again:
        wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
        wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                wait_event(eb->read_lock_wq,
                           atomic_read(&eb->blocking_readers) == 0);
                goto again;
        }
        if (atomic_read(&eb->blocking_writers)) {
                write_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        WARN_ON(atomic_read(&eb->spinning_writers));
        atomic_inc(&eb->spinning_writers);
        atomic_inc(&eb->write_locks);
        eb->lock_owner = current->pid;
}

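/*
 * The unlocked wait_event() calls at the top are only an optimistic
 * fast path; the conditions are rechecked under eb->lock and retried
 * via the goto, because a new blocking holder can slip in between the
 * wait and the write_lock().
 */
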
/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
        int blockers = atomic_read(&eb->blocking_writers);

        BUG_ON(blockers > 1);

        btrfs_assert_tree_locked(eb);
        eb->lock_owner = 0;
        atomic_dec(&eb->write_locks);

        if (blockers) {
                WARN_ON(atomic_read(&eb->spinning_writers));
                atomic_dec(&eb->blocking_writers);
                smp_mb();
                if (waitqueue_active(&eb->write_lock_wq))
                        wake_up(&eb->write_lock_wq);
        } else {
                WARN_ON(atomic_read(&eb->spinning_writers) != 1);
                atomic_dec(&eb->spinning_writers);
                write_unlock(&eb->lock);
        }
}

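/*
 * In the blocking case there is no write_unlock(): eb->lock was already
 * dropped when the lock was made blocking.  The smp_mb() orders the
 * blocking_writers decrement before the waitqueue_active() test so a
 * concurrent waiter is not missed.
 */
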
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->read_locks));
}