/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

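/*
 * Bits in btrfs_work->flags that track a work item's lifecycle:
 * WORK_DONE_BIT means work->func() has run, WORK_ORDER_DONE_BIT means
 * work->ordered_func() has run, and WORK_HIGH_PRIO_BIT routes the item
 * to the high-priority queue.
 */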
#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)

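/*
 * One underlying kernel workqueue together with the state used for
 * ordered execution and for throttling concurrency via the threshold
 * mechanism below.
 */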
struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;
	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding-related variables */
	atomic_t pending;
	int max_active;
	int current_max;
	int thresh;
	unsigned int count;
	spinlock_t thres_lock;
};

struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;
};

static void normal_work_helper(struct btrfs_work *work);

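/*
 * Each type of btrfs workqueue gets its own wrapper below, giving every
 * queue a distinct work function address. All of the wrappers simply
 * forward to normal_work_helper().
 */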
#define BTRFS_WORK_HELPER(name)						\
void btrfs_##name(struct work_struct *arg)				\
{									\
	struct btrfs_work *work = container_of(arg, struct btrfs_work,	\
					       normal_work);		\
	normal_work_helper(work);					\
}

BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(endio_repair_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);

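/*
 * Allocate one underlying workqueue. A thresh of 0 selects the default
 * threshold; any value below the default disables thresholding, in
 * which case the queue runs at max_active right away instead of being
 * scaled up on demand.
 */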
static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(const char *name, unsigned int flags, int max_active,
			 int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (!ret)
		return NULL;

	ret->max_active = max_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* With a low threshold, disabling thresholding is the better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_max = max_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		ret->current_max = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 ret->max_active,
						 "btrfs", name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 ret->max_active, "btrfs",
						 name);
	if (!ret->normal_wq) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

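/*
 * Allocate a btrfs_workqueue. The normal queue is always created; if
 * WQ_HIGHPRI is set in @flags, a second high-priority queue is created
 * alongside it for work marked with btrfs_set_work_high_priority().
 */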
struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
					      unsigned int flags,
					      int max_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (!ret)
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
					      max_active, thresh);
	if (!ret->normal) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(name, flags, max_active,
						    thresh);
		if (!ret->high) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}

/*
 * Hook for the thresholding mechanism, called from btrfs_queue_work.
 * This hook WILL be called from IRQ handler context, so
 * workqueue_set_max_active MUST NOT be called from here.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for the thresholding mechanism, called before executing the work.
 * This hook is called in kthread context, so workqueue_set_max_active
 * may be called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_max_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit how often workqueue_set_max_active
	 * is called.
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_max_active = wq->current_max;

	/*
	 * pending may change underneath us, but that's fine: we don't
	 * need a perfectly accurate value to calculate new_max_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_max_active++;
	if (pending < wq->thresh / 2)
		new_max_active--;
	new_max_active = clamp_val(new_max_active, 1, wq->max_active);
	if (new_max_active != wq->current_max) {
		need_change = 1;
		wq->current_max = new_max_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_max);
}

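/*
 * Run the ordered_func of every work item at the head of the ordered
 * list whose normal work has already completed. Items are processed
 * strictly in queueing order: an unfinished item at the head blocks
 * completion processing of everything behind it.
 */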
static void run_ordered_work(struct __btrfs_workqueue *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * we are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * we don't want to call the ordered free functions
		 * with the lock held though
		 */
		work->ordered_free(work);
		trace_btrfs_all_work_done(work);
	}
	spin_unlock_irqrestore(lock, flags);
}

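/* Common work function: execute the work, then drive ordered completion. */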
static void normal_work_helper(struct btrfs_work *work)
{
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func(), if it has no ordered_free, since the
	 *    struct is freed in work->func();
	 * 2) after setting WORK_DONE_BIT, since the work may be freed
	 *    by other threads almost instantly.
	 * So we save the needed things here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
	if (!need_order)
		trace_btrfs_all_work_done(work);
}

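/*
 * Initialize a work item. @uniq_func is the btrfs_*_helper wrapper for
 * the queue the work will be submitted to; @ordered_func and
 * @ordered_free may be NULL for work that needs no ordered completion.
 */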
void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, uniq_func);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}

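/*
 * Submit work to one underlying queue: bump the threshold counter, add
 * the item to the ordered list if it has an ordered_func, then hand it
 * to the kernel workqueue.
 */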
static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	queue_work(wq->normal_wq, &work->normal_work);
	trace_btrfs_work_queued(work);
}

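/*
 * Queue work on the normal queue, or on the high-priority queue when
 * the work is marked high priority and such a queue exists.
 */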
void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

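/* Drain and release one underlying workqueue. */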
static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

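/* Destroy both the normal and, if present, the high-priority queue. */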
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

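/*
 * Update the upper bound used by the threshold mechanism. Note that
 * this only adjusts the limit: the underlying workqueue's max_active
 * is changed the next time thresh_exec_hook() recalculates it.
 */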
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
{
	if (!wq)
		return;
	wq->normal->max_active = max;
	if (wq->high)
		wq->high->max_active = max;
}

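/* Mark a work item for the high-priority queue before it is queued. */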
void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}