/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

#define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32)
struct __btrfs_workqueue {
        struct workqueue_struct *normal_wq;
        /* List head pointing to ordered work list */
        struct list_head ordered_list;

        /* Spinlock for ordered_list */
        spinlock_t list_lock;

        /* Thresholding related variables */
        atomic_t pending;

        /* Upper limit of concurrent workers */
        int limit_active;

        /* Current number of concurrent workers */
        int current_active;

        /* Threshold to change current_active */
        int thresh;
        unsigned int count;
        spinlock_t thres_lock;
};

struct btrfs_workqueue {
        struct __btrfs_workqueue *normal;
        struct __btrfs_workqueue *high;
};

static void normal_work_helper(struct btrfs_work *work);

#define BTRFS_WORK_HELPER(name)                                         \
void btrfs_##name(struct work_struct *arg)                              \
{                                                                       \
        struct btrfs_work *work = container_of(arg, struct btrfs_work,  \
                                               normal_work);            \
        normal_work_helper(work);                                       \
}

bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq)
{
        /*
         * We could compare wq->normal->pending with num_online_cpus()
         * to support the "thresh == NO_THRESHOLD" case, but that would
         * require moving the atomic_inc/dec calls up in
         * thresh_queue/exec_hook.  Let's postpone it until someone
         * needs that case supported.
         */
        if (wq->normal->thresh == NO_THRESHOLD)
                return false;

        return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}
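
/*
 * A hedged usage sketch: a caller can back off instead of piling more
 * work onto an already backlogged queue.  fs_info->delayed_workers and
 * node->work below are illustrative assumptions, not names defined in
 * this file:
 *
 *      if (btrfs_workqueue_normal_congested(fs_info->delayed_workers))
 *              return;
 *      btrfs_queue_work(fs_info->delayed_workers, &node->work);
 */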

BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(endio_repair_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);
BTRFS_WORK_HELPER(scrubparity_helper);
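
/*
 * For reference, a minimal sketch of what one of the instantiations
 * above expands to; each work type gets its own trivial wrapper so the
 * running function can be told apart (e.g. in stack traces):
 *
 *      void btrfs_endio_helper(struct work_struct *arg)
 *      {
 *              struct btrfs_work *work = container_of(arg,
 *                              struct btrfs_work, normal_work);
 *              normal_work_helper(work);
 *      }
 */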

static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active,
                        int thresh)
{
        struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

        if (!ret)
                return NULL;

        ret->limit_active = limit_active;
        atomic_set(&ret->pending, 0);
        if (thresh == 0)
                thresh = DFT_THRESHOLD;
        /* For a low threshold, disabling thresholding is the better choice */
        if (thresh < DFT_THRESHOLD) {
                ret->current_active = limit_active;
                ret->thresh = NO_THRESHOLD;
        } else {
                /*
                 * For a thresholded wq, let its concurrency grow on demand.
                 * Use a minimal max_active at alloc time to reduce resource
                 * usage.
                 */
                ret->current_active = 1;
                ret->thresh = thresh;
        }

        if (flags & WQ_HIGHPRI)
                ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
                                                 ret->current_active, "btrfs",
                                                 name);
        else
                ret->normal_wq = alloc_workqueue("%s-%s", flags,
                                                 ret->current_active, "btrfs",
                                                 name);
        if (!ret->normal_wq) {
                kfree(ret);
                return NULL;
        }

        INIT_LIST_HEAD(&ret->ordered_list);
        spin_lock_init(&ret->list_lock);
        spin_lock_init(&ret->thres_lock);
        trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
        return ret;
}
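
/*
 * Worked example (illustrative values): __btrfs_alloc_workqueue("delalloc",
 * WQ_UNBOUND, 8, 32) yields a workqueue named "btrfs-delalloc" whose
 * max_active starts at 1 and is grown on demand by thresh_exec_hook()
 * up to the limit of 8; with WQ_HIGHPRI in the flags the name would be
 * "btrfs-delalloc-high" instead.
 */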

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
                                              unsigned int flags,
                                              int limit_active,
                                              int thresh)
{
        struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

        if (!ret)
                return NULL;

        ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
                                              limit_active, thresh);
        if (!ret->normal) {
                kfree(ret);
                return NULL;
        }

        if (flags & WQ_HIGHPRI) {
                ret->high = __btrfs_alloc_workqueue(name, flags, limit_active,
                                                    thresh);
                if (!ret->high) {
                        __btrfs_destroy_workqueue(ret->normal);
                        kfree(ret);
                        return NULL;
                }
        }
        return ret;
}
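
/*
 * A hedged usage sketch, modeled on how mount-time code typically sets
 * up these queues (fs_info->workers and max_active are assumptions for
 * illustration):
 *
 *      fs_info->workers = btrfs_alloc_workqueue("worker",
 *                                               WQ_FREEZABLE | WQ_HIGHPRI,
 *                                               max_active, 16);
 *
 * Passing WQ_HIGHPRI allocates both ->normal and ->high queues, so work
 * items flagged via btrfs_set_work_high_priority() get routed to the
 * high-priority queue by btrfs_queue_work().
 */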

/*
 * Hook for the threshold mechanism, called from btrfs_queue_work().
 * This hook can be called from IRQ handler context, so
 * workqueue_set_max_active() MUST NOT be called from here.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
        if (wq->thresh == NO_THRESHOLD)
                return;
        atomic_inc(&wq->pending);
}

/*
 * Hook for the threshold mechanism, called before executing the work.
 * This hook runs in kthread context, so workqueue_set_max_active()
 * may be called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
        int new_current_active;
        long pending;
        int need_change = 0;

        if (wq->thresh == NO_THRESHOLD)
                return;

        atomic_dec(&wq->pending);
        spin_lock(&wq->thres_lock);
        /*
         * Use wq->count to limit the calling frequency of
         * workqueue_set_max_active().
         */
        wq->count++;
        wq->count %= (wq->thresh / 4);
        if (!wq->count)
                goto out;
        new_current_active = wq->current_active;

        /*
         * pending may change concurrently, but that is fine: it does
         * not need to be accurate for calculating new_current_active.
         */
        pending = atomic_read(&wq->pending);
        if (pending > wq->thresh)
                new_current_active++;
        if (pending < wq->thresh / 2)
                new_current_active--;
        new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
        if (new_current_active != wq->current_active) {
                need_change = 1;
                wq->current_active = new_current_active;
        }
out:
        spin_unlock(&wq->thres_lock);

        if (need_change)
                workqueue_set_max_active(wq->normal_wq, wq->current_active);
}
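
/*
 * Worked example with thresh = 32 (the default): wq->count cycles
 * modulo 8, so the adjustment above is skipped on every 8th call.
 * When it does run, pending = 40 (> 32) grows current_active by one,
 * pending = 12 (< 16) shrinks it by one, and the result is clamped to
 * the range [1, limit_active].
 */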

static void run_ordered_work(struct __btrfs_workqueue *wq)
{
        struct list_head *list = &wq->ordered_list;
        struct btrfs_work *work;
        spinlock_t *lock = &wq->list_lock;
        unsigned long flags;

        while (1) {
                spin_lock_irqsave(lock, flags);
                if (list_empty(list))
                        break;
                work = list_entry(list->next, struct btrfs_work,
                                  ordered_list);
                if (!test_bit(WORK_DONE_BIT, &work->flags))
                        break;

                /*
                 * We are going to call the ordered done function, but
                 * we leave the work item on the list as a barrier so
                 * that later work items that are done don't have their
                 * functions called before this one returns.
                 */
                if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
                        break;
                trace_btrfs_ordered_sched(work);
                spin_unlock_irqrestore(lock, flags);
                work->ordered_func(work);

                /* Now take the lock again and drop our item from the list. */
                spin_lock_irqsave(lock, flags);
                list_del(&work->ordered_list);
                spin_unlock_irqrestore(lock, flags);

                /*
                 * We don't want to call the ordered free functions with
                 * the lock held, though.
                 */
                work->ordered_free(work);
                trace_btrfs_all_work_done(work);
        }
        spin_unlock_irqrestore(lock, flags);
}
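
/*
 * Ordering example: items A, B and C are queued in that order.  If B's
 * work->func() finishes first, B's WORK_DONE_BIT is set, but the loop
 * above still stops at A because A is not yet done; A stays on the
 * list as a barrier.  Only after A completes do the ordered functions
 * run, as A, then B, then C.
 */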

static void normal_work_helper(struct btrfs_work *work)
{
        struct __btrfs_workqueue *wq;
        int need_order = 0;

        /*
         * We should not touch anything inside work in the following cases:
         * 1) after work->func(), if it has no ordered_free, since the
         *    struct may already be freed by work->func();
         * 2) after setting WORK_DONE_BIT, since the work may be freed
         *    by another thread almost instantly.
         * So we save the needed things here.
         */
        if (work->ordered_func)
                need_order = 1;
        wq = work->wq;

        trace_btrfs_work_sched(work);
        thresh_exec_hook(wq);
        work->func(work);
        if (need_order) {
                set_bit(WORK_DONE_BIT, &work->flags);
                run_ordered_work(wq);
        }
        if (!need_order)
                trace_btrfs_all_work_done(work);
}

void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
                     btrfs_func_t func,
                     btrfs_func_t ordered_func,
                     btrfs_func_t ordered_free)
{
        work->func = func;
        work->ordered_func = ordered_func;
        work->ordered_free = ordered_free;
        INIT_WORK(&work->normal_work, uniq_func);
        INIT_LIST_HEAD(&work->ordered_list);
        work->flags = 0;
}
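
/*
 * A hedged initialization sketch.  uniq_func must be one of the
 * BTRFS_WORK_HELPER() instantiations above; async, run_func, finish_func
 * and free_func are hypothetical caller-supplied names:
 *
 *      btrfs_init_work(&async->work, btrfs_worker_helper, run_func,
 *                      finish_func, free_func);
 *
 * ordered_func and ordered_free may be NULL when completion ordering
 * is not needed; normal_work_helper() then skips run_ordered_work().
 */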

static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
                                      struct btrfs_work *work)
{
        unsigned long flags;

        work->wq = wq;
        thresh_queue_hook(wq);
        if (work->ordered_func) {
                spin_lock_irqsave(&wq->list_lock, flags);
                list_add_tail(&work->ordered_list, &wq->ordered_list);
                spin_unlock_irqrestore(&wq->list_lock, flags);
        }
        trace_btrfs_work_queued(work);
        queue_work(wq->normal_wq, &work->normal_work);
}

void btrfs_queue_work(struct btrfs_workqueue *wq,
                      struct btrfs_work *work)
{
        struct __btrfs_workqueue *dest_wq;

        if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
                dest_wq = wq->high;
        else
                dest_wq = wq->normal;
        __btrfs_queue_work(dest_wq, work);
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
        destroy_workqueue(wq->normal_wq);
        trace_btrfs_workqueue_destroy(wq);
        kfree(wq);
}

void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
        if (!wq)
                return;
        if (wq->high)
                __btrfs_destroy_workqueue(wq->high);
        __btrfs_destroy_workqueue(wq->normal);
        kfree(wq);
}

void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{
        if (!wq)
                return;
        wq->normal->limit_active = limit_active;
        if (wq->high)
                wq->high->limit_active = limit_active;
}

void btrfs_set_work_high_priority(struct btrfs_work *work)
{
        set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}
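
/*
 * Putting it together, a hedged end-to-end sketch (everything except
 * the btrfs_* API is a hypothetical name):
 *
 *      btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async,
 *                      done_one_async, free_one_async);
 *      if (urgent)
 *              btrfs_set_work_high_priority(&async->work);
 *      btrfs_queue_work(fs_info->workers, &async->work);
 *
 * The high-priority bit only takes effect when the destination
 * btrfs_workqueue was allocated with WQ_HIGHPRI, so that wq->high
 * exists.
 */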