/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

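/*
 * Background GC worker. In outline: sleep for wait_ms, skip this round if
 * the filesystem is frozen or gc_mutex cannot be taken, tune wait_ms up
 * when the device is busy and down once enough invalid blocks have
 * accumulated, then run one f2fs_gc() pass with gc_mutex held.
 */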
static int gc_thread_func(void *data)
{
        struct f2fs_sb_info *sbi = data;
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
        long wait_ms;

        wait_ms = gc_th->min_sleep_time;

        do {
                if (try_to_freeze())
                        continue;
                else
                        wait_event_interruptible_timeout(*wq,
                                                kthread_should_stop(),
                                                msecs_to_jiffies(wait_ms));
                if (kthread_should_stop())
                        break;

                if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
                        increase_sleep_time(gc_th, &wait_ms);
                        continue;
                }

                /*
                 * [GC triggering condition]
                 * 0. GC is not conducted currently.
                 * 1. There are enough dirty segments.
                 * 2. IO subsystem is idle by checking the # of writeback pages.
                 * 3. IO subsystem is idle by checking the # of requests in
                 *    bdev's request list.
                 *
                 * Note: we have to avoid triggering GC too frequently,
                 * because segments may be invalidated soon afterwards by
                 * user updates or deletions. So we wait a while to let
                 * dirty segments accumulate.
                 */
                if (!mutex_trylock(&sbi->gc_mutex))
                        continue;

                if (!is_idle(sbi)) {
                        increase_sleep_time(gc_th, &wait_ms);
                        mutex_unlock(&sbi->gc_mutex);
                        continue;
                }

                if (has_enough_invalid_blocks(sbi))
                        decrease_sleep_time(gc_th, &wait_ms);
                else
                        increase_sleep_time(gc_th, &wait_ms);

                stat_inc_bggc_count(sbi);

                /* if return value is not zero, no victim was selected */
                if (f2fs_gc(sbi))
                        wait_ms = gc_th->no_gc_sleep_time;

                /* balancing f2fs's metadata periodically */
                f2fs_balance_fs_bg(sbi);

        } while (!kthread_should_stop());
        return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th;
        dev_t dev = sbi->sb->s_bdev->bd_dev;
        int err = 0;

        gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
        if (!gc_th) {
                err = -ENOMEM;
                goto out;
        }

        gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
        gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
        gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

        gc_th->gc_idle = 0;

        sbi->gc_thread = gc_th;
        init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
        sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
                        "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(gc_th->f2fs_gc_task)) {
                err = PTR_ERR(gc_th->f2fs_gc_task);
                kfree(gc_th);
                sbi->gc_thread = NULL;
        }
out:
        return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        if (!gc_th)
                return;
        kthread_stop(gc_th->f2fs_gc_task);
        kfree(gc_th);
        sbi->gc_thread = NULL;
}

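/*
 * Map the gc_idle tunable to a victim selection mode: by default BG_GC
 * uses cost-benefit (GC_CB) and FG_GC uses greedy (GC_GREEDY); a gc_idle
 * value of 1 forces GC_CB and 2 forces GC_GREEDY.
 */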
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
        int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

        if (gc_th && gc_th->gc_idle) {
                if (gc_th->gc_idle == 1)
                        gc_mode = GC_CB;
                else if (gc_th->gc_idle == 2)
                        gc_mode = GC_GREEDY;
        }
        return gc_mode;
}

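/*
 * Fill in the victim selection policy: SSR scans the dirty segments of
 * the given type greedily, segment by segment, while LFS scans all dirty
 * segments in section-sized units using the mode chosen by
 * select_gc_type(). Either way the scan resumes from the last victim
 * offset recorded for that mode.
 */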
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
                        int type, struct victim_sel_policy *p)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        if (p->alloc_mode == SSR) {
                p->gc_mode = GC_GREEDY;
                p->dirty_segmap = dirty_i->dirty_segmap[type];
                p->max_search = dirty_i->nr_dirty[type];
                p->ofs_unit = 1;
        } else {
                p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
                p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
                p->max_search = dirty_i->nr_dirty[DIRTY];
                p->ofs_unit = sbi->segs_per_sec;
        }

        if (p->max_search > sbi->max_victim_search)
                p->max_search = sbi->max_victim_search;

        p->offset = sbi->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
                                struct victim_sel_policy *p)
{
        /* SSR allocates in a segment unit */
        if (p->alloc_mode == SSR)
                return 1 << sbi->log_blocks_per_seg;
        if (p->gc_mode == GC_GREEDY)
                return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
        else if (p->gc_mode == GC_CB)
                return UINT_MAX;
        else /* No other gc_mode */
                return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned int secno;

        /*
         * If the gc_type is FG_GC, we can reuse victim sections that were
         * already selected by background GC. Those sections are
         * guaranteed to have few valid blocks.
         */
        for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
                if (sec_usage_check(sbi, secno))
                        continue;
                clear_bit(secno, dirty_i->victim_secmap);
                return secno * sbi->segs_per_sec;
        }
        return NULL_SEGNO;
}

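/*
 * Cost-benefit victim cost. Reading off the code below: u is the
 * section's valid-block utilization in percent and age is its normalized
 * age in [0, 100], so the classic LFS benefit term age * (1 - u) / (1 + u)
 * becomes 100 * (100 - u) * age / (100 + u); subtracting it from UINT_MAX
 * turns "maximize benefit" into "minimize cost". For example (illustrative
 * numbers only), u = 20 and age = 50 give 100 * 80 * 50 / 120 = 3333,
 * i.e. a cost of UINT_MAX - 3333, while a fuller or younger section
 * scores closer to UINT_MAX and is picked later.
 */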
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int secno = GET_SECNO(sbi, segno);
        unsigned int start = secno * sbi->segs_per_sec;
        unsigned long long mtime = 0;
        unsigned int vblocks;
        unsigned char age = 0;
        unsigned char u;
        unsigned int i;

        for (i = 0; i < sbi->segs_per_sec; i++)
                mtime += get_seg_entry(sbi, start + i)->mtime;
        vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

        mtime = div_u64(mtime, sbi->segs_per_sec);
        vblocks = div_u64(vblocks, sbi->segs_per_sec);

        u = (vblocks * 100) >> sbi->log_blocks_per_seg;

        /* Handle the case where the user has changed the system time */
        if (mtime < sit_i->min_mtime)
                sit_i->min_mtime = mtime;
        if (mtime > sit_i->max_mtime)
                sit_i->max_mtime = mtime;
        if (sit_i->max_mtime != sit_i->min_mtime)
                age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
                                sit_i->max_mtime - sit_i->min_mtime);

        return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
                        unsigned int segno, struct victim_sel_policy *p)
{
        if (p->alloc_mode == SSR)
                return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

        /* alloc_mode == LFS */
        if (p->gc_mode == GC_GREEDY)
                return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
        else
                return get_cb_cost(sbi, segno);
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When called for GC, it just picks a victim segment and does not
 * remove it from the dirty seglist.
 * When called for SSR segment selection, it finds the segment with the
 * fewest valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
                unsigned int *result, int gc_type, int type, char alloc_mode)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct victim_sel_policy p;
        unsigned int secno, max_cost;
        int nsearched = 0;

        mutex_lock(&dirty_i->seglist_lock);

        p.alloc_mode = alloc_mode;
        select_policy(sbi, gc_type, type, &p);

        p.min_segno = NULL_SEGNO;
        p.min_cost = max_cost = get_max_cost(sbi, &p);

        if (p.alloc_mode == LFS && gc_type == FG_GC) {
                p.min_segno = check_bg_victims(sbi);
                if (p.min_segno != NULL_SEGNO)
                        goto got_it;
        }

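        /*
         * Scan the dirty segmap circularly from last_victim, tracking the
         * minimum-cost candidate; give up after max_search segments and
         * remember where the scan stopped for next time.
         */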
        while (1) {
                unsigned long cost;
                unsigned int segno;

                segno = find_next_bit(p.dirty_segmap, MAIN_SEGS(sbi), p.offset);
                if (segno >= MAIN_SEGS(sbi)) {
                        if (sbi->last_victim[p.gc_mode]) {
                                sbi->last_victim[p.gc_mode] = 0;
                                p.offset = 0;
                                continue;
                        }
                        break;
                }

                p.offset = segno + p.ofs_unit;
                if (p.ofs_unit > 1)
                        p.offset -= segno % p.ofs_unit;

                secno = GET_SECNO(sbi, segno);

                if (sec_usage_check(sbi, secno))
                        continue;
                if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
                        continue;

                cost = get_gc_cost(sbi, segno, &p);

                if (p.min_cost > cost) {
                        p.min_segno = segno;
                        p.min_cost = cost;
                } else if (unlikely(cost == max_cost)) {
                        continue;
                }

                if (nsearched++ >= p.max_search) {
                        sbi->last_victim[p.gc_mode] = segno;
                        break;
                }
        }
        if (p.min_segno != NULL_SEGNO) {
got_it:
                if (p.alloc_mode == LFS) {
                        secno = GET_SECNO(sbi, p.min_segno);
                        if (gc_type == FG_GC)
                                sbi->cur_victim_sec = secno;
                        else
                                set_bit(secno, dirty_i->victim_secmap);
                }
                *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

                trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
                                sbi->cur_victim_sec,
                                prefree_segments(sbi), free_segments(sbi));
        }
        mutex_unlock(&dirty_i->seglist_lock);

        return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
        .get_victim = get_victim_by_default,
};

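/*
 * gc_inode_list pairs a radix tree keyed by inode number with a plain
 * list: the tree makes duplicate detection cheap, while the list keeps
 * every collected inode so put_gc_inode() can drop the references later.
 */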
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
        struct inode_entry *ie;

        ie = radix_tree_lookup(&gc_list->iroot, ino);
        if (ie)
                return ie->inode;
        return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
        struct inode_entry *new_ie;

        if (inode == find_gc_inode(gc_list, inode->i_ino)) {
                iput(inode);
                return;
        }
        new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        new_ie->inode = inode;

        f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
        list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
        struct inode_entry *ie, *next_ie;
        list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
                radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
                iput(ie->inode);
                list_del(&ie->list);
                kmem_cache_free(inode_entry_slab, ie);
        }
}

static int check_valid_map(struct f2fs_sb_info *sbi,
                                unsigned int segno, int offset)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct seg_entry *sentry;
        int ret;

        mutex_lock(&sit_i->sentry_lock);
        sentry = get_seg_entry(sbi, segno);
        ret = f2fs_test_bit(offset, sentry->cur_valid_map);
        mutex_unlock(&sit_i->sentry_lock);
        return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If they match, the node is still valid and is
 * migrated with cold status; otherwise the stale node is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
                struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
        bool initial = true;
        struct f2fs_summary *entry;
        int off;

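        /*
         * Two passes over the segment's summary entries: the first pass
         * only issues readahead for the node pages, the second actually
         * dirties (and, for FG_GC, later syncs) the still-valid nodes.
         */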
next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                nid_t nid = le32_to_cpu(entry->nid);
                struct page *node_page;

                /* stop BG_GC if there are not enough free sections */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
                        return;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (initial) {
                        ra_node_page(sbi, nid);
                        continue;
                }
                node_page = get_node_page(sbi, nid);
                if (IS_ERR(node_page))
                        continue;

                /* block may become invalid during get_node_page */
                if (check_valid_map(sbi, segno, off) == 0) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                /* set page dirty and write it */
                if (gc_type == FG_GC) {
                        f2fs_wait_on_page_writeback(node_page, NODE);
                        set_page_dirty(node_page);
                } else {
                        if (!PageWriteback(node_page))
                                set_page_dirty(node_page);
                }
                f2fs_put_page(node_page, 1);
                stat_inc_node_blk_count(sbi, 1, gc_type);
        }

        if (initial) {
                initial = false;
                goto next_step;
        }

        if (gc_type == FG_GC) {
                struct writeback_control wbc = {
                        .sync_mode = WB_SYNC_ALL,
                        .nr_to_write = LONG_MAX,
                        .for_reclaim = 0,
                };
                sync_node_pages(sbi, 0, &wbc);

                /*
                 * In the case of FG_GC, it'd be better to reclaim this victim
                 * completely.
                 */
                if (get_valid_blocks(sbi, segno, 1) != 0)
                        goto next_step;
        }
}

/*
 * Calculate the start block index corresponding to the given node offset.
 * Be careful: the caller must pass node offsets of direct node blocks
 * only. Passing an offset that points to any other node type, such as an
 * indirect or double indirect node block, is a caller bug.
 */
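/*
 * For example: node offset 0 is the inode, offsets 1 and 2 are the two
 * direct node blocks, offset 3 is the first indirect node, and offset 4
 * is that indirect node's first direct child. With node_ofs == 4 the code
 * below computes dec = (4 - 4) / (NIDS_PER_BLOCK + 1) = 0, so
 * bidx = 4 - 2 - 0 = 2, giving a start of
 * 2 * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi): the inode's own addresses
 * plus the two direct node blocks that precede this one.
 */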
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
        unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
        unsigned int bidx;

        if (node_ofs == 0)
                return 0;

        if (node_ofs <= 2) {
                bidx = node_ofs - 1;
        } else if (node_ofs <= indirect_blks) {
                int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 2 - dec;
        } else {
                int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 5 - dec;
        }
        return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}

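/*
 * Verify that the data block at blkaddr is still referenced by the node
 * named in the summary entry: the node's NAT version must match and the
 * block address stored in the node page must equal blkaddr. Returns 1 if
 * the block is live (filling in *dni and *nofs), 0 otherwise.
 */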
static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
        struct page *node_page;
        nid_t nid;
        unsigned int ofs_in_node;
        block_t source_blkaddr;

        nid = le32_to_cpu(sum->nid);
        ofs_in_node = le16_to_cpu(sum->ofs_in_node);

        node_page = get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return 0;

        get_node_info(sbi, nid, dni);

        if (sum->version != dni->version) {
                f2fs_put_page(node_page, 1);
                return 0;
        }

        *nofs = ofs_of_node(node_page);
        source_blkaddr = datablock_addr(node_page, ofs_in_node);
        f2fs_put_page(node_page, 1);

        if (source_blkaddr != blkaddr)
                return 0;
        return 1;
}

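/*
 * Migrate one data page. Under BG_GC the page is only marked dirty and
 * cold, leaving the actual write to the flusher; under FG_GC the page is
 * written out synchronously so the victim can be reclaimed right away.
 */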
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
        struct f2fs_io_info fio = {
                .type = DATA,
                .rw = WRITE_SYNC,
        };

        if (gc_type == BG_GC) {
                if (PageWriteback(page))
                        goto out;
                set_page_dirty(page);
                set_cold_data(page);
        } else {
                f2fs_wait_on_page_writeback(page, DATA);

                if (clear_page_dirty_for_io(page))
                        inode_dec_dirty_pages(inode);
                set_cold_data(page);
                do_write_data_page(page, &fio);
                clear_cold_data(page);
        }
out:
        f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
        struct super_block *sb = sbi->sb;
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;
        int phase = 0;

        start_addr = START_BLOCK(sbi, segno);

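        /*
         * Four phases over the segment's summary entries:
         *   0: readahead for the dnode pages,
         *   1: readahead for the owning inodes' node pages,
         *   2: iget each inode, touch its data page, queue it in gc_list,
         *   3: lock and move each queued inode's data pages.
         */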
next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                struct page *data_page;
                struct inode *inode;
                struct node_info dni; /* dnode info for the data */
                unsigned int ofs_in_node, nofs;
                block_t start_bidx;

                /* stop BG_GC if there are not enough free sections */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
                        return;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (phase == 0) {
                        ra_node_page(sbi, le32_to_cpu(entry->nid));
                        continue;
                }

                /* Get an inode by ino, checking validity */
                if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
                        continue;

                if (phase == 1) {
                        ra_node_page(sbi, dni.ino);
                        continue;
                }

                ofs_in_node = le16_to_cpu(entry->ofs_in_node);

                if (phase == 2) {
                        inode = f2fs_iget(sb, dni.ino);
                        if (IS_ERR(inode) || is_bad_inode(inode))
                                continue;

                        start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));

                        data_page = find_data_page(inode,
                                        start_bidx + ofs_in_node, false);
                        if (IS_ERR(data_page)) {
                                iput(inode);
                                continue;
                        }

                        f2fs_put_page(data_page, 0);
                        add_gc_inode(gc_list, inode);
                        continue;
                }

                /* phase 3 */
                inode = find_gc_inode(gc_list, dni.ino);
                if (inode) {
                        start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
                        data_page = get_lock_data_page(inode,
                                                start_bidx + ofs_in_node);
                        if (IS_ERR(data_page))
                                continue;
                        move_data_page(inode, data_page, gc_type);
                        stat_inc_data_blk_count(sbi, 1, gc_type);
                }
        }

        if (++phase < 4)
                goto next_step;

        if (gc_type == FG_GC) {
                f2fs_submit_merged_bio(sbi, DATA, WRITE);

                /*
                 * In the case of FG_GC, it'd be better to reclaim this victim
                 * completely.
                 */
                if (get_valid_blocks(sbi, segno, 1) != 0) {
                        phase = 2;
                        goto next_step;
                }
        }
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
                        int gc_type)
{
        struct sit_info *sit_i = SIT_I(sbi);
        int ret;

        mutex_lock(&sit_i->sentry_lock);
        ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
                                              NO_CHECK_TYPE, LFS);
        mutex_unlock(&sit_i->sentry_lock);
        return ret;
}

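/*
 * Collect one victim segment: read its summary block, then dispatch to
 * node or data GC depending on the summary type. The block plug lets the
 * GC writes merge into larger requests.
 */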
static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
                                struct gc_inode_list *gc_list, int gc_type)
{
        struct page *sum_page;
        struct f2fs_summary_block *sum;
        struct blk_plug plug;

        /* read segment summary of victim */
        sum_page = get_sum_page(sbi, segno);

        blk_start_plug(&plug);

        sum = page_address(sum_page);

        switch (GET_SUM_TYPE((&sum->footer))) {
        case SUM_TYPE_NODE:
                gc_node_segment(sbi, sum->entries, segno, gc_type);
                break;
        case SUM_TYPE_DATA:
                gc_data_segment(sbi, sum->entries, gc_list, segno, gc_type);
                break;
        }
        blk_finish_plug(&plug);

        stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
        stat_inc_call_count(sbi->stat_info);

        f2fs_put_page(sum_page, 1);
}

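/*
 * Main GC entry point. Note that gc_mutex is only released here, so the
 * caller is expected to hold it. Starts as BG_GC and escalates to FG_GC
 * (with a checkpoint) when free sections run short, then keeps collecting
 * whole sections until enough free sections exist. Returns 0 if at least
 * one victim was collected, -1 otherwise.
 */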
int f2fs_gc(struct f2fs_sb_info *sbi)
{
        unsigned int segno, i;
        int gc_type = BG_GC;
        int nfree = 0;
        int ret = -1;
        struct cp_control cpc;
        struct gc_inode_list gc_list = {
                .ilist = LIST_HEAD_INIT(gc_list.ilist),
                .iroot = RADIX_TREE_INIT(GFP_NOFS),
        };

        cpc.reason = __get_cp_reason(sbi);
gc_more:
        if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
                goto stop;
        if (unlikely(f2fs_cp_error(sbi)))
                goto stop;

        if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
                gc_type = FG_GC;
                write_checkpoint(sbi, &cpc);
        }

        if (!__get_victim(sbi, &segno, gc_type))
                goto stop;
        ret = 0;

        /* read ahead multiple SSA blocks that have contiguous addresses */
        if (sbi->segs_per_sec > 1)
                ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
                                                                META_SSA);

        for (i = 0; i < sbi->segs_per_sec; i++)
                do_garbage_collect(sbi, segno + i, &gc_list, gc_type);

        if (gc_type == FG_GC) {
                sbi->cur_victim_sec = NULL_SEGNO;
                nfree++;
                WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
        }

        if (has_not_enough_free_secs(sbi, nfree))
                goto gc_more;

        if (gc_type == FG_GC)
                write_checkpoint(sbi, &cpc);
stop:
        mutex_unlock(&sbi->gc_mutex);

        put_gc_inode(&gc_list);
        return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
        DIRTY_I(sbi)->v_ops = &default_v_ops;
}