These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / fs / f2fs / shrinker.c
1 /*
2  * f2fs shrinker support
3  *   the basic infra was copied from fs/ubifs/shrinker.c
4  *
5  * Copyright (c) 2015 Motorola Mobility
6  * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 #include <linux/fs.h>
13 #include <linux/f2fs_fs.h>
14
15 #include "f2fs.h"
16
17 static LIST_HEAD(f2fs_list);
18 static DEFINE_SPINLOCK(f2fs_list_lock);
19 static unsigned int shrinker_run_no;
20
21 static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
22 {
23         return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
24 }
25
26 static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
27 {
28         if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK)
29                 return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK;
30         return 0;
31 }
32
33 static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
34 {
35         return sbi->total_ext_tree + atomic_read(&sbi->total_ext_node);
36 }
37
/*
 * Shrinker ->count_objects callback: estimate how many in-memory
 * objects (extent cache entries, clean NAT entries, surplus free nids)
 * could be reclaimed across all registered f2fs instances.
 *
 * Locking protocol: f2fs_list_lock protects the f2fs_list walk.  For
 * each sbi we take umount_mutex (trylock, so a busy/unmounting fs is
 * simply skipped) and only then drop the spinlock while counting; the
 * mutex keeps f2fs_put_super from tearing the sbi down under us.  The
 * list cursor is advanced after re-taking the spinlock but before
 * releasing umount_mutex, so the node we step over cannot be unlinked
 * in between.
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			/* contended (likely unmounting) — skip this sbi */
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		/* count extent cache entries */
		count += __count_extent_cache(sbi);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		/* count free nids cache entries */
		count += __count_free_nids(sbi);

		/* re-lock before touching the list; advance while sbi is still pinned */
		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}
73
/*
 * Shrinker ->scan_objects callback: try to free up to sc->nr_to_scan
 * objects, spreading the work round-robin over all f2fs instances.
 *
 * A per-invocation run number tags each sbi already visited so one
 * scan never processes the same instance twice (the do/while skips 0,
 * which would collide with a never-visited sbi after wraparound).
 * Locking follows f2fs_shrink_count: umount_mutex (trylock) pins the
 * sbi against f2fs_put_super while f2fs_list_lock is dropped for the
 * actual reclaim work.  Returns the number of objects freed.
 */
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
	/* allocate a non-zero run number for this scan pass */
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* already handled in this pass — the list wrapped around */
		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink extent cache entries; give it half the budget */
		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += try_to_free_nats(sbi, nr - freed);

		/* shrink free nids cache entries */
		if (freed < nr)
			freed += try_to_free_nids(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		/* rotate to the tail so the next scan starts elsewhere */
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}
124
/*
 * Register an f2fs instance with the global shrinker list (called at
 * mount time) so f2fs_shrink_count/scan will visit it.
 */
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}
131
/*
 * Unregister an f2fs instance from the shrinker list (called at
 * umount).  The extent cache is drained first, while the sbi is still
 * fully alive; the caller is expected to hold umount_mutex so the
 * shrinker cannot race with the teardown (see f2fs_shrink_count —
 * TODO confirm against f2fs_put_super in super.c, not visible here).
 */
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	/* release every extent cache object before leaving the list */
	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));

	spin_lock(&f2fs_list_lock);
	list_del(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}