Add the rt linux 4.1.3-rt3 as base
[kvmfornfv.git] / kernel / drivers / md / bcache / closure.c
1 /*
2  * Asynchronous refcounty things
3  *
4  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
5  * Copyright 2012 Google, Inc.
6  */
7
8 #include <linux/debugfs.h>
9 #include <linux/module.h>
10 #include <linux/seq_file.h>
11
12 #include "closure.h"
13
/*
 * closure_put_after_sub() - common tail of closure_put()/closure_sub().
 * @cl:    the closure whose refcount was just atomically decremented
 * @flags: the post-decrement value of cl->remaining (refcount bits plus
 *         state flags packed into one atomic word)
 *
 * Delivers the wakeup / requeue / destructor side effects that become due
 * once the remaining count reaches particular values.
 */
static inline void closure_put_after_sub(struct closure *cl, int flags)
{
	/* Extract just the refcount portion of the packed word. */
	int r = flags & CLOSURE_REMAINING_MASK;

	/* Guard bits set means the refcount under/overflowed. */
	BUG_ON(flags & CLOSURE_GUARD_MASK);
	/* At zero, no state flag other than DESTRUCTOR may remain. */
	BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));

	/* Must deliver precisely one wakeup */
	if (r == 1 && (flags & CLOSURE_SLEEPING))
		wake_up_process(cl->task);

	if (!r) {
		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			/*
			 * A continuation is pending: reset the refcount and
			 * hand the closure back to its workqueue/runner.
			 */
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
		} else {
			/*
			 * Cache parent and destructor first: the destructor
			 * may free @cl, after which it must not be touched.
			 */
			struct closure *parent = cl->parent;
			closure_fn *destructor = cl->fn;

			closure_debug_destroy(cl);

			if (destructor)
				destructor(cl);

			/* Drop the ref this closure held on its parent. */
			if (parent)
				closure_put(parent);
		}
	}
}
44
45 /* For clearing flags with the same atomic op as a put */
46 void closure_sub(struct closure *cl, int v)
47 {
48         closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
49 }
50 EXPORT_SYMBOL(closure_sub);
51
52 /**
53  * closure_put - decrement a closure's refcount
54  */
55 void closure_put(struct closure *cl)
56 {
57         closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
58 }
59 EXPORT_SYMBOL(closure_put);
60
61 /**
62  * closure_wake_up - wake up all closures on a wait list, without memory barrier
63  */
64 void __closure_wake_up(struct closure_waitlist *wait_list)
65 {
66         struct llist_node *list;
67         struct closure *cl;
68         struct llist_node *reverse = NULL;
69
70         list = llist_del_all(&wait_list->list);
71
72         /* We first reverse the list to preserve FIFO ordering and fairness */
73
74         while (list) {
75                 struct llist_node *t = list;
76                 list = llist_next(list);
77
78                 t->next = reverse;
79                 reverse = t;
80         }
81
82         /* Then do the wakeups */
83
84         while (reverse) {
85                 cl = container_of(reverse, struct closure, list);
86                 reverse = llist_next(reverse);
87
88                 closure_set_waiting(cl, 0);
89                 closure_sub(cl, CLOSURE_WAITING + 1);
90         }
91 }
92 EXPORT_SYMBOL(__closure_wake_up);
93
/**
 * closure_wait - add a closure to a waitlist
 *
 * @waitlist will own a ref on @cl, which will be released when
 * closure_wake_up() is called on @waitlist.
 *
 * Returns false (without taking a ref) if @cl was already on a waitlist.
 */
bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
	/* A closure can only be on one waitlist at a time. */
	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
		return false;

	/* Record the caller's IP for the debugfs dump. */
	closure_set_waiting(cl, _RET_IP_);
	/*
	 * Mark WAITING and take a ref *before* publishing on the list:
	 * once llist_add() completes, a concurrent __closure_wake_up()
	 * may immediately consume cl.
	 */
	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
	llist_add(&cl->list, &waitlist->list);

	return true;
}
EXPORT_SYMBOL(closure_wait);
113
/**
 * closure_sync - sleep until a closure a closure has nothing left to wait on
 *
 * Sleeps until the refcount hits 1 - the thread that's running the closure owns
 * the last refcount.
 */
void closure_sync(struct closure *cl)
{
	while (1) {
		/*
		 * Set SLEEPING and task_state *before* re-checking the
		 * refcount, so a concurrent closure_put_after_sub() either
		 * sees SLEEPING and wakes us, or we see remaining == 1 here.
		 */
		__closure_start_sleep(cl);
		closure_set_ret_ip(cl);

		/* Only our own ref left: nothing outstanding to wait for. */
		if ((atomic_read(&cl->remaining) &
		     CLOSURE_REMAINING_MASK) == 1)
			break;

		schedule();
	}

	/* Clear SLEEPING and restore TASK_RUNNING. */
	__closure_end_sleep(cl);
}
EXPORT_SYMBOL(closure_sync);
136
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG

/* Global registry of live closures, for the debugfs dump below. */
static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);
141
142 void closure_debug_create(struct closure *cl)
143 {
144         unsigned long flags;
145
146         BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
147         cl->magic = CLOSURE_MAGIC_ALIVE;
148
149         spin_lock_irqsave(&closure_list_lock, flags);
150         list_add(&cl->all, &closure_list);
151         spin_unlock_irqrestore(&closure_list_lock, flags);
152 }
153 EXPORT_SYMBOL(closure_debug_create);
154
155 void closure_debug_destroy(struct closure *cl)
156 {
157         unsigned long flags;
158
159         BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
160         cl->magic = CLOSURE_MAGIC_DEAD;
161
162         spin_lock_irqsave(&closure_list_lock, flags);
163         list_del(&cl->all);
164         spin_unlock_irqrestore(&closure_list_lock, flags);
165 }
166 EXPORT_SYMBOL(closure_debug_destroy);
167
/* debugfs file handle for "closures". */
static struct dentry *debug;

/* Peek at work_struct's flag word to test WORK_STRUCT_PENDING below. */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
171
/*
 * seq_file show callback: dump every live closure (address, return IP,
 * continuation fn, parent, refcount and state flags) under the list lock.
 */
static int debug_seq_show(struct seq_file *f, void *data)
{
	struct closure *cl;
	spin_lock_irq(&closure_list_lock);

	list_for_each_entry(cl, &closure_list, all) {
		int r = atomic_read(&cl->remaining);

		seq_printf(f, "%p: %pF -> %pf p %p r %i ",
			   cl, (void *) cl->ip, cl->fn, cl->parent,
			   r & CLOSURE_REMAINING_MASK);

		/* Q = queued on workqueue, R/S/Sl = state flag bits. */
		/*
		 * NOTE(review): this line's trailing "\n" plus the
		 * unconditional "\n" below yields a blank line per entry,
		 * and the " W" line starts on its own line — looks like a
		 * formatting quirk; confirm intended output before changing.
		 */
		seq_printf(f, "%s%s%s%s\n",
			   test_bit(WORK_STRUCT_PENDING,
				    work_data_bits(&cl->work)) ? "Q" : "",
			   r & CLOSURE_RUNNING  ? "R" : "",
			   r & CLOSURE_STACK    ? "S" : "",
			   r & CLOSURE_SLEEPING ? "Sl" : "");

		/* If waiting, print the IP recorded by closure_wait(). */
		if (r & CLOSURE_WAITING)
			seq_printf(f, " W %pF\n",
				   (void *) cl->waiting_on);

		seq_printf(f, "\n");
	}

	spin_unlock_irq(&closure_list_lock);
	return 0;
}
201
/* open callback: single-shot seq_file — debug_seq_show() emits everything. */
static int debug_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_seq_show, NULL);
}
206
/* file_operations for the debugfs "closures" file. */
static const struct file_operations debug_ops = {
	.owner		= THIS_MODULE,
	.open		= debug_seq_open,
	.read		= seq_read,
	.release	= single_release
};
213
/* Create the read-only (0400) debugfs "closures" file at debugfs root. */
void __init closure_debug_init(void)
{
	debug = debugfs_create_file("closures", 0400, NULL, NULL, &debug_ops);
}
218
219 #endif
220
221 MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
222 MODULE_LICENSE("GPL");