/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
        BH_Uptodate,    /* Contains valid data */
        BH_Dirty,       /* Is dirty */
        BH_Lock,        /* Is locked */
        BH_Req,         /* Has been submitted for I/O */
        BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
                          * IO completion of other buffers in the page
                          */

        BH_Mapped,      /* Has a disk mapping */
        BH_New,         /* Disk mapping was newly created by get_block */
        BH_Async_Read,  /* Is under end_buffer_async_read I/O */
        BH_Async_Write, /* Is under end_buffer_async_write I/O */
        BH_Delay,       /* Buffer is not yet allocated on disk */
        BH_Boundary,    /* Block is followed by a discontiguity */
        BH_Write_EIO,   /* I/O error on write */
        BH_Unwritten,   /* Buffer is allocated on disk but not written */
        BH_Quiet,       /* Buffer Error Printks to be quiet */
        BH_Meta,        /* Buffer contains metadata */
        BH_Prio,        /* Buffer should be submitted with REQ_PRIO */
        BH_Defer_Completion, /* Defer AIO completion to workqueue */

        BH_PrivateStart,/* not a state bit, but the first bit available
                         * for private allocation by other entities
                         */
};

#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
        unsigned long b_state;          /* buffer state bitmap (see above) */
        struct buffer_head *b_this_page;/* circular list of page's buffers */
        struct page *b_page;            /* the page this bh is mapped to */

        sector_t b_blocknr;             /* start block number */
        size_t b_size;                  /* size of mapping */
        char *b_data;                   /* pointer to data within the page */

        struct block_device *b_bdev;
        bh_end_io_t *b_end_io;          /* I/O completion */
        void *b_private;                /* reserved for b_end_io */
        struct list_head b_assoc_buffers; /* associated with another mapping */
        struct address_space *b_assoc_map;      /* mapping this buffer is
                                                   associated with */
        atomic_t b_count;               /* users using this buffer_head */
#ifdef CONFIG_PREEMPT_RT_BASE
        spinlock_t b_uptodate_lock;
#if IS_ENABLED(CONFIG_JBD2)
        spinlock_t b_state_lock;
        spinlock_t b_journal_head_lock;
#endif
#endif
};

static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
{
        unsigned long flags;

#ifndef CONFIG_PREEMPT_RT_BASE
        local_irq_save(flags);
        bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
#else
        spin_lock_irqsave(&bh->b_uptodate_lock, flags);
#endif
        return flags;
}

static inline void
bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
{
#ifndef CONFIG_PREEMPT_RT_BASE
        bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
        local_irq_restore(flags);
#else
        spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
#endif
}

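/*
 * Example: a minimal sketch of how an I/O completion handler might use
 * the pair above to serialise buffer state updates against the other
 * buffers in the page (the in-tree users are end_buffer_async_read()
 * and end_buffer_async_write() in fs/buffer.c); foo_end_buffer() is a
 * hypothetical bh_end_io_t:
 *
 *      static void foo_end_buffer(struct buffer_head *bh, int uptodate)
 *      {
 *              unsigned long flags;
 *
 *              flags = bh_uptodate_lock_irqsave(bh);
 *              if (uptodate)
 *                      set_buffer_uptodate(bh);
 *              else
 *                      clear_buffer_uptodate(bh);
 *              ... walk bh->b_this_page under the lock ...
 *              bh_uptodate_unlock_irqrestore(bh, flags);
 *      }
 */
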
static inline void buffer_head_init_locks(struct buffer_head *bh)
{
#ifdef CONFIG_PREEMPT_RT_BASE
        spin_lock_init(&bh->b_uptodate_lock);
#if IS_ENABLED(CONFIG_JBD2)
        spin_lock_init(&bh->b_state_lock);
        spin_lock_init(&bh->b_journal_head_lock);
#endif
#endif
}

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 */
#define BUFFER_FNS(bit, name)                                           \
static inline void set_buffer_##name(struct buffer_head *bh)            \
{                                                                       \
        set_bit(BH_##bit, &(bh)->b_state);                              \
}                                                                       \
static inline void clear_buffer_##name(struct buffer_head *bh)          \
{                                                                       \
        clear_bit(BH_##bit, &(bh)->b_state);                            \
}                                                                       \
static inline int buffer_##name(const struct buffer_head *bh)           \
{                                                                       \
        return test_bit(BH_##bit, &(bh)->b_state);                      \
}

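/*
 * For example, the BUFFER_FNS(Dirty, dirty) invocation below expands to:
 *
 *      static inline void set_buffer_dirty(struct buffer_head *bh)
 *      {
 *              set_bit(BH_Dirty, &(bh)->b_state);
 *      }
 *      static inline void clear_buffer_dirty(struct buffer_head *bh)
 *      {
 *              clear_bit(BH_Dirty, &(bh)->b_state);
 *      }
 *      static inline int buffer_dirty(const struct buffer_head *bh)
 *      {
 *              return test_bit(BH_Dirty, &(bh)->b_state);
 *      }
 */
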
/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)                                       \
static inline int test_set_buffer_##name(struct buffer_head *bh)        \
{                                                                       \
        return test_and_set_bit(BH_##bit, &(bh)->b_state);              \
}                                                                       \
static inline int test_clear_buffer_##name(struct buffer_head *bh)      \
{                                                                       \
        return test_and_clear_bit(BH_##bit, &(bh)->b_state);            \
}

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)

#define bh_offset(bh)           ((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)                                      \
        ({                                                      \
                BUG_ON(!PagePrivate(page));                     \
                ((struct buffer_head *)page_private(page));     \
        })
#define page_has_buffers(page)  PagePrivate(page)

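/*
 * Example: a sketch of walking a page's buffers; b_this_page is a
 * circular list, and page_has_buffers() must be checked first because
 * page_buffers() BUGs on a page with no private buffer_heads:
 *
 *      struct buffer_head *bh, *head;
 *
 *      if (!page_has_buffers(page))
 *              return;
 *      bh = head = page_buffers(page);
 *      do {
 *              ... inspect one bh ...
 *              bh = bh->b_this_page;
 *      } while (bh != head);
 */
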
void buffer_check_dirty_writeback(struct page *page,
                                     bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                int retry);
void create_empty_buffers(struct page *, unsigned long,
                        unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
                        unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
                                  unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
                                sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int rw);
void write_dirty_buffer(struct buffer_head *bh, int rw);
int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags);
int submit_bh(int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned int offset,
                          unsigned int length);
int block_write_full_page(struct page *page, get_block_t *get_block,
                                struct writeback_control *wbc);
int block_read_full_page(struct page*, get_block_t*);
int block_is_partially_uptodate(struct page *page, unsigned long from,
                                unsigned long count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
                unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
                get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
                                loff_t, unsigned, unsigned,
                                struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
                                loff_t, unsigned, unsigned,
                                struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
                        unsigned, unsigned, struct page **, void **,
                        get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                                get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline int block_page_mkwrite_return(int err)
{
        if (err == 0)
                return VM_FAULT_LOCKED;
        if (err == -EFAULT)
                return VM_FAULT_NOPAGE;
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
        if (err == -EAGAIN)
                return VM_FAULT_RETRY;
        /* -ENOSPC, -EDQUOT, -EIO ... */
        return VM_FAULT_SIGBUS;
}
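/*
 * Example: a sketch of a filesystem's ->page_mkwrite() built on the two
 * helpers above; foo_page_mkwrite() and foo_get_block() are hypothetical:
 *
 *      static int foo_page_mkwrite(struct vm_area_struct *vma,
 *                                  struct vm_fault *vmf)
 *      {
 *              int err = block_page_mkwrite(vma, vmf, foo_get_block);
 *
 *              return block_page_mkwrite_return(err);
 *      }
 */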
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
                                struct page **, void **, get_block_t*);
int nobh_write_end(struct file *, struct address_space *,
                                loff_t, unsigned, unsigned,
                                struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
                        struct writeback_control *wbc);

void buffer_init(void);

/*
 * inline definitions
 */

static inline void attach_page_buffers(struct page *page,
                struct buffer_head *head)
{
        page_cache_get(page);
        SetPagePrivate(page);
        set_page_private(page, (unsigned long)head);
}

static inline void get_bh(struct buffer_head *bh)
{
        atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
        smp_mb__before_atomic();
        atomic_dec(&bh->b_count);
}

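/*
 * Example: b_count pins a buffer_head across I/O.  A sketch of the usual
 * pattern (modeled on sync_dirty_buffer() in fs/buffer.c): take an extra
 * reference before submission, and let the completion handler drop it
 * again via put_bh():
 *
 *      lock_buffer(bh);
 *      get_bh(bh);
 *      bh->b_end_io = end_buffer_write_sync;
 *      submit_bh(WRITE, bh);
 *      wait_on_buffer(bh);
 */
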
static inline void brelse(struct buffer_head *bh)
{
        if (bh)
                __brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
        if (bh)
                __bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
        return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

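/*
 * Example: the classic read-use-release pattern built on sb_bread(); a
 * minimal sketch with the filesystem-specific work elided.  sb_bread()
 * returns NULL if the block was unreadable:
 *
 *      struct buffer_head *bh = sb_bread(sb, block);
 *
 *      if (!bh)
 *              return -EIO;
 *      ... read or modify bh->b_data ...
 *      mark_buffer_dirty(bh);          (only if it was modified)
 *      brelse(bh);
 */
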
static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
        return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
        __breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
        return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
        return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
        return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
        set_buffer_mapped(bh);
        bh->b_bdev = sb->s_bdev;
        bh->b_blocknr = block;
        bh->b_size = sb->s_blocksize;
}

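/*
 * Example: map_bh() is typically called from a filesystem's get_block_t
 * implementation once the logical block has been resolved.  A minimal
 * sketch; foo_get_block() and foo_lookup() are hypothetical:
 *
 *      static int foo_get_block(struct inode *inode, sector_t iblock,
 *                               struct buffer_head *bh_result, int create)
 *      {
 *              sector_t phys = foo_lookup(inode, iblock, create);
 *
 *              if (!phys)
 *                      return -EIO;
 *              map_bh(bh_result, inode->i_sb, phys);
 *              return 0;
 *      }
 */
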
static inline void wait_on_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (buffer_locked(bh))
                __wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
        return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (!trylock_buffer(bh))
                __lock_buffer(bh);
}

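/*
 * Example: the usual lock/modify/unlock discipline when rewriting a
 * whole buffer (a sketch; src is hypothetical, and a partial write
 * would first need the buffer brought uptodate):
 *
 *      lock_buffer(bh);
 *      memcpy(bh->b_data, src, bh->b_size);
 *      set_buffer_uptodate(bh);
 *      mark_buffer_dirty(bh);
 *      unlock_buffer(bh);
 */
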
static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
                                                   sector_t block,
                                                   unsigned size)
{
        return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
                                           sector_t block,
                                           unsigned size)
{
        return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of the block to read
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns the buffer head that contains it.
 *  The page cache is allocated from the movable area so that it can be
 *  migrated.  It returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
        return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */