/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
        BH_Uptodate,    /* Contains valid data */
        BH_Dirty,       /* Is dirty */
        BH_Lock,        /* Is locked */
        BH_Req,         /* Has been submitted for I/O */

        BH_Mapped,      /* Has a disk mapping */
        BH_New,         /* Disk mapping was newly created by get_block */
        BH_Async_Read,  /* Is under end_buffer_async_read I/O */
        BH_Async_Write, /* Is under end_buffer_async_write I/O */
        BH_Delay,       /* Buffer is not yet allocated on disk */
        BH_Boundary,    /* Block is followed by a discontiguity */
        BH_Write_EIO,   /* I/O error on write */
        BH_Unwritten,   /* Buffer is allocated on disk but not written */
        BH_Quiet,       /* Buffer error printks should be quiet */
        BH_Meta,        /* Buffer contains metadata */
        BH_Prio,        /* Buffer should be submitted with REQ_PRIO */
        BH_Defer_Completion, /* Defer AIO completion to workqueue */

        BH_PrivateStart,/* not a state bit, but the first bit available
                         * for private allocation by other entities
                         */
};

#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block within a page,
 * and of course as the unit of I/O through the filesystem and block layers.
 * Nowadays the basic I/O unit is the bio, and buffer_heads are used for
 * extracting block mappings (via a get_block_t call), for tracking state
 * within a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
        unsigned long b_state;          /* buffer state bitmap (see above) */
        struct buffer_head *b_this_page;/* circular list of page's buffers */
        struct page *b_page;            /* the page this bh is mapped to */

        sector_t b_blocknr;             /* start block number */
        size_t b_size;                  /* size of mapping */
        char *b_data;                   /* pointer to data within the page */

        struct block_device *b_bdev;
        bh_end_io_t *b_end_io;          /* I/O completion */
        void *b_private;                /* reserved for b_end_io */
        struct list_head b_assoc_buffers; /* associated with another mapping */
        struct address_space *b_assoc_map; /* mapping this buffer is
                                              associated with */
        atomic_t b_count;               /* users using this buffer_head */
        spinlock_t b_uptodate_lock;     /* Used by the first bh in a page, to
                                         * serialise I/O completion of other
                                         * buffers in the page */
};

/*
 * Macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid re-setting flags that are already set (which would cause a
 * costly cache-line transition even though nothing changes), check the
 * flag first.
 */
#define BUFFER_FNS(bit, name)                                            \
static __always_inline void set_buffer_##name(struct buffer_head *bh)   \
{                                                                        \
        if (!test_bit(BH_##bit, &(bh)->b_state))                         \
                set_bit(BH_##bit, &(bh)->b_state);                       \
}                                                                        \
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{                                                                        \
        clear_bit(BH_##bit, &(bh)->b_state);                             \
}                                                                        \
static __always_inline int buffer_##name(const struct buffer_head *bh)  \
{                                                                        \
        return test_bit(BH_##bit, &(bh)->b_state);                       \
}
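
/*
 * For illustration: BUFFER_FNS(Uptodate, uptodate) expands to
 * set_buffer_uptodate(), clear_buffer_uptodate() and buffer_uptodate(),
 * all operating atomically on the BH_Uptodate bit of bh->b_state, e.g.:
 *
 *      if (!buffer_uptodate(bh))
 *              pr_debug("block %llu not read yet\n",
 *                       (unsigned long long)bh->b_blocknr);
 */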

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)                                          \
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{                                                                          \
        return test_and_set_bit(BH_##bit, &(bh)->b_state);                 \
}                                                                          \
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{                                                                          \
        return test_and_clear_bit(BH_##bit, &(bh)->b_state);               \
}
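
/*
 * For illustration, roughly the pattern sync_dirty_buffer() uses: claim
 * the dirty bit exactly once under the buffer lock, then submit:
 *
 *      lock_buffer(bh);
 *      if (test_clear_buffer_dirty(bh)) {
 *              get_bh(bh);
 *              bh->b_end_io = end_buffer_write_sync;
 *              submit_bh(REQ_OP_WRITE, 0, bh); // completion unlocks bh
 *              wait_on_buffer(bh);
 *      } else {
 *              unlock_buffer(bh);
 *      }
 */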

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)

#define bh_offset(bh)           ((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)                                      \
        ({                                                      \
                BUG_ON(!PagePrivate(page));                     \
                ((struct buffer_head *)page_private(page));     \
        })
#define page_has_buffers(page)  PagePrivate(page)
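
/*
 * For illustration: b_this_page forms a circular list, so walking all
 * buffers attached to a page looks like:
 *
 *      struct buffer_head *head, *bh;
 *
 *      if (page_has_buffers(page)) {
 *              bh = head = page_buffers(page);
 *              do {
 *                      ... inspect bh ...
 *                      bh = bh->b_this_page;
 *              } while (bh != head);
 *      }
 */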

void buffer_check_dirty_writeback(struct page *page,
                                  bool *dirty, bool *writeback);

/*
 * Declarations
 */

void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
                 struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
                                       bool retry);
void create_empty_buffers(struct page *, unsigned long,
                          unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
                        sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
        clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
                                     unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
                                 unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
                      gfp_t gfp);
struct buffer_head *__bread_gfp(struct block_device *,
                                sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
void invalidate_bh_lrus_cpu(int cpu);
bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, int, struct buffer_head *bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
void write_dirty_buffer(struct buffer_head *bh, int op_flags);
int submit_bh(int, int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
                          sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);
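
/*
 * For illustration: bh_uptodate_or_lock() and bh_submit_read() pair up to
 * read a block only when it is not already cached:
 *
 *      if (!bh_uptodate_or_lock(bh)) {
 *              // bh is now locked and not uptodate; read it synchronously
 *              if (bh_submit_read(bh) < 0)
 *                      return -EIO;
 *      }
 *      // bh is uptodate here
 */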

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned int offset,
                          unsigned int length);
int block_write_full_page(struct page *page, get_block_t *get_block,
                          struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
                            get_block_t *get_block, struct writeback_control *wbc,
                            bh_end_io_t *handler);
int block_read_full_page(struct page *, get_block_t *);
int block_is_partially_uptodate(struct page *page, unsigned long from,
                                unsigned long count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
                      unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
                        get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
                    loff_t, unsigned, unsigned,
                    struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
                      loff_t, unsigned, unsigned,
                      struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
                     unsigned, unsigned, struct page **, void **,
                     get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                       get_block_t get_block);

static inline vm_fault_t block_page_mkwrite_return(int err)
{
        if (err == 0)
                return VM_FAULT_LOCKED;
        if (err == -EFAULT || err == -EAGAIN)
                return VM_FAULT_NOPAGE;
        if (err == -ENOMEM)
                return VM_FAULT_OOM;
        /* -ENOSPC, -EDQUOT, -EIO ... */
        return VM_FAULT_SIGBUS;
}
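
/*
 * For illustration: a filesystem's ->page_mkwrite handler typically wraps
 * block_page_mkwrite() with this helper to translate the errno into a
 * vm_fault_t (myfs_get_block is a hypothetical get_block_t):
 *
 *      static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *      {
 *              int err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *
 *              return block_page_mkwrite_return(err);
 *      }
 */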
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
                     struct page **, void **, get_block_t *);
int nobh_write_end(struct file *, struct address_space *,
                   loff_t, unsigned, unsigned,
                   struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
                   struct writeback_control *wbc);

void buffer_init(void);

/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
        atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
        smp_mb__before_atomic();
        atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
        if (bh)
                __brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
        if (bh)
                __bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
        return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
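
/*
 * For illustration: the classic read-and-release pattern.  sb_bread()
 * returns a buffer with an elevated b_count (or NULL on I/O error), so
 * every successful call must be balanced by brelse():
 *
 *      struct buffer_head *bh = sb_bread(sb, block);
 *
 *      if (!bh)
 *              return -EIO;
 *      ... read from bh->b_data, up to sb->s_blocksize bytes ...
 *      brelse(bh);
 */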

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
        return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
        __breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
sb_breadahead_unmovable(struct super_block *sb, sector_t block)
{
        __breadahead_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
        return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
        return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
        return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
        set_buffer_mapped(bh);
        bh->b_bdev = sb->s_bdev;
        bh->b_blocknr = block;
        bh->b_size = sb->s_blocksize;
}
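
/*
 * For illustration: map_bh() is what a filesystem's get_block_t callback
 * calls once it has resolved a file-relative block to an on-disk block
 * (myfs_lookup_block is a hypothetical mapping helper; error handling is
 * elided):
 *
 *      static int myfs_get_block(struct inode *inode, sector_t iblock,
 *                                struct buffer_head *bh, int create)
 *      {
 *              sector_t phys = myfs_lookup_block(inode, iblock, create);
 *
 *              if (!phys)
 *                      return 0;       // hole: leave bh unmapped
 *              map_bh(bh, inode->i_sb, phys);
 *              return 0;
 *      }
 */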

static inline void wait_on_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (buffer_locked(bh))
                __wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
        return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
        might_sleep();
        if (!trylock_buffer(bh))
                __lock_buffer(bh);
}
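
/*
 * For illustration: buffer contents are normally modified under the buffer
 * lock, with the dirty bit set only after the update is complete (offset,
 * src and len stand in for the caller's data):
 *
 *      lock_buffer(bh);
 *      memcpy(bh->b_data + offset, src, len);
 *      set_buffer_uptodate(bh);
 *      unlock_buffer(bh);
 *      mark_buffer_dirty(bh);
 */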

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
                                                   sector_t block,
                                                   unsigned size)
{
        return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
                                           sector_t block,
                                           unsigned size)
{
        return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 * __bread() - read a block and return the buffer_head that contains it
 * @bdev: the block_device to read from
 * @block: number of the block
 * @size: size (in bytes) to read
 *
 * Reads the specified block and returns the buffer_head that contains it.
 * The page cache is allocated from the movable area so that it can be
 * migrated.  Returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
        return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
static inline void invalidate_bh_lrus_cpu(int cpu) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */