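/*
 * mm/truncate.c - code for taking down pages from address_spaces
 */
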
#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>

#include "internal.h"

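/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */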
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

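/*
 * Zero out the tail of a partially-truncated page, from @partial to the end
 * of the page, and invalidate any fs-private state beyond the truncation
 * point.
 */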
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}

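/*
 * This cancels just the dirty bit on the kernel page itself, it does NOT
 * actually remove dirty bits on any mmap's that may be around.  It also
 * leaves the page tagged dirty, so any sync activity will still find it on
 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
 * look at the dirty bits in the VM.
 *
 * Doing this should *normally* only ever be done when a page is truncated,
 * and is not actually mapped anywhere at all.  However, fs/buffer.c does
 * this when it notices that somebody has cleaned out all the buffers on a
 * page without actually doing it through the VM.
 */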
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

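/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping: the page has then already been truncated or replaced, either
 * because the VM has retried with a new page, or because a previous fs
 * write error has caused the page to be truncated.
 */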
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	remove_from_page_cache(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);
	return 0;
}

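/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping() which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */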
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

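/*
 * Remove a single page from the page cache on behalf of truncation,
 * unmapping it from process pagetables first if necessary.
 */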
int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				    (loff_t)page->index << PAGE_CACHE_SHIFT,
				    PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

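/*
 * Used to get rid of pages on hardware memory corruption.
 */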
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

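/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages.  The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */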
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

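/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the tail of a partial first page
 * if @lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected
 * region.  The first pass will remove most pages, so the search cost of
 * the second pass is low.
 *
 * When looking at page->index outside the page lock we need to be careful
 * to copy it into a local to avoid races (it could change at any time).
 */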
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	/* First pass: non-blocking.  Skip locked and writeback pages. */
	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			/* Copy ->index to a local: it may change under us. */
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Zero out the tail of a partial first page, if any. */
	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	/* Second pass: blocking.  Wait on page locks and writeback. */
	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			if (page->index > next)
				next = page->index;
			next++;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

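/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */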
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

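/**
 * invalidate_mapping_pages - invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to remove
 * all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 *
 * Returns the number of pages which were successfully invalidated.
 */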
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				       pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			ret += invalidate_inode_page(page);

			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL(invalidate_mapping_pages);

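/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */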
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);
	page_cache_release(page);
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

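/*
 * Give the filesystem a chance to write back a dirty page via its
 * ->launder_page() method before we invalidate it.  Returns 0 when there
 * is nothing to do.
 */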
static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

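/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior
 * to invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */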
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
	       pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					    (loff_t)page_index << PAGE_CACHE_SHIFT,
					    (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page.
					 */
					unmap_mapping_range(mapping,
					    (loff_t)page_index << PAGE_CACHE_SHIFT,
					    PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

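/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior
 * to invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */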
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

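/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @old: old file offset
 * @new: new file offset
 *
 * The inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem releases
 * resources associated with the freed range (eg. deallocates blocks).  This
 * way, pagecache will always stay logically coherent with the on-disk
 * format, and the filesystem does not have to deal with situations such as
 * writepage being called for a page that has already had its underlying
 * blocks deallocated.
 */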
void truncate_pagecache(struct inode *inode, loff_t old, loff_t new)
{
	struct address_space *mapping = inode->i_mapping;

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(mapping, new);
	unmap_mapping_range(mapping, new + PAGE_SIZE - 1, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

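/**
 * vmtruncate - unmap mappings "freed" by truncate() syscall
 * @inode: inode of the file used
 * @offset: file offset to start truncating
 *
 * Update the inode size and remove the truncated pagecache with
 * simple_setsize(), then give the filesystem a chance to adjust its block
 * allocations via the ->truncate() inode operation.
 */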
int vmtruncate(struct inode *inode, loff_t offset)
{
	int error;

	error = simple_setsize(inode, offset);
	if (error)
		return error;

	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);

	return error;
}
EXPORT_SYMBOL(vmtruncate);