/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* try_to_release_page(), do_invalidatepage() */


/**
 * do_invalidatepage - invalidate part of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
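
/*
 * Illustrative sketch (not part of this file): a block-based filesystem
 * typically either leaves ->invalidatepage NULL, so the block_invalidatepage()
 * fallback above kicks in, or wires it up explicitly in its
 * address_space_operations ("foofs" here is hypothetical):
 *
 *	static const struct address_space_operations foofs_aops = {
 *		.invalidatepage	= block_invalidatepage,
 *	};
 */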

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}
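
/*
 * Worked example: with 4K pages, truncating a file to 5000 bytes gives
 * partial = 5000 & (PAGE_CACHE_SIZE - 1) = 904, so bytes 904..4095 of the
 * final page are zeroed and any fs-private data beyond offset 904 is
 * invalidated.
 */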

/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping: this happens when the page was reclaimed, or truncated or
 * invalidated by someone else, while we slept waiting for the page lock.
 *
 * Must be called with the page locked.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	remove_from_page_cache(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping() which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if @lstart is
 * not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	/*
	 * Pass 1: non-blocking.  Skip pages we cannot lock immediately and
	 * pages under writeback; the second pass picks them up.
	 */
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			if (page_mapped(page)) {
				unmap_mapping_range(mapping,
				  (loff_t)page_index<<PAGE_CACHE_SHIFT,
				  PAGE_CACHE_SIZE, 0);
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	/*
	 * Pass 2: blocking.  Wait for page locks and writeback, and keep
	 * rescanning from @start until nothing in the range remains.
	 */
	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				unmap_mapping_range(mapping,
				  (loff_t)page->index<<PAGE_CACHE_SHIFT,
				  PAGE_CACHE_SIZE, 0);
			}
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);
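
/*
 * Worked example (illustrative, not part of this file): with 4K pages,
 *
 *	truncate_inode_pages_range(mapping, 16384, 32767);
 *
 * removes the four pages with indices 4..7 (bytes 16384..32767).  Note that
 * @lend must address the last byte of a page (the BUG_ON above enforces
 * this), and a partially covered first page is zeroed in place rather than
 * removed.
 */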

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
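
/*
 * Illustrative caller sketch (roughly what the generic vmtruncate() path
 * does when shrinking a file; simplified, not part of this file):
 *
 *	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
 *	truncate_inode_pages(mapping, offset);
 *
 * i.e. pagetable mappings beyond the new EOF are zapped first, then the
 * pagecache pages themselves are taken down.
 */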

unsigned long __invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end, bool be_atomic)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		if (likely(!be_atomic))
			cond_resched();
	}
	return ret;
}

/**
 * invalidate_mapping_pages - invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	return __invalidate_mapping_pages(mapping, start, end, false);
}
EXPORT_SYMBOL(invalidate_mapping_pages);
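
/*
 * Example caller (roughly what mm/fadvise.c does for POSIX_FADV_DONTNEED;
 * simplified, not part of this file).  Only fully covered pages are dropped,
 * so the start index is rounded up and the end index rounded down:
 *
 *	start_index = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 *	end_index = endbyte >> PAGE_CACHE_SHIFT;
 *	if (end_index >= start_index)
 *		invalidate_mapping_pages(mapping, start_index, end_index);
 */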

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}
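
/*
 * Illustrative sketch (not part of this file): a filesystem that must not
 * silently drop dirty data on invalidation provides ->launder_page so the
 * page is written back first.  NFS, for example, wires up something like:
 *
 *	static const struct address_space_operations nfs_file_aops = {
 *		.launder_page	= nfs_launder_page,
 *	};
 */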

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page.
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
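
/*
 * Example caller (roughly what generic_file_direct_write() in mm/filemap.c
 * does after a direct write; simplified, not part of this file): any cached
 * pages over the written range are now stale and must go, e.g.
 *
 *	if (mapping->nrpages)
 *		invalidate_inode_pages2_range(mapping,
 *				pos >> PAGE_CACHE_SHIFT, end);
 */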

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
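
/*
 * Example (illustrative, not part of this file): network filesystems such
 * as NFS use this to throw away pagecache that has gone stale behind the
 * kernel's back, e.g. when revalidation shows the server copy changed:
 *
 *	error = invalidate_inode_pages2(inode->i_mapping);
 *
 * where -EBUSY means some pages were pinned and could not be invalidated.
 */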