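/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */
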
#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>

#include "internal.h"
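
/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */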
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}
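
/*
 * This cancels just the dirty bit on the kernel page itself, it does NOT
 * actually remove dirty bits on any mmap's that may be around.  It also
 * leaves the page tagged dirty, so any sync activity will still find it on
 * the dirty lists, and in particular, clear_page_dirty_for_io() will still
 * look at the dirty bits in the VM.
 *
 * Doing this should *normally* only ever be done when a page is truncated
 * and is not actually mapped anywhere at all.  However, fs/buffer.c does
 * this when it notices that somebody has cleaned out all the buffers on a
 * page without actually doing it through the VM.
 */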
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);
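
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */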
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	remove_from_page_cache(page);
	ClearPageMappedToDisk(page);
	page_cache_release(page);
}
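
/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */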
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}
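
/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if lstart is not
 * page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected
 * region.  The first pass will remove most pages, so the search cost of
 * the second pass is low.
 *
 * When looking at page->index outside the page lock we need to be careful
 * to copy it into a local to avoid races (it could change at any time).
 */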
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (!trylock_page(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			if (page_mapped(page)) {
				unmap_mapping_range(mapping,
				  (loff_t)page_index<<PAGE_CACHE_SHIFT,
				  PAGE_CACHE_SIZE, 0);
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				unmap_mapping_range(mapping,
				  (loff_t)page->index<<PAGE_CACHE_SHIFT,
				  PAGE_CACHE_SIZE, 0);
			}
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);
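
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */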
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

unsigned long __invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end, bool be_atomic)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = !trylock_page(page);
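
			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nothing bumping the ->index
			 * of the unlocked pages.
			 */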
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
		if (likely(!be_atomic))
			cond_resched();
	}
	return ret;
}
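
/**
 * invalidate_mapping_pages - invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to remove
 * all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */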
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	return __invalidate_mapping_pages(mapping, start, end, false);
}
EXPORT_SYMBOL(invalidate_mapping_pages);
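
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */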
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__remove_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);
	page_cache_release(page);
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}
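
/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior
 * to invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */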
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
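					/*
					 * Zap the rest of the file in one hit.
					 */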
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
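					/*
					 * Just zap this page.
					 */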
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
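
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior
 * to invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */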
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);