1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56#include <linux/slab.h>
57#include <linux/pagemap.h>
58#include <linux/list_sort.h>
59#include "ubifs.h"
60
61
62
63
64
65
66#define SOFT_LEBS_LIMIT 4
67#define HARD_LEBS_LIMIT 32
68
69
70
71
72
73
74
75
76
77
78
79
80
/**
 * switch_gc_head - switch the garbage collection journal head.
 * @c: UBIFS file-system description object
 *
 * This function switches the GC journal head to the LEB reserved in
 * @c->gc_lnum: the current GC head write-buffer is synchronized, the reserved
 * LEB is unmapped and added to the journal log as a new bud, and the
 * write-buffer is seeked to the start of that LEB.  @c->gc_lnum is set to
 * %-1, so a new reserved LEB has to be picked later.  Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int switch_gc_head(struct ubifs_info *c)
{
	int err, gc_lnum = c->gc_lnum;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert(gc_lnum != -1);
	dbg_gc("switch GC head from LEB %d:%d to LEB %d (waste %d bytes)",
	       wbuf->lnum, wbuf->offs + wbuf->used, gc_lnum,
	       c->leb_size - wbuf->offs - wbuf->used);

	/* Flush buffered GC data before abandoning the old head LEB */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (err)
		return err;

	/*
	 * Unmap the reserved LEB before it is added to the log as the new
	 * GC head bud, so writing starts into an erased eraseblock.
	 */
	err = ubifs_leb_unmap(c, gc_lnum);
	if (err)
		return err;

	err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
	if (err)
		return err;

	/* The reserve is consumed; the caller must find a new GC LEB later */
	c->gc_lnum = -1;
	err = ubifs_wbuf_seek_nolock(wbuf, gc_lnum, 0, UBI_LONGTERM);
	return err;
}
111
112
113
114
115
116
117
118
119
120
121int data_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
122{
123 ino_t inuma, inumb;
124 struct ubifs_info *c = priv;
125 struct ubifs_scan_node *sa, *sb;
126
127 cond_resched();
128 if (a == b)
129 return 0;
130
131 sa = list_entry(a, struct ubifs_scan_node, list);
132 sb = list_entry(b, struct ubifs_scan_node, list);
133
134 ubifs_assert(key_type(c, &sa->key) == UBIFS_DATA_KEY);
135 ubifs_assert(key_type(c, &sb->key) == UBIFS_DATA_KEY);
136 ubifs_assert(sa->type == UBIFS_DATA_NODE);
137 ubifs_assert(sb->type == UBIFS_DATA_NODE);
138
139 inuma = key_inum(c, &sa->key);
140 inumb = key_inum(c, &sb->key);
141
142 if (inuma == inumb) {
143 unsigned int blka = key_block(c, &sa->key);
144 unsigned int blkb = key_block(c, &sb->key);
145
146 if (blka <= blkb)
147 return -1;
148 } else if (inuma <= inumb)
149 return -1;
150
151 return 1;
152}
153
154
155
156
157
158
159
160
161
162
163
164int nondata_nodes_cmp(void *priv, struct list_head *a, struct list_head *b)
165{
166 ino_t inuma, inumb;
167 struct ubifs_info *c = priv;
168 struct ubifs_scan_node *sa, *sb;
169
170 cond_resched();
171 if (a == b)
172 return 0;
173
174 sa = list_entry(a, struct ubifs_scan_node, list);
175 sb = list_entry(b, struct ubifs_scan_node, list);
176
177 ubifs_assert(key_type(c, &sa->key) != UBIFS_DATA_KEY &&
178 key_type(c, &sb->key) != UBIFS_DATA_KEY);
179 ubifs_assert(sa->type != UBIFS_DATA_NODE &&
180 sb->type != UBIFS_DATA_NODE);
181
182
183 if (sa->type == UBIFS_INO_NODE) {
184 if (sb->type == UBIFS_INO_NODE)
185 return sb->len - sa->len;
186 return -1;
187 }
188 if (sb->type == UBIFS_INO_NODE)
189 return 1;
190
191 ubifs_assert(key_type(c, &sa->key) == UBIFS_DENT_KEY ||
192 key_type(c, &sa->key) == UBIFS_XENT_KEY);
193 ubifs_assert(key_type(c, &sb->key) == UBIFS_DENT_KEY ||
194 key_type(c, &sb->key) == UBIFS_XENT_KEY);
195 ubifs_assert(sa->type == UBIFS_DENT_NODE ||
196 sa->type == UBIFS_XENT_NODE);
197 ubifs_assert(sb->type == UBIFS_DENT_NODE ||
198 sb->type == UBIFS_XENT_NODE);
199
200 inuma = key_inum(c, &sa->key);
201 inumb = key_inum(c, &sb->key);
202
203 if (inuma == inumb) {
204 uint32_t hasha = key_hash(c, &sa->key);
205 uint32_t hashb = key_hash(c, &sb->key);
206
207 if (hasha <= hashb)
208 return -1;
209 } else if (inuma <= inumb)
210 return -1;
211
212 return 1;
213}
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
/**
 * sort_nodes - sort nodes for GC.
 * @c: UBIFS file-system description object
 * @sleb: describes the scanned LEB; contains the sorted data nodes on exit
 * @nondata: receives the sorted non-data nodes on exit
 * @min: the length of the smallest surviving node is returned here
 *
 * This function prepares the scanned nodes for moving: nodes which are
 * neither inode, data, nor (x)entry nodes are deleted; nodes which
 * 'ubifs_tnc_has_node()' reports as absent from the TNC (i.e. obsolete) are
 * deleted; the rest are split into the data node list (@sleb->nodes) and the
 * non-data list (@nondata), and each list is sorted with the corresponding
 * comparison function ('data_nodes_cmp()' / 'nondata_nodes_cmp()').
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
		      struct list_head *nondata, int *min)
{
	int err;
	struct ubifs_scan_node *snod, *tmp;

	*min = INT_MAX;

	/* Separate data nodes from non-data nodes, dropping dead weight */
	list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
		ubifs_assert(snod->type == UBIFS_INO_NODE ||
			     snod->type == UBIFS_DATA_NODE ||
			     snod->type == UBIFS_DENT_NODE ||
			     snod->type == UBIFS_XENT_NODE ||
			     snod->type == UBIFS_TRUN_NODE);

		if (snod->type != UBIFS_INO_NODE &&
		    snod->type != UBIFS_DATA_NODE &&
		    snod->type != UBIFS_DENT_NODE &&
		    snod->type != UBIFS_XENT_NODE) {
			/* Truncation node - does not need to be moved */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		ubifs_assert(key_type(c, &snod->key) == UBIFS_DATA_KEY ||
			     key_type(c, &snod->key) == UBIFS_INO_KEY ||
			     key_type(c, &snod->key) == UBIFS_DENT_KEY ||
			     key_type(c, &snod->key) == UBIFS_XENT_KEY);

		err = ubifs_tnc_has_node(c, &snod->key, 0, sleb->lnum,
					 snod->offs, 0);
		if (err < 0)
			return err;

		if (!err) {
			/* The node is obsolete - no need to move it */
			list_del(&snod->list);
			kfree(snod);
			continue;
		}

		/* Remember the smallest node that still has to be moved */
		if (snod->len < *min)
			*min = snod->len;

		if (key_type(c, &snod->key) != UBIFS_DATA_KEY)
			list_move_tail(&snod->list, nondata);
	}

	/* Sort data and non-data nodes */
	list_sort(c, &sleb->nodes, &data_nodes_cmp);
	list_sort(c, nondata, &nondata_nodes_cmp);

	err = dbg_check_data_nodes_order(c, &sleb->nodes);
	if (err)
		return err;
	err = dbg_check_nondata_nodes_order(c, nondata);
	if (err)
		return err;
	return 0;
}
304
305
306
307
308
309
310
311
312
313
314
315
316static int move_node(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
317 struct ubifs_scan_node *snod, struct ubifs_wbuf *wbuf)
318{
319 int err, new_lnum = wbuf->lnum, new_offs = wbuf->offs + wbuf->used;
320
321 cond_resched();
322 err = ubifs_wbuf_write_nolock(wbuf, snod->node, snod->len);
323 if (err)
324 return err;
325
326 err = ubifs_tnc_replace(c, &snod->key, sleb->lnum,
327 snod->offs, new_lnum, new_offs,
328 snod->len);
329 list_del(&snod->list);
330 kfree(snod);
331 return err;
332}
333
334
335
336
337
338
339
340
341
342
343
/**
 * move_nodes - move nodes.
 * @c: UBIFS file-system description object
 * @sleb: describes the LEB to move nodes from
 *
 * This function moves valid nodes from the data LEB described by @sleb to
 * the GC journal head.  The nodes are first sorted ('sort_nodes()'), then
 * moved first-fit into the current GC head LEB, switching to a new head LEB
 * ('switch_gc_head()') whenever the remaining nodes do not fit.  Returns
 * zero in case of success and a negative error code in case of failure.
 * On failure the non-data nodes are spliced back onto @sleb->nodes so the
 * caller can free everything via the scan object.
 */
static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
{
	int err, min;
	LIST_HEAD(nondata);
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	if (wbuf->lnum == -1) {
		/*
		 * The GC journal head is not set up yet - switch to the
		 * reserved GC LEB before moving anything.
		 */
		err = switch_gc_head(c);
		if (err)
			return err;
	}

	err = sort_nodes(c, sleb, &nondata, &min);
	if (err)
		goto out;

	/* Write nodes to their new location, first-fit per head LEB */
	while (1) {
		int avail;
		struct ubifs_scan_node *snod, *tmp;

		/* Move data nodes first, in sorted order */
		list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (snod->len > avail)
				/*
				 * Data nodes are never skipped - stop here so
				 * they stay in sorted (inode/block) order.
				 */
				break;

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		/* Then move the non-data nodes */
		list_for_each_entry_safe(snod, tmp, &nondata, list) {
			avail = c->leb_size - wbuf->offs - wbuf->used;
			if (avail < min)
				/* Nothing left can possibly fit */
				break;

			if (snod->len > avail) {
				/*
				 * The list is sorted with inode nodes first,
				 * biggest first, so a later (smaller) node
				 * may still fit - skip this one.  But once an
				 * entry node or a minimum-size inode node
				 * does not fit, nothing after it will either.
				 */
				if (key_type(c, &snod->key) == UBIFS_DENT_KEY ||
				    snod->len == UBIFS_INO_NODE_SZ)
					break;
				continue;
			}

			err = move_node(c, sleb, snod, wbuf);
			if (err)
				goto out;
		}

		if (list_empty(&sleb->nodes) && list_empty(&nondata))
			break;

		/*
		 * Nodes are still left - waste the rest of the space in this
		 * head LEB and switch to the next one.
		 */
		err = switch_gc_head(c);
		if (err)
			goto out;
	}

	return 0;

out:
	list_splice_tail(&nondata, &sleb->nodes);
	return err;
}
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441static int gc_sync_wbufs(struct ubifs_info *c)
442{
443 int err, i;
444
445 for (i = 0; i < c->jhead_cnt; i++) {
446 if (i == GCHD)
447 continue;
448 err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
449 if (err)
450 return err;
451 }
452 return 0;
453}
454
455
456
457
458
459
460
461
462
463
/**
 * ubifs_garbage_collect_leb - garbage-collect a logical eraseblock.
 * @c: UBIFS file-system description object
 * @lp: describes the LEB to garbage collect
 *
 * This function garbage-collects the LEB described by @lp.  For an indexing
 * LEB, all index nodes are dirtied in the TNC and the LEB is queued on
 * @c->idx_gc to be unmapped after the next commit; %LEB_FREED_IDX is
 * returned.  For a data LEB, all valid nodes are moved to the GC journal
 * head: %LEB_FREED is returned if the LEB could be unmapped, %LEB_RETAINED
 * if it was kept as the new reserved GC LEB (@c->gc_lnum).  A negative
 * error code is returned in case of failure.
 *
 * Must be called with the GC head write-buffer mutex held (the caller,
 * 'ubifs_garbage_collect()', takes it).
 */
int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;
	int err = 0, lnum = lp->lnum;

	ubifs_assert(c->gc_lnum != -1 || wbuf->offs + wbuf->used == 0 ||
		     c->need_recovery);
	ubifs_assert(c->gc_lnum != lnum);
	ubifs_assert(wbuf->lnum != lnum);

	/* Scan the whole LEB to find all its nodes */
	sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);

	ubifs_assert(!list_empty(&sleb->nodes));
	snod = list_entry(sleb->nodes.next, struct ubifs_scan_node, list);

	if (snod->type == UBIFS_IDX_NODE) {
		struct ubifs_gced_idx_leb *idx_gc;

		dbg_gc("indexing LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);
		list_for_each_entry(snod, &sleb->nodes, list) {
			struct ubifs_idx_node *idx = snod->node;
			int level = le16_to_cpu(idx->level);

			ubifs_assert(snod->type == UBIFS_IDX_NODE);
			key_read(c, ubifs_idx_key(c, idx), &snod->key);
			/* Dirty the index node so the commit rewrites it */
			err = ubifs_dirty_idx_node(c, &snod->key, level, lnum,
						   snod->offs);
			if (err)
				goto out;
		}

		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}

		/* Queue the LEB to be unmapped after the next commit */
		idx_gc->lnum = lnum;
		idx_gc->unmap = 0;
		list_add(&idx_gc->list, &c->idx_gc);

		/*
		 * The LEB cannot be unmapped yet - the old index may still
		 * refer to nodes in it until the commit.  Mark it as fully
		 * free but keep the %LPROPS_INDEX flag set in lprops.
		 */
		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0,
					  LPROPS_INDEX, 1);
		if (err)
			goto out;
		err = LEB_FREED_IDX;
	} else {
		dbg_gc("data LEB %d (free %d, dirty %d)",
		       lnum, lp->free, lp->dirty);

		err = move_nodes(c, sleb);
		if (err)
			goto out_inc_seq;

		err = gc_sync_wbufs(c);
		if (err)
			goto out_inc_seq;

		err = ubifs_change_one_lp(c, lnum, c->leb_size, 0, 0, 0, 0);
		if (err)
			goto out_inc_seq;

		/*
		 * Publish the GC'ed LEB and bump the GC sequence counter;
		 * the write barriers order the two stores (presumably for
		 * lock-free readers of @gc_seq/@gced_lnum elsewhere).
		 */
		c->gced_lnum = lnum;
		smp_wmb();
		c->gc_seq += 1;
		smp_wmb();

		if (c->gc_lnum == -1) {
			/* No reserved GC LEB - retain this one instead */
			c->gc_lnum = lnum;
			err = LEB_RETAINED;
		} else {
			/* Sync before unmapping, then free the LEB */
			err = ubifs_wbuf_sync_nolock(wbuf);
			if (err)
				goto out;

			err = ubifs_leb_unmap(c, lnum);
			if (err)
				goto out;

			err = LEB_FREED;
		}
	}

out:
	ubifs_scan_destroy(sleb);
	return err;

out_inc_seq:
	/* Some nodes may already have moved - still bump the GC sequence */
	c->gced_lnum = lnum;
	smp_wmb();
	c->gc_seq += 1;
	smp_wmb();
	goto out;
}
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
/**
 * ubifs_garbage_collect - UBIFS garbage collector.
 * @c: UBIFS file-system description object
 * @anyway: do GC even if there are free LEBs
 *
 * This function does out-of-place garbage collection.  The return codes are:
 *   o positive LEB number in case a LEB has been freed and may be used;
 *   o %-EAGAIN if the caller has to run a commit first;
 *   o %-ENOSPC if GC failed to make any progress;
 *   o other negative error codes in case of other errors.
 *
 * The function iterates over dirty LEBs (up to %HARD_LEBS_LIMIT attempts),
 * garbage-collecting each one via 'ubifs_garbage_collect_leb()' and shrinking
 * or growing @min_space depending on whether progress was made.  It takes
 * the GC head write-buffer mutex for the whole run.  On fatal errors the
 * file-system is switched to read-only mode ('ubifs_ro_mode()').
 */
int ubifs_garbage_collect(struct ubifs_info *c, int anyway)
{
	int i, err, ret, min_space = c->dead_wm;
	struct ubifs_lprops lp;
	struct ubifs_wbuf *wbuf = &c->jheads[GCHD].wbuf;

	ubifs_assert_cmt_locked(c);
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (ubifs_gc_should_commit(c))
		return -EAGAIN;

	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);

	if (c->ro_error) {
		ret = -EROFS;
		goto out_unlock;
	}

	/* The GC head write-buffer is expected to be empty on entry */
	ubifs_assert(!wbuf->used);

	for (i = 0; ; i++) {
		int space_before = c->leb_size - wbuf->offs - wbuf->used;
		int space_after;

		cond_resched();

		/* Give the commit an opportunity to run */
		if (ubifs_gc_should_commit(c)) {
			ret = -EAGAIN;
			break;
		}

		if (i > SOFT_LEBS_LIMIT && !list_empty(&c->idx_gc)) {
			/*
			 * Soft limit reached and some index LEBs were
			 * GC'ed - they become usable after a commit, so
			 * ask for one instead of grinding on.
			 */
			dbg_gc("soft limit, some index LEBs GC'ed, -EAGAIN");
			ubifs_commit_required(c);
			ret = -EAGAIN;
			break;
		}

		if (i > HARD_LEBS_LIMIT) {
			/*
			 * Too many iterations without freeing a LEB -
			 * give up.
			 */
			dbg_gc("hard limit, -ENOSPC");
			ret = -ENOSPC;
			break;
		}

		/*
		 * Pick a dirty LEB with at least @min_space free+dirty.
		 * Unless @anyway is set, allow the finder to return an
		 * empty LEB instead (handled below as a fast path).
		 */
		ret = ubifs_find_dirty_leb(c, &lp, min_space, anyway ? 0 : 1);
		if (ret) {
			if (ret == -ENOSPC)
				dbg_gc("no more dirty LEBs");
			break;
		}

		dbg_gc("found LEB %d: free %d, dirty %d, sum %d "
		       "(min. space %d)", lp.lnum, lp.free, lp.dirty,
		       lp.free + lp.dirty, min_space);

		if (lp.free + lp.dirty == c->leb_size) {
			/* An empty (or freeable) LEB was returned */
			dbg_gc("LEB %d is free, return it", lp.lnum);
			/* Freeable index LEBs are never returned here */
			ubifs_assert(!(lp.flags & LPROPS_INDEX));
			if (lp.free != c->leb_size) {
				/*
				 * The LEB is dirty but fully reclaimable.
				 * Sync write-buffers first - one of them may
				 * hold data which obsoletes something in
				 * this LEB - then mark it all-free.
				 */
				ret = gc_sync_wbufs(c);
				if (ret)
					goto out;
				ret = ubifs_change_one_lp(c, lp.lnum,
							  c->leb_size, 0, 0, 0,
							  0);
				if (ret)
					goto out;
			}
			ret = ubifs_leb_unmap(c, lp.lnum);
			if (ret)
				goto out;
			ret = lp.lnum;
			break;
		}

		/* No GC head yet means nothing to measure progress against */
		space_before = c->leb_size - wbuf->offs - wbuf->used;
		if (wbuf->lnum == -1)
			space_before = 0;

		ret = ubifs_garbage_collect_leb(c, &lp);
		if (ret < 0) {
			if (ret == -EAGAIN) {
				/*
				 * Not an error - a commit is needed.  Return
				 * the LEB to lprops; if that fails, its error
				 * code replaces the '-EAGAIN'.
				 */
				err = ubifs_return_leb(c, lp.lnum);
				if (err)
					ret = err;
				break;
			}
			goto out;
		}

		if (ret == LEB_FREED) {
			/* An LEB has been freed and is ready for use */
			dbg_gc("LEB %d freed, return", lp.lnum);
			ret = lp.lnum;
			break;
		}

		if (ret == LEB_FREED_IDX) {
			/*
			 * This was an indexing LEB - it is only usable
			 * after a commit, so keep going.
			 */
			dbg_gc("indexing LEB %d freed, continue", lp.lnum);
			continue;
		}

		ubifs_assert(ret == LEB_RETAINED);
		space_after = c->leb_size - wbuf->offs - wbuf->used;
		dbg_gc("LEB %d retained, freed %d bytes", lp.lnum,
		       space_after - space_before);

		if (space_after > space_before) {
			/* Progress was made - be more aggressive */
			min_space >>= 1;
			if (min_space < c->dead_wm)
				min_space = c->dead_wm;
			continue;
		}

		dbg_gc("did not make progress");

		/*
		 * No space reclaimed this round.  Within the soft limit,
		 * simply retry with another LEB; past it, raise @min_space
		 * (capped at @c->dark_wm) so only more profitable LEBs are
		 * attempted.
		 */
		if (i < SOFT_LEBS_LIMIT) {
			dbg_gc("try again");
			continue;
		}

		min_space <<= 1;
		if (min_space > c->dark_wm)
			min_space = c->dark_wm;
		dbg_gc("set min. space to %d", min_space);
	}

	if (ret == -ENOSPC && !list_empty(&c->idx_gc)) {
		/* Index LEBs were GC'ed - a commit will make space */
		dbg_gc("no space, some index LEBs GC'ed, -EAGAIN");
		ubifs_commit_required(c);
		ret = -EAGAIN;
	}

	/* Leave with a synchronized GC head and an erased reserved LEB */
	err = ubifs_wbuf_sync_nolock(wbuf);
	if (!err)
		err = ubifs_leb_unmap(c, c->gc_lnum);
	if (err) {
		ret = err;
		goto out;
	}
out_unlock:
	mutex_unlock(&wbuf->io_mutex);
	return ret;

out:
	/* Fatal error - switch to read-only mode */
	ubifs_assert(ret < 0);
	ubifs_assert(ret != -ENOSPC && ret != -EAGAIN);
	ubifs_wbuf_sync_nolock(wbuf);
	ubifs_ro_mode(c, ret);
	mutex_unlock(&wbuf->io_mutex);
	ubifs_return_leb(c, lp.lnum);
	return ret;
}
823
824
825
826
827
828
829
830
831
832
833
834
/**
 * ubifs_gc_start_commit - garbage collection at start of commit.
 * @c: UBIFS file-system description object
 *
 * This function unmaps freeable non-index LEBs and marks them as free in
 * lprops.  Index LEBs cannot be unmapped yet because the old index may
 * still refer to them until the commit completes, so freeable dirty index
 * LEBs are instead recorded on @c->idx_gc (with %LPROPS_TAKEN set and
 * %LPROPS_INDEX cleared) to be unmapped by 'ubifs_gc_end_commit()'.
 * Previously GC'ed index LEBs already on @c->idx_gc are marked as
 * unmappable too.  Returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubifs_gc_start_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc;
	const struct ubifs_lprops *lp;
	int err = 0, flags;

	ubifs_get_lprops(c);

	/* Unmap (non-index) freeable LEBs */
	while (1) {
		lp = ubifs_fast_find_freeable(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		err = ubifs_leb_unmap(c, lp->lnum);
		if (err)
			goto out;
		/* Mark the LEB as completely free in lprops */
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, lp->flags, 0);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
	}

	/* GC'ed index LEBs may be unmapped after this commit finishes */
	list_for_each_entry(idx_gc, &c->idx_gc, list)
		idx_gc->unmap = 1;

	/* Record freeable dirty index LEBs for unmapping after the commit */
	while (1) {
		lp = ubifs_fast_find_frdi_idx(c);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			goto out;
		}
		if (!lp)
			break;
		idx_gc = kmalloc(sizeof(struct ubifs_gced_idx_leb), GFP_NOFS);
		if (!idx_gc) {
			err = -ENOMEM;
			goto out;
		}
		ubifs_assert(!(lp->flags & LPROPS_TAKEN));
		ubifs_assert(lp->flags & LPROPS_INDEX);
		/* Take the LEB and drop its index flag */
		flags = (lp->flags | LPROPS_TAKEN) ^ LPROPS_INDEX;
		lp = ubifs_change_lp(c, lp, c->leb_size, 0, flags, 1);
		if (IS_ERR(lp)) {
			err = PTR_ERR(lp);
			kfree(idx_gc);
			goto out;
		}
		ubifs_assert(lp->flags & LPROPS_TAKEN);
		ubifs_assert(!(lp->flags & LPROPS_INDEX));
		idx_gc->lnum = lp->lnum;
		idx_gc->unmap = 1;
		list_add(&idx_gc->list, &c->idx_gc);
	}
out:
	ubifs_release_lprops(c);
	return err;
}
907
908
909
910
911
912
913
/**
 * ubifs_gc_end_commit - garbage collection at end of commit.
 * @c: UBIFS file-system description object
 *
 * This function completes out-of-place garbage collection of index LEBs:
 * every entry on @c->idx_gc marked for unmapping is unmapped, its
 * %LPROPS_TAKEN flag is cleared in lprops, and the entry is removed from
 * the list and freed.  The GC head write-buffer mutex is held throughout,
 * which excludes a concurrently running garbage collector.  Returns zero
 * in case of success and a negative error code in case of failure.
 */
int ubifs_gc_end_commit(struct ubifs_info *c)
{
	struct ubifs_gced_idx_leb *idx_gc, *tmp;
	struct ubifs_wbuf *wbuf;
	int err = 0;

	wbuf = &c->jheads[GCHD].wbuf;
	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
	list_for_each_entry_safe(idx_gc, tmp, &c->idx_gc, list)
		if (idx_gc->unmap) {
			dbg_gc("LEB %d", idx_gc->lnum);
			err = ubifs_leb_unmap(c, idx_gc->lnum);
			if (err)
				goto out;
			/* Release the LEB: clear the 'taken' flag in lprops */
			err = ubifs_change_one_lp(c, idx_gc->lnum, LPROPS_NC,
					  LPROPS_NC, 0, LPROPS_TAKEN, -1);
			if (err)
				goto out;
			list_del(&idx_gc->list);
			kfree(idx_gc);
		}
out:
	mutex_unlock(&wbuf->io_mutex);
	return err;
}
939
940
941
942
943
944
945
946
947
948void ubifs_destroy_idx_gc(struct ubifs_info *c)
949{
950 while (!list_empty(&c->idx_gc)) {
951 struct ubifs_gced_idx_leb *idx_gc;
952
953 idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb,
954 list);
955 c->idx_gc_cnt -= 1;
956 list_del(&idx_gc->list);
957 kfree(idx_gc);
958 }
959}
960
961
962
963
964
965
966
967int ubifs_get_idx_gc_leb(struct ubifs_info *c)
968{
969 struct ubifs_gced_idx_leb *idx_gc;
970 int lnum;
971
972 if (list_empty(&c->idx_gc))
973 return -ENOSPC;
974 idx_gc = list_entry(c->idx_gc.next, struct ubifs_gced_idx_leb, list);
975 lnum = idx_gc->lnum;
976
977 list_del(&idx_gc->list);
978 kfree(idx_gc);
979 return lnum;
980}
981