1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46#include <linux/freezer.h>
47#include <linux/kthread.h>
48#include <linux/slab.h>
49#include "ubifs.h"
50
51
52
53
54
55
56
57
58
/**
 * do_commit - commit the journal.
 * @c: UBIFS file-system description object
 *
 * This function implements UBIFS commit.  It has to be called with the commit
 * semaphore ('c->commit_sem') held for writing - it is released by this
 * function (on both the success and the error paths).  Returns zero in case
 * of success and a negative error code in case of failure; on failure the
 * file-system is switched to read-only mode.
 */
static int do_commit(struct ubifs_info *c)
{
	int err, new_ltail_lnum, old_ltail_lnum, i;
	struct ubifs_zbranch zroot;
	struct ubifs_lp_stats lst;

	dbg_cmt("start");
	ubifs_assert(!c->ro_media && !c->ro_mount);

	if (c->ro_error) {
		err = -EROFS;
		goto out_up;
	}

	/* Sync all write buffers (necessary for recovery) */
	for (i = 0; i < c->jhead_cnt; i++) {
		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
		if (err)
			goto out_up;
	}

	/*
	 * Start-of-commit phase: each sub-system prepares its part of the
	 * commit.  Any failure here aborts the commit before the master node
	 * is touched.
	 */
	c->cmt_no += 1;
	err = ubifs_gc_start_commit(c);
	if (err)
		goto out_up;
	err = dbg_check_lprops(c);
	if (err)
		goto out_up;
	err = ubifs_log_start_commit(c, &new_ltail_lnum);
	if (err)
		goto out_up;
	err = ubifs_tnc_start_commit(c, &zroot);
	if (err)
		goto out_up;
	err = ubifs_lpt_start_commit(c);
	if (err)
		goto out_up;
	err = ubifs_orphan_start_commit(c);
	if (err)
		goto out_up;

	ubifs_get_lp_stats(c, &lst);

	up_write(&c->commit_sem);

	/*
	 * End-of-commit phase runs without 'c->commit_sem', so new journal
	 * updates may proceed while the index and LPT are written out.
	 */
	err = ubifs_tnc_end_commit(c);
	if (err)
		goto out;
	err = ubifs_lpt_end_commit(c);
	if (err)
		goto out;
	err = ubifs_orphan_end_commit(c);
	if (err)
		goto out;
	old_ltail_lnum = c->ltail_lnum;
	err = ubifs_log_end_commit(c, new_ltail_lnum);
	if (err)
		goto out;
	err = dbg_check_old_index(c, &zroot);
	if (err)
		goto out;

	/*
	 * Fill in and write the master node, which makes the new commit
	 * state effective on the media.  'c->mst_mutex' serializes master
	 * node updates.
	 */
	mutex_lock(&c->mst_mutex);
	c->mst_node->cmt_no = cpu_to_le64(c->cmt_no);
	c->mst_node->log_lnum = cpu_to_le32(new_ltail_lnum);
	c->mst_node->root_lnum = cpu_to_le32(zroot.lnum);
	c->mst_node->root_offs = cpu_to_le32(zroot.offs);
	c->mst_node->root_len = cpu_to_le32(zroot.len);
	c->mst_node->ihead_lnum = cpu_to_le32(c->ihead_lnum);
	c->mst_node->ihead_offs = cpu_to_le32(c->ihead_offs);
	c->mst_node->index_size = cpu_to_le64(c->old_idx_sz);
	c->mst_node->lpt_lnum = cpu_to_le32(c->lpt_lnum);
	c->mst_node->lpt_offs = cpu_to_le32(c->lpt_offs);
	c->mst_node->nhead_lnum = cpu_to_le32(c->nhead_lnum);
	c->mst_node->nhead_offs = cpu_to_le32(c->nhead_offs);
	c->mst_node->ltab_lnum = cpu_to_le32(c->ltab_lnum);
	c->mst_node->ltab_offs = cpu_to_le32(c->ltab_offs);
	c->mst_node->lsave_lnum = cpu_to_le32(c->lsave_lnum);
	c->mst_node->lsave_offs = cpu_to_le32(c->lsave_offs);
	c->mst_node->lscan_lnum = cpu_to_le32(c->lscan_lnum);
	c->mst_node->empty_lebs = cpu_to_le32(lst.empty_lebs);
	c->mst_node->idx_lebs = cpu_to_le32(lst.idx_lebs);
	c->mst_node->total_free = cpu_to_le64(lst.total_free);
	c->mst_node->total_dirty = cpu_to_le64(lst.total_dirty);
	c->mst_node->total_used = cpu_to_le64(lst.total_used);
	c->mst_node->total_dead = cpu_to_le64(lst.total_dead);
	c->mst_node->total_dark = cpu_to_le64(lst.total_dark);
	if (c->no_orphs)
		c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
	else
		c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS);
	err = ubifs_write_master(c);
	mutex_unlock(&c->mst_mutex);
	if (err)
		goto out;

	/* Post-commit clean-up now that the master node is on the media */
	err = ubifs_log_post_commit(c, old_ltail_lnum);
	if (err)
		goto out;
	err = ubifs_gc_end_commit(c);
	if (err)
		goto out;
	err = ubifs_lpt_post_commit(c);
	if (err)
		goto out;

	spin_lock(&c->cs_lock);
	c->cmt_state = COMMIT_RESTING;
	wake_up(&c->cmt_wq);
	dbg_cmt("commit end");
	spin_unlock(&c->cs_lock);

	return 0;

out_up:
	up_write(&c->commit_sem);
out:
	ubifs_err("commit failed, error %d", err);
	spin_lock(&c->cs_lock);
	c->cmt_state = COMMIT_BROKEN;
	wake_up(&c->cmt_wq);
	spin_unlock(&c->cs_lock);
	ubifs_ro_mode(c, err);
	return err;
}
184
185
186
187
188
189
190
191
/**
 * run_bg_commit - run background commit if it is needed.
 * @c: UBIFS file-system description object
 *
 * This function runs the commit if 'c->cmt_state' indicates a background
 * commit was requested or a commit is required.  Returns zero in case of
 * success (including "nothing to do") and a negative error code in case of
 * failure.
 */
static int run_bg_commit(struct ubifs_info *c)
{
	spin_lock(&c->cs_lock);
	/*
	 * Run background commit only if background commit was requested or if
	 * commit is required.
	 */
	if (c->cmt_state != COMMIT_BACKGROUND &&
	    c->cmt_state != COMMIT_REQUIRED)
		goto out;
	spin_unlock(&c->cs_lock);

	down_write(&c->commit_sem);
	spin_lock(&c->cs_lock);
	/*
	 * Re-check the state under the lock - it may have changed while we
	 * were sleeping on 'c->commit_sem' (e.g. another task may have run
	 * the commit already).
	 */
	if (c->cmt_state == COMMIT_REQUIRED)
		c->cmt_state = COMMIT_RUNNING_REQUIRED;
	else if (c->cmt_state == COMMIT_BACKGROUND)
		c->cmt_state = COMMIT_RUNNING_BACKGROUND;
	else
		goto out_cmt_unlock;
	spin_unlock(&c->cs_lock);

	/* do_commit() releases 'c->commit_sem' */
	return do_commit(c);

out_cmt_unlock:
	up_write(&c->commit_sem);
out:
	spin_unlock(&c->cs_lock);
	return 0;
}
222
223
224
225
226
227
228
229
230
231
232
233
234
/**
 * ubifs_bg_thread - UBIFS background thread function.
 * @info: points to the file-system description object
 *
 * This function implements UBIFS background activities - it syncs the
 * write-buffers and runs the background commit whenever 'c->need_bgt' is
 * set.  The thread is freezable and exits when 'kthread_stop()' is called.
 * Returns zero.
 */
int ubifs_bg_thread(void *info)
{
	int err;
	struct ubifs_info *c = info;

	dbg_msg("background thread \"%s\" started, PID %d",
		c->bgt_name, current->pid);
	set_freezable();

	while (1) {
		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		/*
		 * Mark ourselves interruptible *before* testing 'need_bgt' so
		 * that a wake-up between the test and schedule() is not lost.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		if (!c->need_bgt) {
			/*
			 * Nothing to do.  Re-check 'kthread_should_stop()'
			 * before sleeping, otherwise we could go to sleep
			 * just after a stop request and block the task
			 * waiting in 'kthread_stop()'.
			 */
			if (kthread_should_stop())
				break;
			schedule();
			continue;
		} else
			__set_current_state(TASK_RUNNING);

		c->need_bgt = 0;
		err = ubifs_bg_wbufs_sync(c);
		if (err)
			ubifs_ro_mode(c, err);

		run_bg_commit(c);
		cond_resched();
	}

	dbg_msg("background thread \"%s\" stops", c->bgt_name);
	return 0;
}
278
279
280
281
282
283
284
285
286void ubifs_commit_required(struct ubifs_info *c)
287{
288 spin_lock(&c->cs_lock);
289 switch (c->cmt_state) {
290 case COMMIT_RESTING:
291 case COMMIT_BACKGROUND:
292 dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
293 dbg_cstate(COMMIT_REQUIRED));
294 c->cmt_state = COMMIT_REQUIRED;
295 break;
296 case COMMIT_RUNNING_BACKGROUND:
297 dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
298 dbg_cstate(COMMIT_RUNNING_REQUIRED));
299 c->cmt_state = COMMIT_RUNNING_REQUIRED;
300 break;
301 case COMMIT_REQUIRED:
302 case COMMIT_RUNNING_REQUIRED:
303 case COMMIT_BROKEN:
304 break;
305 }
306 spin_unlock(&c->cs_lock);
307}
308
309
310
311
312
313
314
315
316void ubifs_request_bg_commit(struct ubifs_info *c)
317{
318 spin_lock(&c->cs_lock);
319 if (c->cmt_state == COMMIT_RESTING) {
320 dbg_cmt("old: %s, new: %s", dbg_cstate(c->cmt_state),
321 dbg_cstate(COMMIT_BACKGROUND));
322 c->cmt_state = COMMIT_BACKGROUND;
323 spin_unlock(&c->cs_lock);
324 ubifs_wake_up_bgt(c);
325 } else
326 spin_unlock(&c->cs_lock);
327}
328
329
330
331
332
333
334
335static int wait_for_commit(struct ubifs_info *c)
336{
337 dbg_cmt("pid %d goes sleep", current->pid);
338
339
340
341
342
343
344
345
346 wait_event(c->cmt_wq, c->cmt_state != COMMIT_RUNNING_BACKGROUND &&
347 c->cmt_state != COMMIT_RUNNING_REQUIRED);
348 dbg_cmt("commit finished, pid %d woke up", current->pid);
349 return 0;
350}
351
352
353
354
355
356
357
358
/**
 * ubifs_run_commit - run or wait for commit.
 * @c: UBIFS file-system description object
 *
 * This function runs commit and returns zero in case of success and a
 * negative error code in case of failure.  If a commit is already running,
 * this function joins it by waiting for the running commit to finish instead
 * of starting a new one.
 */
int ubifs_run_commit(struct ubifs_info *c)
{
	int err = 0;

	spin_lock(&c->cs_lock);
	if (c->cmt_state == COMMIT_BROKEN) {
		err = -EINVAL;
		goto out;
	}

	if (c->cmt_state == COMMIT_RUNNING_BACKGROUND)
		/*
		 * A background commit is already in progress - upgrade it to
		 * "running required" so we can simply wait for it instead of
		 * starting a new commit.
		 */
		c->cmt_state = COMMIT_RUNNING_REQUIRED;

	if (c->cmt_state == COMMIT_RUNNING_REQUIRED) {
		spin_unlock(&c->cs_lock);
		return wait_for_commit(c);
	}
	spin_unlock(&c->cs_lock);

	/* Ok, the commit is indeed needed */

	down_write(&c->commit_sem);
	spin_lock(&c->cs_lock);
	/*
	 * Since we dropped 'c->cs_lock' before taking 'c->commit_sem', the
	 * commit state may have changed - re-check it.
	 */
	if (c->cmt_state == COMMIT_BROKEN) {
		err = -EINVAL;
		goto out_cmt_unlock;
	}

	if (c->cmt_state == COMMIT_RUNNING_BACKGROUND)
		c->cmt_state = COMMIT_RUNNING_REQUIRED;

	if (c->cmt_state == COMMIT_RUNNING_REQUIRED) {
		up_write(&c->commit_sem);
		spin_unlock(&c->cs_lock);
		return wait_for_commit(c);
	}
	c->cmt_state = COMMIT_RUNNING_REQUIRED;
	spin_unlock(&c->cs_lock);

	/* do_commit() releases 'c->commit_sem' */
	err = do_commit(c);
	return err;

out_cmt_unlock:
	up_write(&c->commit_sem);
out:
	spin_unlock(&c->cs_lock);
	return err;
}
415
416
417
418
419
420
421
422
423
424
425
426
427int ubifs_gc_should_commit(struct ubifs_info *c)
428{
429 int ret = 0;
430
431 spin_lock(&c->cs_lock);
432 if (c->cmt_state == COMMIT_BACKGROUND) {
433 dbg_cmt("commit required now");
434 c->cmt_state = COMMIT_REQUIRED;
435 } else
436 dbg_cmt("commit not requested");
437 if (c->cmt_state == COMMIT_REQUIRED)
438 ret = 1;
439 spin_unlock(&c->cs_lock);
440 return ret;
441}
442
443#ifdef CONFIG_UBIFS_FS_DEBUG
444
445
446
447
448
449
450
451
452
453
454
/**
 * struct idx_node - hold index nodes during index tree traversal.
 * @list: list link (traversal path kept on a local list)
 * @iip: index in parent (slot number of this indexing node in the parent
 *       indexing node)
 * @upper_key: all keys in this index node have to be less or equivalent to
 *             this key
 * @idx: index node (8-byte aligned because on-flash node structures are
 *       8-byte aligned)
 */
struct idx_node {
	struct list_head list;
	int iip;
	union ubifs_key upper_key;
	struct ubifs_idx_node idx __attribute__((aligned(8)));
};
461
462
463
464
465
466
467
468
469
470
471
472int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot)
473{
474 struct ubifs_idx_node *idx;
475 int lnum, offs, len, err = 0;
476 struct ubifs_debug_info *d = c->dbg;
477
478 d->old_zroot = *zroot;
479 lnum = d->old_zroot.lnum;
480 offs = d->old_zroot.offs;
481 len = d->old_zroot.len;
482
483 idx = kmalloc(c->max_idx_node_sz, GFP_NOFS);
484 if (!idx)
485 return -ENOMEM;
486
487 err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);
488 if (err)
489 goto out;
490
491 d->old_zroot_level = le16_to_cpu(idx->level);
492 d->old_zroot_sqnum = le64_to_cpu(idx->ch.sqnum);
493out:
494 kfree(idx);
495 return err;
496}
497
498
499
500
501
502
503
504
505
506
507
508
509
510
/**
 * dbg_check_old_index - check the old copy of the index.
 * @c: UBIFS file-system description object
 * @zroot: root of the new index
 *
 * This function walks the entire index tree rooted at the position recorded
 * earlier by dbg_old_index_check_init() ('d->old_zroot'), verifying node
 * levels, sequence numbers and key ordering.  It then re-initializes the
 * recorded root information from @zroot for the next check.  Positive
 * internal error numbers (1..7) mark which check failed and are converted to
 * %-EINVAL before returning; other negative error codes are passed through.
 * Returns %0 on success.
 */
int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	int lnum, offs, len, err = 0, uninitialized_var(last_level), child_cnt;
	int first = 1, iip;
	struct ubifs_debug_info *d = c->dbg;
	union ubifs_key uninitialized_var(lower_key), upper_key, l_key, u_key;
	unsigned long long uninitialized_var(last_sqnum);
	struct ubifs_idx_node *idx;
	struct list_head list;
	struct idx_node *i;
	size_t sz;

	if (!(ubifs_chk_flags & UBIFS_CHK_OLD_IDX))
		return 0;

	INIT_LIST_HEAD(&list);

	/* Allocation size: one idx_node with room for a full-fanout node */
	sz = sizeof(struct idx_node) + ubifs_idx_node_sz(c, c->fanout) -
	     UBIFS_IDX_NODE_SZ;

	/* Start at the old index root recorded at the previous commit */
	lnum = d->old_zroot.lnum;
	offs = d->old_zroot.offs;
	len = d->old_zroot.len;
	iip = 0;

	/*
	 * Traverse the index tree depth-first, keeping the path from the
	 * root on 'list' so that we can ascend back to the parent.
	 */
	while (1) {
		struct ubifs_branch *br;

		/* Get the next index node */
		i = kmalloc(sz, GFP_NOFS);
		if (!i) {
			err = -ENOMEM;
			goto out_free;
		}
		i->iip = iip;
		/* Keep the node on the path list */
		list_add_tail(&i->list, &list);
		/* Read the index node */
		idx = &i->idx;
		err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);
		if (err)
			goto out_free;
		/* Validate the index node */
		child_cnt = le16_to_cpu(idx->child_cnt);
		if (child_cnt < 1 || child_cnt > c->fanout) {
			err = 1;
			goto out_dump;
		}
		if (first) {
			first = 0;
			/* Check root level and sqnum against recorded values */
			if (le16_to_cpu(idx->level) != d->old_zroot_level) {
				err = 2;
				goto out_dump;
			}
			if (le64_to_cpu(idx->ch.sqnum) != d->old_zroot_sqnum) {
				err = 3;
				goto out_dump;
			}
			/* Set last values as though root had a parent */
			last_level = le16_to_cpu(idx->level) + 1;
			last_sqnum = le64_to_cpu(idx->ch.sqnum) + 1;
			key_read(c, ubifs_idx_key(c, idx), &lower_key);
			highest_ino_key(c, &upper_key, INUM_WATERMARK);
		}
		key_copy(c, &upper_key, &i->upper_key);
		/* Each level must be exactly one less than the parent's */
		if (le16_to_cpu(idx->level) != last_level - 1) {
			err = 3;
			goto out_dump;
		}
		/*
		 * The index is written bottom up, so a child's sqnum must be
		 * strictly less than its parent's.
		 */
		if (le64_to_cpu(idx->ch.sqnum) >= last_sqnum) {
			err = 4;
			goto out_dump;
		}
		/* Check key range of the node against the parent's branch */
		key_read(c, ubifs_idx_key(c, idx), &l_key);
		br = ubifs_idx_branch(c, idx, child_cnt - 1);
		key_read(c, &br->key, &u_key);
		if (keys_cmp(c, &lower_key, &l_key) > 0) {
			err = 5;
			goto out_dump;
		}
		if (keys_cmp(c, &upper_key, &u_key) < 0) {
			err = 6;
			goto out_dump;
		}
		if (keys_cmp(c, &upper_key, &u_key) == 0)
			if (!is_hash_key(c, &u_key)) {
				err = 7;
				goto out_dump;
			}
		/* Go to next index node */
		if (le16_to_cpu(idx->level) == 0) {
			/* At the bottom, so go up until can go right */
			while (1) {
				/* Drop the bottom of the list */
				list_del(&i->list);
				kfree(i);
				/* No more list means we are done */
				if (list_empty(&list))
					goto out;
				/* Look at the new bottom */
				i = list_entry(list.prev, struct idx_node,
					       list);
				idx = &i->idx;
				/* Can we go right */
				if (iip + 1 < le16_to_cpu(idx->child_cnt)) {
					iip = iip + 1;
					break;
				} else
					/* Nope, so go up again */
					iip = i->iip;
			}
		} else
			/* Go down left */
			iip = 0;
		/*
		 * We have the parent in 'idx' and now we set up for reading
		 * the child pointed to by slot 'iip'.
		 */
		last_level = le16_to_cpu(idx->level);
		last_sqnum = le64_to_cpu(idx->ch.sqnum);
		br = ubifs_idx_branch(c, idx, iip);
		lnum = le32_to_cpu(br->lnum);
		offs = le32_to_cpu(br->offs);
		len = le32_to_cpu(br->len);
		key_read(c, &br->key, &lower_key);
		if (iip + 1 < le16_to_cpu(idx->child_cnt)) {
			br = ubifs_idx_branch(c, idx, iip + 1);
			key_read(c, &br->key, &upper_key);
		} else
			key_copy(c, &i->upper_key, &upper_key);
	}
out:
	err = dbg_old_index_check_init(c, zroot);
	if (err)
		goto out_free;

	return 0;

out_dump:
	dbg_err("dumping index node (iip=%d)", i->iip);
	dbg_dump_node(c, idx);
	list_del(&i->list);
	kfree(i);
	if (!list_empty(&list)) {
		i = list_entry(list.prev, struct idx_node, list);
		dbg_err("dumping parent index node");
		dbg_dump_node(c, &i->idx);
	}
out_free:
	while (!list_empty(&list)) {
		i = list_entry(list.next, struct idx_node, list);
		list_del(&i->list);
		kfree(i);
	}
	ubifs_err("failed, error %d", err);
	if (err > 0)
		err = -EINVAL;
	return err;
}
681
682#endif
683