1
2
3
4
5
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_trans_resv.h"
11#include "xfs_mount.h"
12#include "xfs_btree.h"
13#include "xfs_log_format.h"
14#include "xfs_trans.h"
15#include "xfs_sb.h"
16#include "xfs_alloc.h"
17#include "xfs_alloc_btree.h"
18#include "xfs_ialloc.h"
19#include "xfs_ialloc_btree.h"
20#include "xfs_rmap.h"
21#include "xfs_rmap_btree.h"
22#include "xfs_refcount_btree.h"
23#include "xfs_ag.h"
24#include "scrub/scrub.h"
25#include "scrub/common.h"
26#include "scrub/trace.h"
27#include "scrub/repair.h"
28#include "scrub/bitmap.h"
29
30
31
32
33int
34xrep_superblock(
35 struct xfs_scrub *sc)
36{
37 struct xfs_mount *mp = sc->mp;
38 struct xfs_buf *bp;
39 xfs_agnumber_t agno;
40 int error;
41
42
43 agno = sc->sm->sm_agno;
44 if (agno == 0)
45 return -EOPNOTSUPP;
46
47 error = xfs_sb_get_secondary(mp, sc->tp, agno, &bp);
48 if (error)
49 return error;
50
51
52 xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
53 xfs_sb_to_disk(bp->b_addr, &mp->m_sb);
54
55
56 xfs_trans_buf_set_type(sc->tp, bp, XFS_BLFT_SB_BUF);
57 xfs_trans_log_buf(sc->tp, bp, 0, BBTOB(bp->b_length) - 1);
58 return error;
59}
60
61
62
/* Accumulator for free-space statistics gathered while walking the bnobt. */
struct xrep_agf_allocbt {
	struct xfs_scrub	*sc;
	xfs_agblock_t		freeblks;	/* running total of free blocks */
	xfs_agblock_t		longest;	/* longest free extent seen so far */
};
68
69
70STATIC int
71xrep_agf_walk_allocbt(
72 struct xfs_btree_cur *cur,
73 struct xfs_alloc_rec_incore *rec,
74 void *priv)
75{
76 struct xrep_agf_allocbt *raa = priv;
77 int error = 0;
78
79 if (xchk_should_terminate(raa->sc, &error))
80 return error;
81
82 raa->freeblks += rec->ar_blockcount;
83 if (rec->ar_blockcount > raa->longest)
84 raa->longest = rec->ar_blockcount;
85 return error;
86}
87
88
89STATIC int
90xrep_agf_check_agfl_block(
91 struct xfs_mount *mp,
92 xfs_agblock_t agbno,
93 void *priv)
94{
95 struct xfs_scrub *sc = priv;
96
97 if (!xfs_verify_agbno(mp, sc->sa.agno, agbno))
98 return -EFSCORRUPTED;
99 return 0;
100}
101
102
103
104
105
/* Indexes into the fab array of btrees hanging off the AGF. */
enum {
	XREP_AGF_BNOBT = 0,	/* free space by block number */
	XREP_AGF_CNTBT,		/* free space by extent length */
	XREP_AGF_RMAPBT,	/* reverse mapping btree */
	XREP_AGF_REFCOUNTBT,	/* reference count btree */
	XREP_AGF_END,		/* array terminator (NULL buf_ops) */
	XREP_AGF_MAX
};
114
115
116static inline bool
117xrep_check_btree_root(
118 struct xfs_scrub *sc,
119 struct xrep_find_ag_btree *fab)
120{
121 struct xfs_mount *mp = sc->mp;
122 xfs_agnumber_t agno = sc->sm->sm_agno;
123
124 return xfs_verify_agbno(mp, agno, fab->root) &&
125 fab->height <= XFS_BTREE_MAXLEVELS;
126}
127
128
129
130
131
132
133
134
135
136
137STATIC int
138xrep_agf_find_btrees(
139 struct xfs_scrub *sc,
140 struct xfs_buf *agf_bp,
141 struct xrep_find_ag_btree *fab,
142 struct xfs_buf *agfl_bp)
143{
144 struct xfs_agf *old_agf = agf_bp->b_addr;
145 int error;
146
147
148 error = xrep_find_ag_btree_roots(sc, agf_bp, fab, agfl_bp);
149 if (error)
150 return error;
151
152
153 if (!xrep_check_btree_root(sc, &fab[XREP_AGF_BNOBT]) ||
154 !xrep_check_btree_root(sc, &fab[XREP_AGF_CNTBT]) ||
155 !xrep_check_btree_root(sc, &fab[XREP_AGF_RMAPBT]))
156 return -EFSCORRUPTED;
157
158
159
160
161
162 if (fab[XREP_AGF_RMAPBT].root !=
163 be32_to_cpu(old_agf->agf_roots[XFS_BTNUM_RMAPi]))
164 return -EFSCORRUPTED;
165
166
167 if (xfs_sb_version_hasreflink(&sc->mp->m_sb) &&
168 !xrep_check_btree_root(sc, &fab[XREP_AGF_REFCOUNTBT]))
169 return -EFSCORRUPTED;
170
171 return 0;
172}
173
174
175
176
177
/*
 * Reinitialize the ondisk AGF header from scratch, saving the old contents
 * in @old_agf so that a failed repair can put them back.
 */
STATIC void
xrep_agf_init_header(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agf_bp,
	struct xfs_agf		*old_agf)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agf		*agf = agf_bp->b_addr;

	/* Stash the old header before wiping the buffer. */
	memcpy(old_agf, agf, sizeof(*old_agf));
	memset(agf, 0, BBTOB(agf_bp->b_length));
	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(sc->sa.agno);
	agf->agf_length = cpu_to_be32(xfs_ag_block_count(mp, sc->sa.agno));
	/* Preserve the old freelist pointers; this repair leaves the AGFL alone. */
	agf->agf_flfirst = old_agf->agf_flfirst;
	agf->agf_fllast = old_agf->agf_fllast;
	agf->agf_flcount = old_agf->agf_flcount;
	if (xfs_sb_version_hascrc(&mp->m_sb))
		uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);

	/* Mark the incore AGF data stale until we're done fixing things. */
	ASSERT(sc->sa.pag->pagf_init);
	sc->sa.pag->pagf_init = 0;
}
203
204
205STATIC void
206xrep_agf_set_roots(
207 struct xfs_scrub *sc,
208 struct xfs_agf *agf,
209 struct xrep_find_ag_btree *fab)
210{
211 agf->agf_roots[XFS_BTNUM_BNOi] =
212 cpu_to_be32(fab[XREP_AGF_BNOBT].root);
213 agf->agf_levels[XFS_BTNUM_BNOi] =
214 cpu_to_be32(fab[XREP_AGF_BNOBT].height);
215
216 agf->agf_roots[XFS_BTNUM_CNTi] =
217 cpu_to_be32(fab[XREP_AGF_CNTBT].root);
218 agf->agf_levels[XFS_BTNUM_CNTi] =
219 cpu_to_be32(fab[XREP_AGF_CNTBT].height);
220
221 agf->agf_roots[XFS_BTNUM_RMAPi] =
222 cpu_to_be32(fab[XREP_AGF_RMAPBT].root);
223 agf->agf_levels[XFS_BTNUM_RMAPi] =
224 cpu_to_be32(fab[XREP_AGF_RMAPBT].height);
225
226 if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) {
227 agf->agf_refcount_root =
228 cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].root);
229 agf->agf_refcount_level =
230 cpu_to_be32(fab[XREP_AGF_REFCOUNTBT].height);
231 }
232}
233
234
/* Update all AGF fields that are derived from btree contents. */
STATIC int
xrep_agf_calc_from_btrees(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agf_bp)
{
	struct xrep_agf_allocbt	raa = { .sc = sc };
	struct xfs_btree_cur	*cur = NULL;
	struct xfs_agf		*agf = agf_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		btreeblks;
	xfs_agblock_t		blocks;
	int			error;

	/* Update the AGF counters from the bnobt. */
	cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
			sc->sa.pag, XFS_BTNUM_BNO);
	error = xfs_alloc_query_all(cur, xrep_agf_walk_allocbt, &raa);
	if (error)
		goto err;
	error = xfs_btree_count_blocks(cur, &blocks);
	if (error)
		goto err;
	xfs_btree_del_cursor(cur, error);
	/* blocks - 1: the btreeblks tally excludes each btree's root block. */
	btreeblks = blocks - 1;
	agf->agf_freeblks = cpu_to_be32(raa.freeblks);
	agf->agf_longest = cpu_to_be32(raa.longest);

	/* Update the AGF counters from the cntbt. */
	cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
			sc->sa.pag, XFS_BTNUM_CNT);
	error = xfs_btree_count_blocks(cur, &blocks);
	if (error)
		goto err;
	xfs_btree_del_cursor(cur, error);
	btreeblks += blocks - 1;

	/* Update the AGF counters from the rmapbt. */
	cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
	error = xfs_btree_count_blocks(cur, &blocks);
	if (error)
		goto err;
	xfs_btree_del_cursor(cur, error);
	agf->agf_rmap_blocks = cpu_to_be32(blocks);
	btreeblks += blocks - 1;

	agf->agf_btreeblks = cpu_to_be32(btreeblks);

	/* Update the AGF counters from the refcountbt, if it exists. */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		cur = xfs_refcountbt_init_cursor(mp, sc->tp, agf_bp,
				sc->sa.pag);
		error = xfs_btree_count_blocks(cur, &blocks);
		if (error)
			goto err;
		xfs_btree_del_cursor(cur, error);
		agf->agf_refcount_blocks = cpu_to_be32(blocks);
	}

	return 0;
err:
	/* Tear down whichever cursor was live when the error hit. */
	xfs_btree_del_cursor(cur, error);
	return error;
}
298
299
300STATIC int
301xrep_agf_commit_new(
302 struct xfs_scrub *sc,
303 struct xfs_buf *agf_bp)
304{
305 struct xfs_perag *pag;
306 struct xfs_agf *agf = agf_bp->b_addr;
307
308
309 xfs_force_summary_recalc(sc->mp);
310
311
312 xfs_trans_buf_set_type(sc->tp, agf_bp, XFS_BLFT_AGF_BUF);
313 xfs_trans_log_buf(sc->tp, agf_bp, 0, BBTOB(agf_bp->b_length) - 1);
314
315
316 pag = sc->sa.pag;
317 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
318 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
319 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
320 pag->pagf_levels[XFS_BTNUM_BNOi] =
321 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
322 pag->pagf_levels[XFS_BTNUM_CNTi] =
323 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
324 pag->pagf_levels[XFS_BTNUM_RMAPi] =
325 be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
326 pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
327 pag->pagf_init = 1;
328
329 return 0;
330}
331
332
/* Repair the AGF.  Requires the rmapbt, so v5/rmapbt filesystems only. */
int
xrep_agf(
	struct xfs_scrub		*sc)
{
	/* Btree roots to locate via the rmapbt, keyed by rmap owner. */
	struct xrep_find_ag_btree	fab[XREP_AGF_MAX] = {
		[XREP_AGF_BNOBT] = {
			.rmap_owner = XFS_RMAP_OWN_AG,
			.buf_ops = &xfs_bnobt_buf_ops,
		},
		[XREP_AGF_CNTBT] = {
			.rmap_owner = XFS_RMAP_OWN_AG,
			.buf_ops = &xfs_cntbt_buf_ops,
		},
		[XREP_AGF_RMAPBT] = {
			.rmap_owner = XFS_RMAP_OWN_AG,
			.buf_ops = &xfs_rmapbt_buf_ops,
		},
		[XREP_AGF_REFCOUNTBT] = {
			.rmap_owner = XFS_RMAP_OWN_REFC,
			.buf_ops = &xfs_refcountbt_buf_ops,
		},
		[XREP_AGF_END] = {
			.buf_ops = NULL,
		},
	};
	struct xfs_agf			old_agf;
	struct xfs_mount		*mp = sc->mp;
	struct xfs_buf			*agf_bp;
	struct xfs_buf			*agfl_bp;
	struct xfs_agf			*agf;
	int				error;

	/* We require the rmapbt to rebuild anything. */
	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return -EOPNOTSUPP;

	xchk_perag_get(sc->mp, &sc->sa);
	/*
	 * Read the AGF buffer with NULL buf ops since the header may be
	 * corrupt and would fail verification; attach the real verifier
	 * once we hold the buffer.
	 */
	error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, sc->sa.agno, XFS_AGF_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &agf_bp, NULL);
	if (error)
		return error;
	agf_bp->b_ops = &xfs_agf_buf_ops;
	agf = agf_bp->b_addr;

	/* Load the AGFL so that its contents can be sanity-checked below. */
	error = xfs_alloc_read_agfl(mp, sc->tp, sc->sa.agno, &agfl_bp);
	if (error)
		return error;

	/* Reject the repair if any AGFL block number is out of range. */
	error = xfs_agfl_walk(sc->mp, agf_bp->b_addr, agfl_bp,
			xrep_agf_check_agfl_block, sc);
	if (error)
		return error;

	/* Find the AGF btree roots via the rmapbt. */
	error = xrep_agf_find_btrees(sc, agf_bp, fab, agfl_bp);
	if (error)
		return error;

	/*
	 * Rewrite the header (saving the old one in old_agf), implant the
	 * roots we found, and recompute the derived counters.
	 */
	xrep_agf_init_header(sc, agf_bp, &old_agf);
	xrep_agf_set_roots(sc, agf, fab);
	error = xrep_agf_calc_from_btrees(sc, agf_bp);
	if (error)
		goto out_revert;

	/* Commit the new AGF and refresh the incore state. */
	return xrep_agf_commit_new(sc, agf_bp);

out_revert:
	/* Mark the incore AGF state stale and put back the old AGF. */
	sc->sa.pag->pagf_init = 0;
	memcpy(agf, &old_agf, sizeof(old_agf));
	return error;
}
428
429
430
/* Context for rebuilding the AGFL. */
struct xrep_agfl {
	/* Bitmap of blocks used by the bnobt, cntbt, and rmapbt themselves. */
	struct xbitmap	agmetablocks;

	/* Bitmap of OWN_AG extents found in the rmapbt. */
	struct xbitmap	*freesp;

	struct xfs_scrub	*sc;
};
440
441
442STATIC int
443xrep_agfl_walk_rmap(
444 struct xfs_btree_cur *cur,
445 struct xfs_rmap_irec *rec,
446 void *priv)
447{
448 struct xrep_agfl *ra = priv;
449 xfs_fsblock_t fsb;
450 int error = 0;
451
452 if (xchk_should_terminate(ra->sc, &error))
453 return error;
454
455
456 if (rec->rm_owner == XFS_RMAP_OWN_AG) {
457 fsb = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno,
458 rec->rm_startblock);
459 error = xbitmap_set(ra->freesp, fsb, rec->rm_blockcount);
460 if (error)
461 return error;
462 }
463
464 return xbitmap_set_btcur_path(&ra->agmetablocks, cur);
465}
466
467
468
469
470
471
472
473
474
475
/*
 * Figure out which blocks should end up on the new AGFL: gather every
 * OWN_AG extent from the rmapbt, subtract the blocks occupied by the
 * free space btrees and the rmapbt itself, and cap the count at the
 * maximum AGFL size.  On success, @agfl_extents holds the candidate
 * blocks and @flcount the number that will actually fit.
 */
STATIC int
xrep_agfl_collect_blocks(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agf_bp,
	struct xbitmap		*agfl_extents,
	xfs_agblock_t		*flcount)
{
	struct xrep_agfl	ra;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_btree_cur	*cur;
	int			error;

	ra.sc = sc;
	ra.freesp = agfl_extents;
	xbitmap_init(&ra.agmetablocks);

	/* Walk the rmapbt, collecting OWN_AG extents and rmapbt blocks. */
	cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
	error = xfs_rmap_query_all(cur, xrep_agfl_walk_rmap, &ra);
	if (error)
		goto err;
	xfs_btree_del_cursor(cur, error);

	/* Collect all blocks currently used by the bnobt. */
	cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
			sc->sa.pag, XFS_BTNUM_BNO);
	error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
	if (error)
		goto err;
	xfs_btree_del_cursor(cur, error);

	/* Collect all blocks currently used by the cntbt. */
	cur = xfs_allocbt_init_cursor(mp, sc->tp, agf_bp,
			sc->sa.pag, XFS_BTNUM_CNT);
	error = xbitmap_set_btblocks(&ra.agmetablocks, cur);
	if (error)
		goto err;

	xfs_btree_del_cursor(cur, error);

	/*
	 * Drop the btree metadata blocks out of the candidate set; what
	 * remains should be the blocks belonging on the AGFL.
	 */
	error = xbitmap_disunion(agfl_extents, &ra.agmetablocks);
	xbitmap_destroy(&ra.agmetablocks);
	if (error)
		return error;

	/*
	 * Cap the new AGFL length at the maximum the format allows; any
	 * surplus blocks stay in @agfl_extents for the caller to dispose of.
	 */
	*flcount = min_t(uint64_t, xbitmap_hweight(agfl_extents),
			 xfs_agfl_size(mp));
	return 0;

err:
	xbitmap_destroy(&ra.agmetablocks);
	xfs_btree_del_cursor(cur, error);
	return error;
}
538
539
540STATIC void
541xrep_agfl_update_agf(
542 struct xfs_scrub *sc,
543 struct xfs_buf *agf_bp,
544 xfs_agblock_t flcount)
545{
546 struct xfs_agf *agf = agf_bp->b_addr;
547
548 ASSERT(flcount <= xfs_agfl_size(sc->mp));
549
550
551 xfs_force_summary_recalc(sc->mp);
552
553
554 if (sc->sa.pag->pagf_init)
555 sc->sa.pag->pagf_flcount = flcount;
556 agf->agf_flfirst = cpu_to_be32(0);
557 agf->agf_flcount = cpu_to_be32(flcount);
558 agf->agf_fllast = cpu_to_be32(flcount - 1);
559
560 xfs_alloc_log_agf(sc->tp, agf_bp,
561 XFS_AGF_FLFIRST | XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
562}
563
564
/*
 * Write out a totally new AGFL, filling its slots from the first @flcount
 * blocks in @agfl_extents.  Consumed bitmap ranges are removed from the
 * list; anything left over stays in @agfl_extents for the caller.
 */
STATIC void
xrep_agfl_init_header(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agfl_bp,
	struct xbitmap		*agfl_extents,
	xfs_agblock_t		flcount)
{
	struct xfs_mount	*mp = sc->mp;
	__be32			*agfl_bno;
	struct xbitmap_range	*br;
	struct xbitmap_range	*n;
	struct xfs_agfl		*agfl;
	xfs_agblock_t		agbno;
	unsigned int		fl_off;

	ASSERT(flcount <= xfs_agfl_size(mp));

	/*
	 * Fill the buffer with 0xFF (all-ones block numbers in every slot)
	 * and then rewrite the AGFL header fields.
	 */
	agfl = XFS_BUF_TO_AGFL(agfl_bp);
	memset(agfl, 0xFF, BBTOB(agfl_bp->b_length));
	agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
	agfl->agfl_seqno = cpu_to_be32(sc->sa.agno);
	uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);

	/*
	 * Populate the bno[] array one block at a time, trimming each bitmap
	 * range as its blocks are placed on the freelist.
	 */
	fl_off = 0;
	agfl_bno = xfs_buf_to_agfl_bno(agfl_bp);
	for_each_xbitmap_extent(br, n, agfl_extents) {
		agbno = XFS_FSB_TO_AGBNO(mp, br->start);

		trace_xrep_agfl_insert(mp, sc->sa.agno, agbno, br->len);

		while (br->len > 0 && fl_off < flcount) {
			agfl_bno[fl_off] = cpu_to_be32(agbno);
			fl_off++;
			agbno++;

			/*
			 * Advance the range past the block we just used so
			 * that it will not be handed back to the free space
			 * btrees later.
			 */
			br->start++;
			br->len--;
		}

		/* A partially-consumed range stays behind for the caller. */
		if (br->len)
			break;
		list_del(&br->list);
		kmem_free(br);
	}

	/* Log the whole new AGFL. */
	xfs_trans_buf_set_type(sc->tp, agfl_bp, XFS_BLFT_AGFL_BUF);
	xfs_trans_log_buf(sc->tp, agfl_bp, 0, BBTOB(agfl_bp->b_length) - 1);
}
627
628
/* Repair the AGFL.  Requires the rmapbt, so v5/rmapbt filesystems only. */
int
xrep_agfl(
	struct xfs_scrub	*sc)
{
	struct xbitmap		agfl_extents;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*agf_bp;
	struct xfs_buf		*agfl_bp;
	xfs_agblock_t		flcount;
	int			error;

	/* We require the rmapbt to rebuild anything. */
	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return -EOPNOTSUPP;

	xchk_perag_get(sc->mp, &sc->sa);
	xbitmap_init(&agfl_extents);

	/*
	 * Read the AGF with its normal verifier so that we can query the
	 * rmapbt rooted there.
	 */
	error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.agno, 0, &agf_bp);
	if (error)
		return error;

	/*
	 * Read the AGFL buffer with NULL buf ops since its contents may be
	 * corrupt and would fail verification; attach the real verifier
	 * once we hold the buffer.
	 */
	error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, sc->sa.agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &agfl_bp, NULL);
	if (error)
		return error;
	agfl_bp->b_ops = &xfs_agfl_buf_ops;

	/* Gather all the extents that belong on the new AGFL. */
	error = xrep_agfl_collect_blocks(sc, agf_bp, &agfl_extents, &flcount);
	if (error)
		goto err;

	/* Update the AGF counters, then write the new freelist itself. */
	xrep_agfl_update_agf(sc, agf_bp, flcount);
	xrep_agfl_init_header(sc, agfl_bp, &agfl_extents, flcount);

	/*
	 * Roll the transaction so the rebuilt AGFL becomes permanent before
	 * we start returning the leftover blocks to the filesystem.
	 */
	sc->sa.agf_bp = agf_bp;
	sc->sa.agfl_bp = agfl_bp;
	error = xrep_roll_ag_trans(sc);
	if (error)
		goto err;

	/* Dispose of the blocks that did not fit on the AGFL. */
	error = xrep_reap_extents(sc, &agfl_extents, &XFS_RMAP_OINFO_AG,
			XFS_AG_RESV_AGFL);
err:
	xbitmap_destroy(&agfl_extents);
	return error;
}
698
699
700
701
702
703
704
/* Indexes into the fab array of btrees hanging off the AGI. */
enum {
	XREP_AGI_INOBT = 0,	/* inode allocation btree */
	XREP_AGI_FINOBT,	/* free inode btree */
	XREP_AGI_END,		/* array terminator (NULL buf_ops) */
	XREP_AGI_MAX
};
711
712
713
714
715
716STATIC int
717xrep_agi_find_btrees(
718 struct xfs_scrub *sc,
719 struct xrep_find_ag_btree *fab)
720{
721 struct xfs_buf *agf_bp;
722 struct xfs_mount *mp = sc->mp;
723 int error;
724
725
726 error = xfs_alloc_read_agf(mp, sc->tp, sc->sa.agno, 0, &agf_bp);
727 if (error)
728 return error;
729
730
731 error = xrep_find_ag_btree_roots(sc, agf_bp, fab, NULL);
732 if (error)
733 return error;
734
735
736 if (!xrep_check_btree_root(sc, &fab[XREP_AGI_INOBT]))
737 return -EFSCORRUPTED;
738
739
740 if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
741 !xrep_check_btree_root(sc, &fab[XREP_AGI_FINOBT]))
742 return -EFSCORRUPTED;
743
744 return 0;
745}
746
747
748
749
750
/*
 * Reinitialize the ondisk AGI header from scratch, saving the old contents
 * in @old_agi so that a failed repair can put them back.
 */
STATIC void
xrep_agi_init_header(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agi_bp,
	struct xfs_agi		*old_agi)
{
	struct xfs_agi		*agi = agi_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;

	/* Stash the old header before wiping the buffer. */
	memcpy(old_agi, agi, sizeof(*old_agi));
	memset(agi, 0, BBTOB(agi_bp->b_length));
	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(sc->sa.agno);
	agi->agi_length = cpu_to_be32(xfs_ag_block_count(mp, sc->sa.agno));
	agi->agi_newino = cpu_to_be32(NULLAGINO);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);

	/* Carry over the old unlinked-inode bucket heads unchanged. */
	memcpy(&agi->agi_unlinked, &old_agi->agi_unlinked,
			sizeof(agi->agi_unlinked));

	/* Mark the incore AGI data stale until we're done fixing things. */
	ASSERT(sc->sa.pag->pagi_init);
	sc->sa.pag->pagi_init = 0;
}
779
780
781STATIC void
782xrep_agi_set_roots(
783 struct xfs_scrub *sc,
784 struct xfs_agi *agi,
785 struct xrep_find_ag_btree *fab)
786{
787 agi->agi_root = cpu_to_be32(fab[XREP_AGI_INOBT].root);
788 agi->agi_level = cpu_to_be32(fab[XREP_AGI_INOBT].height);
789
790 if (xfs_sb_version_hasfinobt(&sc->mp->m_sb)) {
791 agi->agi_free_root = cpu_to_be32(fab[XREP_AGI_FINOBT].root);
792 agi->agi_free_level = cpu_to_be32(fab[XREP_AGI_FINOBT].height);
793 }
794}
795
796
/* Update the AGI counters that are derived from the inode btrees. */
STATIC int
xrep_agi_calc_from_btrees(
	struct xfs_scrub	*sc,
	struct xfs_buf		*agi_bp)
{
	struct xfs_btree_cur	*cur;
	struct xfs_agi		*agi = agi_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;
	xfs_agino_t		count;
	xfs_agino_t		freecount;
	int			error;

	/* Count the allocated and free inodes from the inobt. */
	cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp,
			sc->sa.pag, XFS_BTNUM_INO);
	error = xfs_ialloc_count_inodes(cur, &count, &freecount);
	if (error)
		goto err;
	/* Record the inobt block count when the feature stores it on disk. */
	if (xfs_sb_version_hasinobtcounts(&mp->m_sb)) {
		xfs_agblock_t	blocks;

		error = xfs_btree_count_blocks(cur, &blocks);
		if (error)
			goto err;
		agi->agi_iblocks = cpu_to_be32(blocks);
	}
	xfs_btree_del_cursor(cur, error);

	agi->agi_count = cpu_to_be32(count);
	agi->agi_freecount = cpu_to_be32(freecount);

	/* Likewise for the finobt block count, if both features exist. */
	if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
	    xfs_sb_version_hasinobtcounts(&mp->m_sb)) {
		xfs_agblock_t	blocks;

		cur = xfs_inobt_init_cursor(mp, sc->tp, agi_bp,
				sc->sa.pag, XFS_BTNUM_FINO);
		error = xfs_btree_count_blocks(cur, &blocks);
		if (error)
			goto err;
		xfs_btree_del_cursor(cur, error);
		agi->agi_fblocks = cpu_to_be32(blocks);
	}

	return 0;
err:
	/* Tear down whichever cursor was live when the error hit. */
	xfs_btree_del_cursor(cur, error);
	return error;
}
845
846
847STATIC int
848xrep_agi_commit_new(
849 struct xfs_scrub *sc,
850 struct xfs_buf *agi_bp)
851{
852 struct xfs_perag *pag;
853 struct xfs_agi *agi = agi_bp->b_addr;
854
855
856 xfs_force_summary_recalc(sc->mp);
857
858
859 xfs_trans_buf_set_type(sc->tp, agi_bp, XFS_BLFT_AGI_BUF);
860 xfs_trans_log_buf(sc->tp, agi_bp, 0, BBTOB(agi_bp->b_length) - 1);
861
862
863 pag = sc->sa.pag;
864 pag->pagi_count = be32_to_cpu(agi->agi_count);
865 pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
866 pag->pagi_init = 1;
867
868 return 0;
869}
870
871
/* Repair the AGI.  Requires the rmapbt, so v5/rmapbt filesystems only. */
int
xrep_agi(
	struct xfs_scrub		*sc)
{
	/* Btree roots to locate via the rmapbt, keyed by rmap owner. */
	struct xrep_find_ag_btree	fab[XREP_AGI_MAX] = {
		[XREP_AGI_INOBT] = {
			.rmap_owner = XFS_RMAP_OWN_INOBT,
			.buf_ops = &xfs_inobt_buf_ops,
		},
		[XREP_AGI_FINOBT] = {
			.rmap_owner = XFS_RMAP_OWN_INOBT,
			.buf_ops = &xfs_finobt_buf_ops,
		},
		[XREP_AGI_END] = {
			.buf_ops = NULL
		},
	};
	struct xfs_agi			old_agi;
	struct xfs_mount		*mp = sc->mp;
	struct xfs_buf			*agi_bp;
	struct xfs_agi			*agi;
	int				error;

	/* We require the rmapbt to rebuild anything. */
	if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
		return -EOPNOTSUPP;

	xchk_perag_get(sc->mp, &sc->sa);
	/*
	 * Read the AGI buffer with NULL buf ops since the header may be
	 * corrupt and would fail verification; attach the real verifier
	 * once we hold the buffer.
	 */
	error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, sc->sa.agno, XFS_AGI_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &agi_bp, NULL);
	if (error)
		return error;
	agi_bp->b_ops = &xfs_agi_buf_ops;
	agi = agi_bp->b_addr;

	/* Find the AGI btree roots. */
	error = xrep_agi_find_btrees(sc, fab);
	if (error)
		return error;

	/*
	 * Rewrite the header (saving the old one in old_agi), implant the
	 * roots we found, and recompute the derived counters.
	 */
	xrep_agi_init_header(sc, agi_bp, &old_agi);
	xrep_agi_set_roots(sc, agi, fab);
	error = xrep_agi_calc_from_btrees(sc, agi_bp);
	if (error)
		goto out_revert;

	/* Commit the new AGI and refresh the incore state. */
	return xrep_agi_commit_new(sc, agi_bp);

out_revert:
	/* Mark the incore AGI state stale and put back the old AGI. */
	sc->sa.pag->pagi_init = 0;
	memcpy(agi, &old_agi, sizeof(old_agi));
	return error;
}
933