linux/fs/btrfs/scrub.c
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "ordered-data.h"
#include "transaction.h"
#include "backref.h"
#include "extent_io.h"
#include "check-integrity.h"
#include "rcu-string.h"

/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 *  - In case an unrepairable extent is encountered, track which files are
 *    affected and report them
 *  - track and record media errors, throw out bad devices
 *  - add a mode to also read unallocated space
 */

struct scrub_block;
struct scrub_dev;

#define SCRUB_PAGES_PER_BIO     16      /* 64k per bio */
#define SCRUB_BIOS_PER_DEV      16      /* 1 MB per device in flight */
#define SCRUB_MAX_PAGES_PER_BLOCK       16      /* 64k per node/leaf/sector */

struct scrub_page {
        struct scrub_block      *sblock;
        struct page             *page;
        struct btrfs_device     *dev;
        u64                     flags;  /* extent flags */
        u64                     generation;
        u64                     logical;
        u64                     physical;
        struct {
                unsigned int    mirror_num:8;
                unsigned int    have_csum:1;
                unsigned int    io_error:1;
        };
        u8                      csum[BTRFS_CSUM_SIZE];
};

struct scrub_bio {
        int                     index;
        struct scrub_dev        *sdev;
        struct bio              *bio;
        int                     err;
        u64                     logical;
        u64                     physical;
        struct scrub_page       *pagev[SCRUB_PAGES_PER_BIO];
        int                     page_count;
        int                     next_free;
        struct btrfs_work       work;
};

struct scrub_block {
        struct scrub_page       pagev[SCRUB_MAX_PAGES_PER_BLOCK];
        int                     page_count;
        atomic_t                outstanding_pages;
        atomic_t                ref_count; /* free mem on transition to zero */
        struct scrub_dev        *sdev;
        struct {
                unsigned int    header_error:1;
                unsigned int    checksum_error:1;
                unsigned int    no_io_error_seen:1;
                unsigned int    generation_error:1; /* also sets header_error */
        };
};

struct scrub_dev {
        struct scrub_bio        *bios[SCRUB_BIOS_PER_DEV];
        struct btrfs_device     *dev;
        int                     first_free;
        int                     curr;
        atomic_t                in_flight;
        atomic_t                fixup_cnt;
        spinlock_t              list_lock;
        wait_queue_head_t       list_wait;
        u16                     csum_size;
        struct list_head        csum_list;
        atomic_t                cancel_req;
        int                     readonly;
        int                     pages_per_bio; /* <= SCRUB_PAGES_PER_BIO */
        u32                     sectorsize;
        u32                     nodesize;
        u32                     leafsize;
        /*
         * statistics
         */
        struct btrfs_scrub_progress stat;
        spinlock_t              stat_lock;
};

struct scrub_fixup_nodatasum {
        struct scrub_dev        *sdev;
        u64                     logical;
        struct btrfs_root       *root;
        struct btrfs_work       work;
        int                     mirror_num;
};

struct scrub_warning {
        struct btrfs_path       *path;
        u64                     extent_item_size;
        char                    *scratch_buf;
        char                    *msg_buf;
        const char              *errstr;
        sector_t                sector;
        u64                     logical;
        struct btrfs_device     *dev;
        int                     msg_bufsize;
        int                     scratch_bufsize;
};


static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
static int scrub_setup_recheck_block(struct scrub_dev *sdev,
                                     struct btrfs_mapping_tree *map_tree,
                                     u64 length, u64 logical,
                                     struct scrub_block *sblock);
static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
                               struct scrub_block *sblock, int is_metadata,
                               int have_csum, u8 *csum, u64 generation,
                               u16 csum_size);
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
                                         struct scrub_block *sblock,
                                         int is_metadata, int have_csum,
                                         const u8 *csum, u64 generation,
                                         u16 csum_size);
static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
                                             struct scrub_block *sblock_good,
                                             int force_write);
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                                            struct scrub_block *sblock_good,
                                            int page_num, int force_write);
static int scrub_checksum_data(struct scrub_block *sblock);
static int scrub_checksum_tree_block(struct scrub_block *sblock);
static int scrub_checksum_super(struct scrub_block *sblock);
static void scrub_block_get(struct scrub_block *sblock);
static void scrub_block_put(struct scrub_block *sblock);
static int scrub_add_page_to_bio(struct scrub_dev *sdev,
                                 struct scrub_page *spage);
static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
                       u64 physical, u64 flags, u64 gen, int mirror_num,
                       u8 *csum, int force);
static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock);


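/* drop and free all checksums still queued on the device's csum_list */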
static void scrub_free_csums(struct scrub_dev *sdev)
{
        while (!list_empty(&sdev->csum_list)) {
                struct btrfs_ordered_sum *sum;
                sum = list_first_entry(&sdev->csum_list,
                                       struct btrfs_ordered_sum, list);
                list_del(&sum->list);
                kfree(sum);
        }
}

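/*
 * tear down a scrub_dev: release the blocks still referenced by the
 * current in-flight bio (which can exist when scrub was cancelled), free
 * all scrub_bios, the remaining checksums and the scrub_dev itself
 */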
static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
{
        int i;

        if (!sdev)
                return;

        /* this can happen when scrub is cancelled */
        if (sdev->curr != -1) {
                struct scrub_bio *sbio = sdev->bios[sdev->curr];

                for (i = 0; i < sbio->page_count; i++) {
                        BUG_ON(!sbio->pagev[i]);
                        BUG_ON(!sbio->pagev[i]->page);
                        scrub_block_put(sbio->pagev[i]->sblock);
                }
                bio_put(sbio->bio);
        }

        for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
                struct scrub_bio *sbio = sdev->bios[i];

                if (!sbio)
                        break;
                kfree(sbio);
        }

        scrub_free_csums(sdev);
        kfree(sdev);
}

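/*
 * allocate and initialize the per-device scrub context: a fixed pool of
 * SCRUB_BIOS_PER_DEV scrub_bios chained through next_free, plus the locks,
 * counters and wait queue used to throttle and cancel the scrub
 */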
static noinline_for_stack
struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
{
        struct scrub_dev *sdev;
        int             i;
        struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
        int pages_per_bio;

        pages_per_bio = min_t(int, SCRUB_PAGES_PER_BIO,
                              bio_get_nr_vecs(dev->bdev));
        sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
        if (!sdev)
                goto nomem;
        sdev->dev = dev;
        sdev->pages_per_bio = pages_per_bio;
        sdev->curr = -1;
        for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
                struct scrub_bio *sbio;

                sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
                if (!sbio)
                        goto nomem;
                sdev->bios[i] = sbio;

                sbio->index = i;
                sbio->sdev = sdev;
                sbio->page_count = 0;
                sbio->work.func = scrub_bio_end_io_worker;

                if (i != SCRUB_BIOS_PER_DEV-1)
                        sdev->bios[i]->next_free = i + 1;
                else
                        sdev->bios[i]->next_free = -1;
        }
        sdev->first_free = 0;
        sdev->nodesize = dev->dev_root->nodesize;
        sdev->leafsize = dev->dev_root->leafsize;
        sdev->sectorsize = dev->dev_root->sectorsize;
        atomic_set(&sdev->in_flight, 0);
        atomic_set(&sdev->fixup_cnt, 0);
        atomic_set(&sdev->cancel_req, 0);
        sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
        INIT_LIST_HEAD(&sdev->csum_list);

        spin_lock_init(&sdev->list_lock);
        spin_lock_init(&sdev->stat_lock);
        init_waitqueue_head(&sdev->list_wait);
        return sdev;

nomem:
        scrub_free_dev(sdev);
        return ERR_PTR(-ENOMEM);
}

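/*
 * backref walker callback: for one inode that references the errored
 * extent, resolve all paths to that inode and print one warning line
 * per path
 */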
static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
{
        u64 isize;
        u32 nlink;
        int ret;
        int i;
        struct extent_buffer *eb;
        struct btrfs_inode_item *inode_item;
        struct scrub_warning *swarn = ctx;
        struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
        struct inode_fs_paths *ipath = NULL;
        struct btrfs_root *local_root;
        struct btrfs_key root_key;

        root_key.objectid = root;
        root_key.type = BTRFS_ROOT_ITEM_KEY;
        root_key.offset = (u64)-1;
        local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
        if (IS_ERR(local_root)) {
                ret = PTR_ERR(local_root);
                goto err;
        }

        ret = inode_item_info(inum, 0, local_root, swarn->path);
        if (ret) {
                btrfs_release_path(swarn->path);
                goto err;
        }

        eb = swarn->path->nodes[0];
        inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
                                        struct btrfs_inode_item);
        isize = btrfs_inode_size(eb, inode_item);
        nlink = btrfs_inode_nlink(eb, inode_item);
        btrfs_release_path(swarn->path);

        ipath = init_ipath(4096, local_root, swarn->path);
        if (IS_ERR(ipath)) {
                ret = PTR_ERR(ipath);
                ipath = NULL;
                goto err;
        }
        ret = paths_from_inode(inum, ipath);

        if (ret < 0)
                goto err;

        /*
         * we deliberately ignore the fact that ipath might have been too
         * small to hold all of the paths here
         */
        for (i = 0; i < ipath->fspath->elem_cnt; ++i)
                printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
                        "%s, sector %llu, root %llu, inode %llu, offset %llu, "
                        "length %llu, links %u (path: %s)\n", swarn->errstr,
                        swarn->logical, rcu_str_deref(swarn->dev->name),
                        (unsigned long long)swarn->sector, root, inum, offset,
                        min(isize - offset, (u64)PAGE_SIZE), nlink,
                        (char *)(unsigned long)ipath->fspath->val[i]);

        free_ipath(ipath);
        return 0;

err:
        printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
                "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
                "resolving failed with ret=%d\n", swarn->errstr,
                swarn->logical, rcu_str_deref(swarn->dev->name),
                (unsigned long long)swarn->sector, root, inum, offset, ret);

        free_ipath(ipath);
        return 0;
}

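/*
 * print a warning for an errored block. For metadata, the tree backrefs
 * are resolved and printed directly; for data, all inodes referencing the
 * extent are iterated and reported via scrub_print_warning_inode()
 */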
static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
{
        struct btrfs_device *dev = sblock->sdev->dev;
        struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
        struct btrfs_path *path;
        struct btrfs_key found_key;
        struct extent_buffer *eb;
        struct btrfs_extent_item *ei;
        struct scrub_warning swarn;
        u32 item_size;
        int ret;
        u64 ref_root;
        u8 ref_level;
        unsigned long ptr = 0;
        const int bufsize = 4096;
        u64 extent_item_pos;

        path = btrfs_alloc_path();

        swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
        swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
        BUG_ON(sblock->page_count < 1);
        swarn.sector = (sblock->pagev[0].physical) >> 9;
        swarn.logical = sblock->pagev[0].logical;
        swarn.errstr = errstr;
        swarn.dev = dev;
        swarn.msg_bufsize = bufsize;
        swarn.scratch_bufsize = bufsize;

        if (!path || !swarn.scratch_buf || !swarn.msg_buf)
                goto out;

        ret = extent_from_logical(fs_info, swarn.logical, path, &found_key);
        if (ret < 0)
                goto out;

        extent_item_pos = swarn.logical - found_key.objectid;
        swarn.extent_item_size = found_key.offset;

        eb = path->nodes[0];
        ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
        item_size = btrfs_item_size_nr(eb, path->slots[0]);
        btrfs_release_path(path);

        if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                do {
                        ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
                                                        &ref_root, &ref_level);
                        printk_in_rcu(KERN_WARNING
                                "btrfs: %s at logical %llu on dev %s, "
                                "sector %llu: metadata %s (level %d) in tree "
                                "%llu\n", errstr, swarn.logical,
                                rcu_str_deref(dev->name),
                                (unsigned long long)swarn.sector,
                                ref_level ? "node" : "leaf",
                                ret < 0 ? -1 : ref_level,
                                ret < 0 ? -1 : ref_root);
                } while (ret != 1);
        } else {
                swarn.path = path;
                iterate_extent_inodes(fs_info, found_key.objectid,
                                        extent_item_pos, 1,
                                        scrub_print_warning_inode, &swarn);
        }

out:
        btrfs_free_path(path);
        kfree(swarn.scratch_buf);
        kfree(swarn.msg_buf);
}

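/*
 * callback for iterate_inodes_from_logical(): re-read one page of the
 * inode through the regular readpage path (or rewrite it via
 * repair_io_failure() if the page is already uptodate) so that the bad
 * sector gets corrected on disk. Returns 1 to stop the iteration once
 * the sector is known to be fixed.
 */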
static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
{
        struct page *page = NULL;
        unsigned long index;
        struct scrub_fixup_nodatasum *fixup = ctx;
        int ret;
        int corrected = 0;
        struct btrfs_key key;
        struct inode *inode = NULL;
        u64 end = offset + PAGE_SIZE - 1;
        struct btrfs_root *local_root;

        key.objectid = root;
        key.type = BTRFS_ROOT_ITEM_KEY;
        key.offset = (u64)-1;
        local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
        if (IS_ERR(local_root))
                return PTR_ERR(local_root);

        key.type = BTRFS_INODE_ITEM_KEY;
        key.objectid = inum;
        key.offset = 0;
        inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        index = offset >> PAGE_CACHE_SHIFT;

        page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
        if (!page) {
                ret = -ENOMEM;
                goto out;
        }

        if (PageUptodate(page)) {
                struct btrfs_mapping_tree *map_tree;
                if (PageDirty(page)) {
                        /*
                         * we need to write the data to the defect sector. the
                         * data that was in that sector is not in memory,
                         * because the page was modified. we must not write the
                         * modified page to that sector.
                         *
                         * TODO: what could be done here: wait for the delalloc
                         *       runner to write out that page (might involve
                         *       COW) and see whether the sector is still
                         *       referenced afterwards.
                         *
                         * For the meantime, we treat this error as
                         * uncorrectable, although there is a chance that a
                         * later scrub will find the bad sector again when
                         * there is no dirty page in memory anymore.
                         */
                        ret = -EIO;
                        goto out;
                }
                map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
                ret = repair_io_failure(map_tree, offset, PAGE_SIZE,
                                        fixup->logical, page,
                                        fixup->mirror_num);
                unlock_page(page);
                corrected = !ret;
        } else {
                /*
                 * we need to get good data first. the general readpage path
                 * will call repair_io_failure for us, we just have to make
                 * sure we read the bad mirror.
                 */
                ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
                                        EXTENT_DAMAGED, GFP_NOFS);
                if (ret) {
                        /* set_extent_bits should give proper error */
                        WARN_ON(ret > 0);
                        if (ret > 0)
                                ret = -EFAULT;
                        goto out;
                }

                ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
                                                btrfs_get_extent,
                                                fixup->mirror_num);
                wait_on_page_locked(page);

                corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
                                                end, EXTENT_DAMAGED, 0, NULL);
                if (!corrected)
                        clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
                                                EXTENT_DAMAGED, GFP_NOFS);
        }

out:
        if (page)
                put_page(page);
        if (inode)
                iput(inode);

        if (ret < 0)
                return ret;

        if (ret == 0 && corrected) {
                /*
                 * we only need to call readpage for one of the inodes belonging
                 * to this extent. so make iterate_extent_inodes stop
                 */
                return 1;
        }

        return -EIO;
}

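/*
 * worker for the nodatasum case: data without a checksum cannot be
 * verified against a csum, so correction is attempted by re-reading the
 * affected pages through the regular read path (see scrub_fixup_readpage
 * above), which repairs the sector on the fly if a good copy exists
 */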
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
        int ret;
        struct scrub_fixup_nodatasum *fixup;
        struct scrub_dev *sdev;
        struct btrfs_trans_handle *trans = NULL;
        struct btrfs_fs_info *fs_info;
        struct btrfs_path *path;
        int uncorrectable = 0;

        fixup = container_of(work, struct scrub_fixup_nodatasum, work);
        sdev = fixup->sdev;
        fs_info = fixup->root->fs_info;

        path = btrfs_alloc_path();
        if (!path) {
                spin_lock(&sdev->stat_lock);
                ++sdev->stat.malloc_errors;
                spin_unlock(&sdev->stat_lock);
                uncorrectable = 1;
                goto out;
        }

        trans = btrfs_join_transaction(fixup->root);
        if (IS_ERR(trans)) {
                uncorrectable = 1;
                goto out;
        }

        /*
         * the idea is to trigger a regular read through the standard path. we
         * read a page from the (failed) logical address by specifying the
         * corresponding copy number of the failed sector. thus, that readpage
         * is expected to fail.
         * that is the point where on-the-fly error correction will kick in
         * (once it's finished) and rewrite the failed sector if a good copy
         * can be found.
         */
        ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
                                                path, scrub_fixup_readpage,
                                                fixup);
        if (ret < 0) {
                uncorrectable = 1;
                goto out;
        }
        WARN_ON(ret != 1);

        spin_lock(&sdev->stat_lock);
        ++sdev->stat.corrected_errors;
        spin_unlock(&sdev->stat_lock);

out:
        if (trans && !IS_ERR(trans))
                btrfs_end_transaction(trans, fixup->root);
        if (uncorrectable) {
                spin_lock(&sdev->stat_lock);
                ++sdev->stat.uncorrectable_errors;
                spin_unlock(&sdev->stat_lock);

                printk_ratelimited_in_rcu(KERN_ERR
                        "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
                        (unsigned long long)fixup->logical,
                        rcu_str_deref(sdev->dev->name));
        }

        btrfs_free_path(path);
        kfree(fixup);

        /*
         * see the caller for why we're pretending to be paused in the
         * scrub counters
         */
        mutex_lock(&fs_info->scrub_lock);
        atomic_dec(&fs_info->scrubs_running);
        atomic_dec(&fs_info->scrubs_paused);
        mutex_unlock(&fs_info->scrub_lock);
        atomic_dec(&sdev->fixup_cnt);
        wake_up(&fs_info->scrub_pause_wait);
        wake_up(&sdev->list_wait);
}

/*
 * scrub_handle_errored_block gets called when either verification of the
 * pages failed or the bio failed to read, e.g. with EIO. In the latter
 * case, this function handles all pages in the bio, even though only one
 * may be bad.
 * The goal of this function is to repair the errored block by using the
 * contents of one of the mirrors.
 */
static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
{
        struct scrub_dev *sdev = sblock_to_check->sdev;
        struct btrfs_fs_info *fs_info;
        u64 length;
        u64 logical;
        u64 generation;
        unsigned int failed_mirror_index;
        unsigned int is_metadata;
        unsigned int have_csum;
        u8 *csum;
        struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
        struct scrub_block *sblock_bad;
        int ret;
        int mirror_index;
        int page_num;
        int success;
        static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);

        BUG_ON(sblock_to_check->page_count < 1);
        fs_info = sdev->dev->dev_root->fs_info;
        length = sblock_to_check->page_count * PAGE_SIZE;
        logical = sblock_to_check->pagev[0].logical;
        generation = sblock_to_check->pagev[0].generation;
        BUG_ON(sblock_to_check->pagev[0].mirror_num < 1);
        failed_mirror_index = sblock_to_check->pagev[0].mirror_num - 1;
        is_metadata = !(sblock_to_check->pagev[0].flags &
                        BTRFS_EXTENT_FLAG_DATA);
        have_csum = sblock_to_check->pagev[0].have_csum;
        csum = sblock_to_check->pagev[0].csum;

        /*
         * read all mirrors one after the other. This includes
         * re-reading the extent or metadata block that failed (which
         * is the reason this fixup code is called), this time page by
         * page, in order to know which pages caused I/O errors and
         * which ones are good (for all mirrors).
         * The goal is to handle the situation when more than one
         * mirror contains I/O errors, but the errors do not
         * overlap, i.e. the data can be repaired by selecting the
         * pages from those mirrors without I/O error on the
         * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
         * would be that mirror #1 has an I/O error on the first page,
         * the second page is good, and mirror #2 has an I/O error on
         * the second page, but the first page is good.
         * Then the first page of the first mirror can be repaired by
         * taking the first page of the second mirror, and the
         * second page of the second mirror can be repaired by
         * copying the contents of the 2nd page of the 1st mirror.
         * One more note: if the pages of one mirror contain I/O
         * errors, the checksum cannot be verified. In order to get
         * the best data for repairing, the first attempt is to find
         * a mirror without I/O errors and with a validated checksum.
         * Only if this is not possible, the pages are picked from
         * mirrors with I/O errors without considering the checksum.
         * If the latter is the case, at the end, the checksum of the
         * repaired area is verified in order to correctly maintain
         * the statistics.
         */

        sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
                                     sizeof(*sblocks_for_recheck),
                                     GFP_NOFS);
        if (!sblocks_for_recheck) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.malloc_errors++;
                sdev->stat.read_errors++;
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
                btrfs_dev_stat_inc_and_print(sdev->dev,
                                             BTRFS_DEV_STAT_READ_ERRS);
                goto out;
        }

        /* setup the context, map the logical blocks and alloc the pages */
        ret = scrub_setup_recheck_block(sdev, &fs_info->mapping_tree, length,
                                        logical, sblocks_for_recheck);
        if (ret) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.read_errors++;
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
                btrfs_dev_stat_inc_and_print(sdev->dev,
                                             BTRFS_DEV_STAT_READ_ERRS);
                goto out;
        }
        BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
        sblock_bad = sblocks_for_recheck + failed_mirror_index;

        /* build and submit the bios for the failed mirror, check checksums */
        ret = scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
                                  csum, generation, sdev->csum_size);
        if (ret) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.read_errors++;
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
                btrfs_dev_stat_inc_and_print(sdev->dev,
                                             BTRFS_DEV_STAT_READ_ERRS);
                goto out;
        }

        if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
            sblock_bad->no_io_error_seen) {
                /*
                 * the error disappeared after reading page by page, or
                 * the area was part of a huge bio and other parts of the
                 * bio caused I/O errors, or the block layer merged several
                 * read requests into one and the error is caused by a
                 * different bio (usually one of the two latter cases is
                 * the cause)
                 */
                spin_lock(&sdev->stat_lock);
                sdev->stat.unverified_errors++;
                spin_unlock(&sdev->stat_lock);

                goto out;
        }

        if (!sblock_bad->no_io_error_seen) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.read_errors++;
                spin_unlock(&sdev->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("i/o error", sblock_to_check);
                btrfs_dev_stat_inc_and_print(sdev->dev,
                                             BTRFS_DEV_STAT_READ_ERRS);
        } else if (sblock_bad->checksum_error) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.csum_errors++;
                spin_unlock(&sdev->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("checksum error", sblock_to_check);
                btrfs_dev_stat_inc_and_print(sdev->dev,
                                             BTRFS_DEV_STAT_CORRUPTION_ERRS);
        } else if (sblock_bad->header_error) {
                spin_lock(&sdev->stat_lock);
                sdev->stat.verify_errors++;
                spin_unlock(&sdev->stat_lock);
                if (__ratelimit(&_rs))
                        scrub_print_warning("checksum/header error",
                                            sblock_to_check);
                if (sblock_bad->generation_error)
                        btrfs_dev_stat_inc_and_print(sdev->dev,
                                BTRFS_DEV_STAT_GENERATION_ERRS);
                else
                        btrfs_dev_stat_inc_and_print(sdev->dev,
                                BTRFS_DEV_STAT_CORRUPTION_ERRS);
        }

        if (sdev->readonly)
                goto did_not_correct_error;

        if (!is_metadata && !have_csum) {
                struct scrub_fixup_nodatasum *fixup_nodatasum;

                /*
                 * !is_metadata and !have_csum: this means that the data
                 * might not be COWed and might be modified concurrently.
                 * The general strategy of working on the commit root does
                 * not help when COW is not used.
                 */
                fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
                if (!fixup_nodatasum)
                        goto did_not_correct_error;
                fixup_nodatasum->sdev = sdev;
                fixup_nodatasum->logical = logical;
                fixup_nodatasum->root = fs_info->extent_root;
                fixup_nodatasum->mirror_num = failed_mirror_index + 1;
                /*
                 * increment scrubs_running to prevent cancel requests from
                 * completing as long as a fixup worker is running. we must also
                 * increment scrubs_paused to prevent deadlocking on pause
                 * requests used for transaction commits (as the worker uses a
                 * transaction context). it is safe to regard the fixup worker
                 * as paused for all practical matters. effectively, we only
                 * prevent cancellation requests from completing.
                 */
                mutex_lock(&fs_info->scrub_lock);
                atomic_inc(&fs_info->scrubs_running);
                atomic_inc(&fs_info->scrubs_paused);
                mutex_unlock(&fs_info->scrub_lock);
                atomic_inc(&sdev->fixup_cnt);
                fixup_nodatasum->work.func = scrub_fixup_nodatasum;
                btrfs_queue_worker(&fs_info->scrub_workers,
                                   &fixup_nodatasum->work);
                goto out;
        }

        /*
         * now build and submit the bios for the other mirrors, check
         * checksums
         */
        for (mirror_index = 0;
             mirror_index < BTRFS_MAX_MIRRORS &&
             sblocks_for_recheck[mirror_index].page_count > 0;
             mirror_index++) {
                if (mirror_index == failed_mirror_index)
                        continue;

                /* build and submit the bios, check checksums */
                ret = scrub_recheck_block(fs_info,
                                          sblocks_for_recheck + mirror_index,
                                          is_metadata, have_csum, csum,
                                          generation, sdev->csum_size);
                if (ret)
                        goto did_not_correct_error;
        }

        /*
         * first try to pick a mirror that is completely free of I/O
         * errors and also has no checksum error.
         * If one is found, and if a checksum is present, the full block
         * that is known to contain an error is rewritten. Afterwards
         * the block is known to be corrected.
         * If a mirror is found that is completely correct, and no
         * checksum is present, only those pages are rewritten that had
         * an I/O error in the block to be repaired, since it cannot be
         * determined which copy of the other pages is better (otherwise
         * a correct page could be overwritten by a bad one).
         */
        for (mirror_index = 0;
             mirror_index < BTRFS_MAX_MIRRORS &&
             sblocks_for_recheck[mirror_index].page_count > 0;
             mirror_index++) {
                struct scrub_block *sblock_other = sblocks_for_recheck +
                                                   mirror_index;

                if (!sblock_other->header_error &&
                    !sblock_other->checksum_error &&
                    sblock_other->no_io_error_seen) {
                        int force_write = is_metadata || have_csum;

                        ret = scrub_repair_block_from_good_copy(sblock_bad,
                                                                sblock_other,
                                                                force_write);
                        if (0 == ret)
                                goto corrected_error;
                }
        }

        /*
         * in case of I/O errors in the area that is supposed to be
         * repaired, continue by picking good copies of those pages.
         * Select the good pages from mirrors to rewrite bad pages from
         * the area to fix. Afterwards verify the checksum of the block
         * that is supposed to be repaired. This verification step is
         * only done for the purpose of statistics counting and for the
         * final scrub report, whether errors remain.
         * A perfect algorithm could make use of the checksum and try
         * all possible combinations of pages from the different mirrors
         * until the checksum verification succeeds. For example, when
         * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
         * of mirror #2 is readable but the final checksum test fails,
         * then the 2nd page of mirror #3 could be tried, to see whether
         * the final checksum then succeeds. But this would be a rare
         * exception and is therefore not implemented. At least it is
         * avoided that the good copy is overwritten.
         * A more useful improvement would be to pick the sectors
         * without I/O error based on sector sizes (512 bytes on legacy
         * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
         * mirror could be repaired by taking 512 bytes of a different
         * mirror, even if other 512 byte sectors in the same PAGE_SIZE
         * area are unreadable.
         */

        /* can only fix I/O errors from here on */
        if (sblock_bad->no_io_error_seen)
                goto did_not_correct_error;

        success = 1;
        for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
                struct scrub_page *page_bad = sblock_bad->pagev + page_num;

                if (!page_bad->io_error)
                        continue;

                for (mirror_index = 0;
                     mirror_index < BTRFS_MAX_MIRRORS &&
                     sblocks_for_recheck[mirror_index].page_count > 0;
                     mirror_index++) {
                        struct scrub_block *sblock_other = sblocks_for_recheck +
                                                           mirror_index;
                        struct scrub_page *page_other = sblock_other->pagev +
                                                        page_num;

                        if (!page_other->io_error) {
                                ret = scrub_repair_page_from_good_copy(
                                        sblock_bad, sblock_other, page_num, 0);
                                if (0 == ret) {
                                        page_bad->io_error = 0;
                                        break; /* succeeded for this page */
                                }
                        }
                }

                if (page_bad->io_error) {
                        /* did not find a mirror to copy the page from */
                        success = 0;
                }
        }

        if (success) {
                if (is_metadata || have_csum) {
                        /*
                         * need to verify the checksum now that all
                         * sectors on disk are repaired (the write
                         * request for data to be repaired is on its way).
                         * Just be lazy and use scrub_recheck_block()
                         * which re-reads the data before the checksum
                         * is verified, but most likely the data comes out
                         * of the page cache.
                         */
                        ret = scrub_recheck_block(fs_info, sblock_bad,
                                                  is_metadata, have_csum, csum,
                                                  generation, sdev->csum_size);
                        if (!ret && !sblock_bad->header_error &&
                            !sblock_bad->checksum_error &&
                            sblock_bad->no_io_error_seen)
                                goto corrected_error;
                        else
                                goto did_not_correct_error;
                } else {
corrected_error:
                        spin_lock(&sdev->stat_lock);
                        sdev->stat.corrected_errors++;
                        spin_unlock(&sdev->stat_lock);
                        printk_ratelimited_in_rcu(KERN_ERR
                                "btrfs: fixed up error at logical %llu on dev %s\n",
                                (unsigned long long)logical,
                                rcu_str_deref(sdev->dev->name));
                }
        } else {
did_not_correct_error:
                spin_lock(&sdev->stat_lock);
                sdev->stat.uncorrectable_errors++;
                spin_unlock(&sdev->stat_lock);
                printk_ratelimited_in_rcu(KERN_ERR
                        "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
                        (unsigned long long)logical,
                        rcu_str_deref(sdev->dev->name));
        }

out:
        if (sblocks_for_recheck) {
                for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
                     mirror_index++) {
                        struct scrub_block *sblock = sblocks_for_recheck +
                                                     mirror_index;
                        int page_index;

                        for (page_index = 0; page_index < SCRUB_PAGES_PER_BIO;
                             page_index++)
                                if (sblock->pagev[page_index].page)
                                        __free_page(
                                                sblock->pagev[page_index].page);
                }
                kfree(sblocks_for_recheck);
        }

        return 0;
}

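/*
 * map the errored range PAGE_SIZE at a time; each stripe returned by
 * btrfs_map_block() then corresponds to one mirror. For every mirror,
 * fill one scrub_block with freshly allocated pages for the recheck
 * code to read into
 */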
static int scrub_setup_recheck_block(struct scrub_dev *sdev,
                                     struct btrfs_mapping_tree *map_tree,
                                     u64 length, u64 logical,
                                     struct scrub_block *sblocks_for_recheck)
{
        int page_index;
        int mirror_index;
        int ret;

        /*
         * note: the three members sdev, ref_count and outstanding_pages
         * are not used (and not set) in the blocks that are used for
         * the recheck procedure
         */

        page_index = 0;
        while (length > 0) {
                u64 sublen = min_t(u64, length, PAGE_SIZE);
                u64 mapped_length = sublen;
                struct btrfs_bio *bbio = NULL;

                /*
                 * with a length of PAGE_SIZE, each returned stripe
                 * represents one mirror
                 */
                ret = btrfs_map_block(map_tree, WRITE, logical, &mapped_length,
                                      &bbio, 0);
                if (ret || !bbio || mapped_length < sublen) {
                        kfree(bbio);
                        return -EIO;
                }

                BUG_ON(page_index >= SCRUB_PAGES_PER_BIO);
                for (mirror_index = 0; mirror_index < (int)bbio->num_stripes;
                     mirror_index++) {
                        struct scrub_block *sblock;
                        struct scrub_page *page;

                        if (mirror_index >= BTRFS_MAX_MIRRORS)
                                continue;

                        sblock = sblocks_for_recheck + mirror_index;
                        page = sblock->pagev + page_index;
                        page->logical = logical;
                        page->physical = bbio->stripes[mirror_index].physical;
                        /* for missing devices, dev->bdev is NULL */
                        page->dev = bbio->stripes[mirror_index].dev;
                        page->mirror_num = mirror_index + 1;
                        page->page = alloc_page(GFP_NOFS);
                        if (!page->page) {
                                spin_lock(&sdev->stat_lock);
                                sdev->stat.malloc_errors++;
                                spin_unlock(&sdev->stat_lock);
                                return -ENOMEM;
                        }
                        sblock->page_count++;
                }
                kfree(bbio);
                length -= sublen;
                logical += sublen;
                page_index++;
        }

        return 0;
}

/*
 * this function will check the on disk data for checksum errors, header
 * errors and read I/O errors. If any I/O error happens, the affected
 * pages are marked as bad. The goal is to enable scrub to take those
 * pages that are not errored from all the mirrors so that the pages
 * that are errored in the just handled mirror can be repaired.
 */
static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
                               struct scrub_block *sblock, int is_metadata,
                               int have_csum, u8 *csum, u64 generation,
                               u16 csum_size)
{
        int page_num;

        sblock->no_io_error_seen = 1;
        sblock->header_error = 0;
        sblock->checksum_error = 0;

        for (page_num = 0; page_num < sblock->page_count; page_num++) {
                struct bio *bio;
                int ret;
                struct scrub_page *page = sblock->pagev + page_num;
                DECLARE_COMPLETION_ONSTACK(complete);

                if (page->dev->bdev == NULL) {
                        page->io_error = 1;
                        sblock->no_io_error_seen = 0;
                        continue;
                }

                BUG_ON(!page->page);
                bio = bio_alloc(GFP_NOFS, 1);
                if (!bio)
                        return -EIO;
                bio->bi_bdev = page->dev->bdev;
                bio->bi_sector = page->physical >> 9;
                bio->bi_end_io = scrub_complete_bio_end_io;
                bio->bi_private = &complete;

                ret = bio_add_page(bio, page->page, PAGE_SIZE, 0);
                if (PAGE_SIZE != ret) {
                        bio_put(bio);
                        return -EIO;
                }
                btrfsic_submit_bio(READ, bio);

                /* this will also unplug the queue */
                wait_for_completion(&complete);

                page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
                if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                        sblock->no_io_error_seen = 0;
                bio_put(bio);
        }

        if (sblock->no_io_error_seen)
                scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
                                             have_csum, csum, generation,
                                             csum_size);

        return 0;
}

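/*
 * recompute the checksum of a block that was re-read page by page and
 * compare it against the expected value; for metadata, additionally
 * validate bytenr, fsid, chunk tree uuid and generation from the header
 */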
static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
                                         struct scrub_block *sblock,
                                         int is_metadata, int have_csum,
                                         const u8 *csum, u64 generation,
                                         u16 csum_size)
{
        int page_num;
        u8 calculated_csum[BTRFS_CSUM_SIZE];
        u32 crc = ~(u32)0;
        struct btrfs_root *root = fs_info->extent_root;
        void *mapped_buffer;

        BUG_ON(!sblock->pagev[0].page);
        if (is_metadata) {
                struct btrfs_header *h;

                mapped_buffer = kmap_atomic(sblock->pagev[0].page);
                h = (struct btrfs_header *)mapped_buffer;

                if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
                    memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
                    memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
                           BTRFS_UUID_SIZE)) {
                        sblock->header_error = 1;
                } else if (generation != le64_to_cpu(h->generation)) {
                        sblock->header_error = 1;
                        sblock->generation_error = 1;
                }
                csum = h->csum;
        } else {
                if (!have_csum)
                        return;

                mapped_buffer = kmap_atomic(sblock->pagev[0].page);
        }

        for (page_num = 0;;) {
                if (page_num == 0 && is_metadata)
                        crc = btrfs_csum_data(root,
                                ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
                                crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
                else
                        crc = btrfs_csum_data(root, mapped_buffer, crc,
                                              PAGE_SIZE);

                kunmap_atomic(mapped_buffer);
                page_num++;
                if (page_num >= sblock->page_count)
                        break;
                BUG_ON(!sblock->pagev[page_num].page);

                mapped_buffer = kmap_atomic(sblock->pagev[page_num].page);
        }

        btrfs_csum_final(crc, calculated_csum);
        if (memcmp(calculated_csum, csum, csum_size))
                sblock->checksum_error = 1;
}

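/* end_io callback for the synchronous, completion-based I/O in this file */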
static void scrub_complete_bio_end_io(struct bio *bio, int err)
{
        complete((struct completion *)bio->bi_private);
}

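/*
 * copy a complete block from a good mirror over the bad one, page by
 * page; returns the last error seen, if any
 */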
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
                                             struct scrub_block *sblock_good,
                                             int force_write)
{
        int page_num;
        int ret = 0;

        for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
                int ret_sub;

                ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
                                                           sblock_good,
                                                           page_num,
                                                           force_write);
                if (ret_sub)
                        ret = ret_sub;
        }

        return ret;
}

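/*
 * rewrite a single page on the bad mirror with the contents of the good
 * copy. Unless force_write is set, only pages that are actually known to
 * be bad (block-level header or checksum error, or a page I/O error) are
 * written
 */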
static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                                            struct scrub_block *sblock_good,
                                            int page_num, int force_write)
{
        struct scrub_page *page_bad = sblock_bad->pagev + page_num;
        struct scrub_page *page_good = sblock_good->pagev + page_num;

        BUG_ON(sblock_bad->pagev[page_num].page == NULL);
        BUG_ON(sblock_good->pagev[page_num].page == NULL);
        if (force_write || sblock_bad->header_error ||
            sblock_bad->checksum_error || page_bad->io_error) {
                struct bio *bio;
                int ret;
                DECLARE_COMPLETION_ONSTACK(complete);

                bio = bio_alloc(GFP_NOFS, 1);
                if (!bio)
                        return -EIO;
                bio->bi_bdev = page_bad->dev->bdev;
                bio->bi_sector = page_bad->physical >> 9;
                bio->bi_end_io = scrub_complete_bio_end_io;
                bio->bi_private = &complete;

                ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
                if (PAGE_SIZE != ret) {
                        bio_put(bio);
                        return -EIO;
                }
                btrfsic_submit_bio(WRITE, bio);

                /* this will also unplug the queue */
                wait_for_completion(&complete);
                if (!bio_flagged(bio, BIO_UPTODATE)) {
                        btrfs_dev_stat_inc_and_print(page_bad->dev,
                                BTRFS_DEV_STAT_WRITE_ERRS);
                        bio_put(bio);
                        return -EIO;
                }
                bio_put(bio);
        }

        return 0;
}

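/*
 * dispatch the checksum verification based on the extent flags of the
 * first page and kick off repair if the block does not check out
 */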
static void scrub_checksum(struct scrub_block *sblock)
{
        u64 flags;
        int ret;

        BUG_ON(sblock->page_count < 1);
        flags = sblock->pagev[0].flags;
        ret = 0;
        if (flags & BTRFS_EXTENT_FLAG_DATA)
                ret = scrub_checksum_data(sblock);
        else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
                ret = scrub_checksum_tree_block(sblock);
        else if (flags & BTRFS_EXTENT_FLAG_SUPER)
                (void)scrub_checksum_super(sblock);
        else
                WARN_ON(1);
        if (ret)
                scrub_handle_errored_block(sblock);
}

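/*
 * verify a data block: checksum one sectorsize worth of data, page by
 * page, against the csum looked up earlier. Returns 1 on mismatch,
 * 0 otherwise (also when no checksum is available)
 */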
1257static int scrub_checksum_data(struct scrub_block *sblock)
1258{
1259        struct scrub_dev *sdev = sblock->sdev;
1260        u8 csum[BTRFS_CSUM_SIZE];
1261        u8 *on_disk_csum;
1262        struct page *page;
1263        void *buffer;
1264        u32 crc = ~(u32)0;
1265        int fail = 0;
1266        struct btrfs_root *root = sdev->dev->dev_root;
1267        u64 len;
1268        int index;
1269
1270        BUG_ON(sblock->page_count < 1);
1271        if (!sblock->pagev[0].have_csum)
1272                return 0;
1273
1274        on_disk_csum = sblock->pagev[0].csum;
1275        page = sblock->pagev[0].page;
1276        buffer = kmap_atomic(page);
1277
1278        len = sdev->sectorsize;
1279        index = 0;
1280        for (;;) {
1281                u64 l = min_t(u64, len, PAGE_SIZE);
1282
1283                crc = btrfs_csum_data(root, buffer, crc, l);
1284                kunmap_atomic(buffer);
1285                len -= l;
1286                if (len == 0)
1287                        break;
1288                index++;
1289                BUG_ON(index >= sblock->page_count);
1290                BUG_ON(!sblock->pagev[index].page);
1291                page = sblock->pagev[index].page;
1292                buffer = kmap_atomic(page);
1293        }
1294
1295        btrfs_csum_final(crc, csum);
1296        if (memcmp(csum, on_disk_csum, sdev->csum_size))
1297                fail = 1;
1298
1299        return fail;
1300}
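
/*
 * the data checksum loop above maps only one page at a time with
 * kmap_atomic(), feeds min(len, PAGE_SIZE) bytes into the running crc
 * and then moves on to the next scrub_page. A minimal userspace sketch
 * of the same accumulate-then-finalize pattern follows, with zlib's
 * crc32() standing in for btrfs_csum_data() (btrfs actually uses
 * crc32c; all sketch_* names are illustrative, not kernel API):
 */
#if 0   /* example only, not part of scrub.c */
#include <stddef.h>
#include <stdint.h>
#include <zlib.h>

#define SKETCH_PAGE_SIZE 4096

static uint32_t sketch_block_csum(const uint8_t *pages[], size_t npages,
                                  size_t len)
{
        uint32_t crc = crc32(0L, Z_NULL, 0);
        size_t i;

        for (i = 0; i < npages && len > 0; i++) {
                size_t l = len < SKETCH_PAGE_SIZE ? len : SKETCH_PAGE_SIZE;

                crc = crc32(crc, pages[i], l);  /* one page per round */
                len -= l;
        }
        return crc;     /* the caller compares this to the on-disk csum */
}
#endif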
1301
1302static int scrub_checksum_tree_block(struct scrub_block *sblock)
1303{
1304        struct scrub_dev *sdev = sblock->sdev;
1305        struct btrfs_header *h;
1306        struct btrfs_root *root = sdev->dev->dev_root;
1307        struct btrfs_fs_info *fs_info = root->fs_info;
1308        u8 calculated_csum[BTRFS_CSUM_SIZE];
1309        u8 on_disk_csum[BTRFS_CSUM_SIZE];
1310        struct page *page;
1311        void *mapped_buffer;
1312        u64 mapped_size;
1313        void *p;
1314        u32 crc = ~(u32)0;
1315        int fail = 0;
1316        int crc_fail = 0;
1317        u64 len;
1318        int index;
1319
1320        BUG_ON(sblock->page_count < 1);
1321        page = sblock->pagev[0].page;
1322        mapped_buffer = kmap_atomic(page);
1323        h = (struct btrfs_header *)mapped_buffer;
1324        memcpy(on_disk_csum, h->csum, sdev->csum_size);
1325
1326        /*
1327         * we don't use the getter functions here, as
1328         * a) we don't have an extent buffer and
1329         * b) the page is already kmapped
1330         */
1331
1332        if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr))
1333                ++fail;
1334
1335        if (sblock->pagev[0].generation != le64_to_cpu(h->generation))
1336                ++fail;
1337
1338        if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
1339                ++fail;
1340
1341        if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1342                   BTRFS_UUID_SIZE))
1343                ++fail;
1344
1345        BUG_ON(sdev->nodesize != sdev->leafsize);
1346        len = sdev->nodesize - BTRFS_CSUM_SIZE;
1347        mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1348        p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1349        index = 0;
1350        for (;;) {
1351                u64 l = min_t(u64, len, mapped_size);
1352
1353                crc = btrfs_csum_data(root, p, crc, l);
1354                kunmap_atomic(mapped_buffer);
1355                len -= l;
1356                if (len == 0)
1357                        break;
1358                index++;
1359                BUG_ON(index >= sblock->page_count);
1360                BUG_ON(!sblock->pagev[index].page);
1361                page = sblock->pagev[index].page;
1362                mapped_buffer = kmap_atomic(page);
1363                mapped_size = PAGE_SIZE;
1364                p = mapped_buffer;
1365        }
1366
1367        btrfs_csum_final(crc, calculated_csum);
1368        if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
1369                ++crc_fail;
1370
1371        return fail || crc_fail;
1372}
1373
1374static int scrub_checksum_super(struct scrub_block *sblock)
1375{
1376        struct btrfs_super_block *s;
1377        struct scrub_dev *sdev = sblock->sdev;
1378        struct btrfs_root *root = sdev->dev->dev_root;
1379        struct btrfs_fs_info *fs_info = root->fs_info;
1380        u8 calculated_csum[BTRFS_CSUM_SIZE];
1381        u8 on_disk_csum[BTRFS_CSUM_SIZE];
1382        struct page *page;
1383        void *mapped_buffer;
1384        u64 mapped_size;
1385        void *p;
1386        u32 crc = ~(u32)0;
1387        int fail_gen = 0;
1388        int fail_cor = 0;
1389        u64 len;
1390        int index;
1391
1392        BUG_ON(sblock->page_count < 1);
1393        page = sblock->pagev[0].page;
1394        mapped_buffer = kmap_atomic(page);
1395        s = (struct btrfs_super_block *)mapped_buffer;
1396        memcpy(on_disk_csum, s->csum, sdev->csum_size);
1397
1398        if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
1399                ++fail_cor;
1400
1401        if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
1402                ++fail_gen;
1403
1404        if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
1405                ++fail_cor;
1406
1407        len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1408        mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1409        p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1410        index = 0;
1411        for (;;) {
1412                u64 l = min_t(u64, len, mapped_size);
1413
1414                crc = btrfs_csum_data(root, p, crc, l);
1415                kunmap_atomic(mapped_buffer);
1416                len -= l;
1417                if (len == 0)
1418                        break;
1419                index++;
1420                BUG_ON(index >= sblock->page_count);
1421                BUG_ON(!sblock->pagev[index].page);
1422                page = sblock->pagev[index].page;
1423                mapped_buffer = kmap_atomic(page);
1424                mapped_size = PAGE_SIZE;
1425                p = mapped_buffer;
1426        }
1427
1428        btrfs_csum_final(crc, calculated_csum);
1429        if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
1430                ++fail_cor;
1431
1432        if (fail_cor + fail_gen) {
1433                /*
1434                 * if we find an error in a super block, we just report it;
1435                 * the super blocks get rewritten with the next transaction
1436                 * commit anyway
1437                 */
1438                spin_lock(&sdev->stat_lock);
1439                ++sdev->stat.super_errors;
1440                spin_unlock(&sdev->stat_lock);
1441                if (fail_cor)
1442                        btrfs_dev_stat_inc_and_print(sdev->dev,
1443                                BTRFS_DEV_STAT_CORRUPTION_ERRS);
1444                else
1445                        btrfs_dev_stat_inc_and_print(sdev->dev,
1446                                BTRFS_DEV_STAT_GENERATION_ERRS);
1447        }
1448
1449        return fail_cor + fail_gen;
1450}
1451
1452static void scrub_block_get(struct scrub_block *sblock)
1453{
1454        atomic_inc(&sblock->ref_count);
1455}
1456
1457static void scrub_block_put(struct scrub_block *sblock)
1458{
1459        if (atomic_dec_and_test(&sblock->ref_count)) {
1460                int i;
1461
1462                for (i = 0; i < sblock->page_count; i++)
1463                        if (sblock->pagev[i].page)
1464                                __free_page(sblock->pagev[i].page);
1465                kfree(sblock);
1466        }
1467}
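
/*
 * scrub_block lifetime: scrub_pages() creates a block with one
 * reference, scrub_add_page_to_bio() takes another for every page it
 * queues, the creator drops its reference once all pages are queued,
 * and scrub_bio_end_io_worker() drops one per completed page. Whichever
 * path drops the last reference frees the pages and the block above.
 */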
1468
1469static void scrub_submit(struct scrub_dev *sdev)
1470{
1471        struct scrub_bio *sbio;
1472
1473        if (sdev->curr == -1)
1474                return;
1475
1476        sbio = sdev->bios[sdev->curr];
1477        sdev->curr = -1;
1478        atomic_inc(&sdev->in_flight);
1479
1480        btrfsic_submit_bio(READ, sbio->bio);
1481}
1482
1483static int scrub_add_page_to_bio(struct scrub_dev *sdev,
1484                                 struct scrub_page *spage)
1485{
1486        struct scrub_block *sblock = spage->sblock;
1487        struct scrub_bio *sbio;
1488        int ret;
1489
1490again:
1491        /*
1492         * grab a fresh bio or wait for one to become available
1493         */
1494        while (sdev->curr == -1) {
1495                spin_lock(&sdev->list_lock);
1496                sdev->curr = sdev->first_free;
1497                if (sdev->curr != -1) {
1498                        sdev->first_free = sdev->bios[sdev->curr]->next_free;
1499                        sdev->bios[sdev->curr]->next_free = -1;
1500                        sdev->bios[sdev->curr]->page_count = 0;
1501                        spin_unlock(&sdev->list_lock);
1502                } else {
1503                        spin_unlock(&sdev->list_lock);
1504                        wait_event(sdev->list_wait, sdev->first_free != -1);
1505                }
1506        }
1507        sbio = sdev->bios[sdev->curr];
1508        if (sbio->page_count == 0) {
1509                struct bio *bio;
1510
1511                sbio->physical = spage->physical;
1512                sbio->logical = spage->logical;
1513                bio = sbio->bio;
1514                if (!bio) {
1515                        bio = bio_alloc(GFP_NOFS, sdev->pages_per_bio);
1516                        if (!bio)
1517                                return -ENOMEM;
1518                        sbio->bio = bio;
1519                }
1520
1521                bio->bi_private = sbio;
1522                bio->bi_end_io = scrub_bio_end_io;
1523                bio->bi_bdev = sdev->dev->bdev;
1524                bio->bi_sector = spage->physical >> 9;
1525                sbio->err = 0;
1526        } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1527                   spage->physical ||
1528                   sbio->logical + sbio->page_count * PAGE_SIZE !=
1529                   spage->logical) {
1530                scrub_submit(sdev);
1531                goto again;
1532        }
1533
1534        sbio->pagev[sbio->page_count] = spage;
1535        ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1536        if (ret != PAGE_SIZE) {
1537                if (sbio->page_count < 1) {
1538                        bio_put(sbio->bio);
1539                        sbio->bio = NULL;
1540                        return -EIO;
1541                }
1542                scrub_submit(sdev);
1543                goto again;
1544        }
1545
1546        scrub_block_get(sblock); /* one for the added page */
1547        atomic_inc(&sblock->outstanding_pages);
1548        sbio->page_count++;
1549        if (sbio->page_count == sdev->pages_per_bio)
1550                scrub_submit(sdev);
1551
1552        return 0;
1553}
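
/*
 * spare scrub_bios are recycled through an index-linked free list:
 * sdev->first_free holds the index of the first spare slot and each
 * scrub_bio's next_free field links to the following one, with -1
 * terminating the list. A self-contained sketch of that scheme (the
 * sketch_* names are illustrative, not kernel API):
 */
#if 0   /* example only, not part of scrub.c */
#define SKETCH_NBIOS 16

struct sketch_pool {
        int first_free;                 /* head of the free list, -1 if empty */
        int next_free[SKETCH_NBIOS];    /* per-slot link, -1 terminates */
};

static void sketch_pool_init(struct sketch_pool *p)
{
        int i;

        for (i = 0; i < SKETCH_NBIOS; i++)
                p->next_free[i] = (i + 1 < SKETCH_NBIOS) ? i + 1 : -1;
        p->first_free = 0;
}

/* cf. the "grab a fresh bio" loop in scrub_add_page_to_bio() above */
static int sketch_pool_get(struct sketch_pool *p)
{
        int slot = p->first_free;

        if (slot != -1) {
                p->first_free = p->next_free[slot];
                p->next_free[slot] = -1;
        }
        return slot;                    /* -1 tells the caller to wait */
}

/* cf. the tail of scrub_bio_end_io_worker() below */
static void sketch_pool_put(struct sketch_pool *p, int slot)
{
        p->next_free[slot] = p->first_free;
        p->first_free = slot;
}
#endif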
1554
1555static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
1556                       u64 physical, u64 flags, u64 gen, int mirror_num,
1557                       u8 *csum, int force)
1558{
1559        struct scrub_block *sblock;
1560        int index;
1561
1562        sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
1563        if (!sblock) {
1564                spin_lock(&sdev->stat_lock);
1565                sdev->stat.malloc_errors++;
1566                spin_unlock(&sdev->stat_lock);
1567                return -ENOMEM;
1568        }
1569
1570        /* one ref inside this function, plus one for each page later on */
1571        atomic_set(&sblock->ref_count, 1);
1572        sblock->sdev = sdev;
1573        sblock->no_io_error_seen = 1;
1574
1575        for (index = 0; len > 0; index++) {
1576                struct scrub_page *spage = sblock->pagev + index;
1577                u64 l = min_t(u64, len, PAGE_SIZE);
1578
1579                BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
1580                spage->page = alloc_page(GFP_NOFS);
1581                if (!spage->page) {
1582                        spin_lock(&sdev->stat_lock);
1583                        sdev->stat.malloc_errors++;
1584                        spin_unlock(&sdev->stat_lock);
1585                        while (index > 0) {
1586                                index--;
1587                                __free_page(sblock->pagev[index].page);
1588                        }
1589                        kfree(sblock);
1590                        return -ENOMEM;
1591                }
1592                spage->sblock = sblock;
1593                spage->dev = sdev->dev;
1594                spage->flags = flags;
1595                spage->generation = gen;
1596                spage->logical = logical;
1597                spage->physical = physical;
1598                spage->mirror_num = mirror_num;
1599                if (csum) {
1600                        spage->have_csum = 1;
1601                        memcpy(spage->csum, csum, sdev->csum_size);
1602                } else {
1603                        spage->have_csum = 0;
1604                }
1605                sblock->page_count++;
1606                len -= l;
1607                logical += l;
1608                physical += l;
1609        }
1610
1611        BUG_ON(sblock->page_count == 0);
1612        for (index = 0; index < sblock->page_count; index++) {
1613                struct scrub_page *spage = sblock->pagev + index;
1614                int ret;
1615
1616                ret = scrub_add_page_to_bio(sdev, spage);
1617                if (ret) {
1618                        scrub_block_put(sblock);
1619                        return ret;
1620                }
1621        }
1622
1623        if (force)
1624                scrub_submit(sdev);
1625
1626        /* last one frees, either here or in bio completion for last page */
1627        scrub_block_put(sblock);
1628        return 0;
1629}
1630
1631static void scrub_bio_end_io(struct bio *bio, int err)
1632{
1633        struct scrub_bio *sbio = bio->bi_private;
1634        struct scrub_dev *sdev = sbio->sdev;
1635        struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
1636
1637        sbio->err = err;
1638        sbio->bio = bio;
1639
1640        btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
1641}
1642
1643static void scrub_bio_end_io_worker(struct btrfs_work *work)
1644{
1645        struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1646        struct scrub_dev *sdev = sbio->sdev;
1647        int i;
1648
1649        BUG_ON(sbio->page_count > SCRUB_PAGES_PER_BIO);
1650        if (sbio->err) {
1651                for (i = 0; i < sbio->page_count; i++) {
1652                        struct scrub_page *spage = sbio->pagev[i];
1653
1654                        spage->io_error = 1;
1655                        spage->sblock->no_io_error_seen = 0;
1656                }
1657        }
1658
1659        /* now complete the scrub_block items that have all pages completed */
1660        for (i = 0; i < sbio->page_count; i++) {
1661                struct scrub_page *spage = sbio->pagev[i];
1662                struct scrub_block *sblock = spage->sblock;
1663
1664                if (atomic_dec_and_test(&sblock->outstanding_pages))
1665                        scrub_block_complete(sblock);
1666                scrub_block_put(sblock);
1667        }
1668
1669        if (sbio->err) {
1670                /* this reset appears redundant: the bio is put below and reallocated on next use */
1671                sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
1672                sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
1673                sbio->bio->bi_phys_segments = 0;
1674                sbio->bio->bi_idx = 0;
1675
1676                for (i = 0; i < sbio->page_count; i++) {
1677                        struct bio_vec *bi;
1678                        bi = &sbio->bio->bi_io_vec[i];
1679                        bi->bv_offset = 0;
1680                        bi->bv_len = PAGE_SIZE;
1681                }
1682        }
1683
1684        bio_put(sbio->bio);
1685        sbio->bio = NULL;
1686        spin_lock(&sdev->list_lock);
1687        sbio->next_free = sdev->first_free;
1688        sdev->first_free = sbio->index;
1689        spin_unlock(&sdev->list_lock);
1690        atomic_dec(&sdev->in_flight);
1691        wake_up(&sdev->list_wait);
1692}
1693
1694static void scrub_block_complete(struct scrub_block *sblock)
1695{
1696        if (!sblock->no_io_error_seen)
1697                scrub_handle_errored_block(sblock);
1698        else
1699                scrub_checksum(sblock);
1700}
1701
1702static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
1703                           u8 *csum)
1704{
1705        struct btrfs_ordered_sum *sum = NULL;
1706        int ret = 0;
1707        unsigned long i;
1708        unsigned long num_sectors;
1709
1710        while (!list_empty(&sdev->csum_list)) {
1711                sum = list_first_entry(&sdev->csum_list,
1712                                       struct btrfs_ordered_sum, list);
1713                if (sum->bytenr > logical)
1714                        return 0;
1715                if (sum->bytenr + sum->len > logical)
1716                        break;
1717
1718                ++sdev->stat.csum_discards;
1719                list_del(&sum->list);
1720                kfree(sum);
1721                sum = NULL;
1722        }
1723        if (!sum)
1724                return 0;
1725
1726        num_sectors = sum->len / sdev->sectorsize;
1727        for (i = 0; i < num_sectors; ++i) {
1728                if (sum->sums[i].bytenr == logical) {
1729                        memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
1730                        ret = 1;
1731                        break;
1732                }
1733        }
1734        if (ret && i == num_sectors - 1) {
1735                list_del(&sum->list);
1736                kfree(sum);
1737        }
1738        return ret;
1739}
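
/*
 * sdev->csum_list is sorted by bytenr (it is filled stripe by stripe
 * via btrfs_lookup_csums_range() in scrub_stripe() below). Entries
 * ending at or before the requested logical address can never match
 * again, so they are dropped and counted as csum_discards; an entry is
 * also freed once its last sector has been consumed.
 */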
1740
1741/* scrub extent tries to collect up to 64 kB for each bio */
1742static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
1743                        u64 physical, u64 flags, u64 gen, int mirror_num)
1744{
1745        int ret;
1746        u8 csum[BTRFS_CSUM_SIZE];
1747        u32 blocksize;
1748
1749        if (flags & BTRFS_EXTENT_FLAG_DATA) {
1750                blocksize = sdev->sectorsize;
1751                spin_lock(&sdev->stat_lock);
1752                sdev->stat.data_extents_scrubbed++;
1753                sdev->stat.data_bytes_scrubbed += len;
1754                spin_unlock(&sdev->stat_lock);
1755        } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1756                BUG_ON(sdev->nodesize != sdev->leafsize);
1757                blocksize = sdev->nodesize;
1758                spin_lock(&sdev->stat_lock);
1759                sdev->stat.tree_extents_scrubbed++;
1760                sdev->stat.tree_bytes_scrubbed += len;
1761                spin_unlock(&sdev->stat_lock);
1762        } else {
1763                blocksize = sdev->sectorsize;
1764                BUG_ON(1);
1765        }
1766
1767        while (len) {
1768                u64 l = min_t(u64, len, blocksize);
1769                int have_csum = 0;
1770
1771                if (flags & BTRFS_EXTENT_FLAG_DATA) {
1772                        /* push csums to sbio */
1773                        have_csum = scrub_find_csum(sdev, logical, l, csum);
1774                        if (have_csum == 0)
1775                                ++sdev->stat.no_csum;
1776                }
1777                ret = scrub_pages(sdev, logical, l, physical, flags, gen,
1778                                  mirror_num, have_csum ? csum : NULL, 0);
1779                if (ret)
1780                        return ret;
1781                len -= l;
1782                logical += l;
1783                physical += l;
1784        }
1785        return 0;
1786}
1787
1788static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
1789        struct map_lookup *map, int num, u64 base, u64 length)
1790{
1791        struct btrfs_path *path;
1792        struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
1793        struct btrfs_root *root = fs_info->extent_root;
1794        struct btrfs_root *csum_root = fs_info->csum_root;
1795        struct btrfs_extent_item *extent;
1796        struct blk_plug plug;
1797        u64 flags;
1798        int ret;
1799        int slot;
1800        int i;
1801        u64 nstripes;
1802        struct extent_buffer *l;
1803        struct btrfs_key key;
1804        u64 physical;
1805        u64 logical;
1806        u64 generation;
1807        int mirror_num;
1808        struct reada_control *reada1;
1809        struct reada_control *reada2;
1810        struct btrfs_key key_start;
1811        struct btrfs_key key_end;
1812
1813        u64 increment = map->stripe_len;
1814        u64 offset;
1815
1816        nstripes = length;
1817        offset = 0;
1818        do_div(nstripes, map->stripe_len);
1819        if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
1820                offset = map->stripe_len * num;
1821                increment = map->stripe_len * map->num_stripes;
1822                mirror_num = 1;
1823        } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1824                int factor = map->num_stripes / map->sub_stripes;
1825                offset = map->stripe_len * (num / map->sub_stripes);
1826                increment = map->stripe_len * factor;
1827                mirror_num = num % map->sub_stripes + 1;
1828        } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
1829                increment = map->stripe_len;
1830                mirror_num = num % map->num_stripes + 1;
1831        } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
1832                increment = map->stripe_len;
1833                mirror_num = num % map->num_stripes + 1;
1834        } else {
1835                increment = map->stripe_len;
1836                mirror_num = 1;
1837        }
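        /*
         * worked example with illustrative numbers: for RAID0 with
         * num_stripes = 2 and stripe_len = 64K, the device holding
         * stripe num = 1 starts at offset = 64K into the chunk and
         * advances by increment = 128K, i.e. it scrubs the chunk's
         * logical ranges [64K,128K), [192K,256K), and so on. For RAID10
         * with num_stripes = 4 and sub_stripes = 2, num = 3 yields
         * factor = 2, offset = 64K, increment = 128K and mirror_num = 2
         * (the second copy of that sub-stripe).
         */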
1838
1839        path = btrfs_alloc_path();
1840        if (!path)
1841                return -ENOMEM;
1842
1843        /*
1844         * work on commit root. The related disk blocks are static as
1845         * long as COW is applied. This means it is safe to rewrite
1846         * them to repair disk errors without any race conditions
1847         */
1848        path->search_commit_root = 1;
1849        path->skip_locking = 1;
1850
1851        /*
1852         * trigger the readahead for the extent tree and csum tree, and wait for
1853         * completion. During readahead, the scrub is officially paused
1854         * to not hold off transaction commits
1855         */
1856        logical = base + offset;
1857
1858        wait_event(sdev->list_wait,
1859                   atomic_read(&sdev->in_flight) == 0);
1860        atomic_inc(&fs_info->scrubs_paused);
1861        wake_up(&fs_info->scrub_pause_wait);
1862
1863        /* FIXME it might be better to start readahead at commit root */
1864        key_start.objectid = logical;
1865        key_start.type = BTRFS_EXTENT_ITEM_KEY;
1866        key_start.offset = (u64)0;
1867        key_end.objectid = base + offset + nstripes * increment;
1868        key_end.type = BTRFS_EXTENT_ITEM_KEY;
1869        key_end.offset = (u64)0;
1870        reada1 = btrfs_reada_add(root, &key_start, &key_end);
1871
1872        key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1873        key_start.type = BTRFS_EXTENT_CSUM_KEY;
1874        key_start.offset = logical;
1875        key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
1876        key_end.type = BTRFS_EXTENT_CSUM_KEY;
1877        key_end.offset = base + offset + nstripes * increment;
1878        reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
1879
1880        if (!IS_ERR(reada1))
1881                btrfs_reada_wait(reada1);
1882        if (!IS_ERR(reada2))
1883                btrfs_reada_wait(reada2);
1884
1885        mutex_lock(&fs_info->scrub_lock);
1886        while (atomic_read(&fs_info->scrub_pause_req)) {
1887                mutex_unlock(&fs_info->scrub_lock);
1888                wait_event(fs_info->scrub_pause_wait,
1889                   atomic_read(&fs_info->scrub_pause_req) == 0);
1890                mutex_lock(&fs_info->scrub_lock);
1891        }
1892        atomic_dec(&fs_info->scrubs_paused);
1893        mutex_unlock(&fs_info->scrub_lock);
1894        wake_up(&fs_info->scrub_pause_wait);
1895
1896        /*
1897         * collect all data csums for the stripe to avoid seeking during
1898         * the scrub. With crc32 csums this currently amounts to about 1MB
1899         */
1900        blk_start_plug(&plug);
1901
1902        /*
1903         * now find all extents for each stripe and scrub them
1904         */
1905        logical = base + offset;
1906        physical = map->stripes[num].physical;
1907        ret = 0;
1908        for (i = 0; i < nstripes; ++i) {
1909                /*
1910                 * canceled?
1911                 */
1912                if (atomic_read(&fs_info->scrub_cancel_req) ||
1913                    atomic_read(&sdev->cancel_req)) {
1914                        ret = -ECANCELED;
1915                        goto out;
1916                }
1917                /*
1918                 * check to see if we have to pause
1919                 */
1920                if (atomic_read(&fs_info->scrub_pause_req)) {
1921                        /* push queued extents */
1922                        scrub_submit(sdev);
1923                        wait_event(sdev->list_wait,
1924                                   atomic_read(&sdev->in_flight) == 0);
1925                        atomic_inc(&fs_info->scrubs_paused);
1926                        wake_up(&fs_info->scrub_pause_wait);
1927                        mutex_lock(&fs_info->scrub_lock);
1928                        while (atomic_read(&fs_info->scrub_pause_req)) {
1929                                mutex_unlock(&fs_info->scrub_lock);
1930                                wait_event(fs_info->scrub_pause_wait,
1931                                   atomic_read(&fs_info->scrub_pause_req) == 0);
1932                                mutex_lock(&fs_info->scrub_lock);
1933                        }
1934                        atomic_dec(&fs_info->scrubs_paused);
1935                        mutex_unlock(&fs_info->scrub_lock);
1936                        wake_up(&fs_info->scrub_pause_wait);
1937                }
1938
1939                ret = btrfs_lookup_csums_range(csum_root, logical,
1940                                               logical + map->stripe_len - 1,
1941                                               &sdev->csum_list, 1);
1942                if (ret)
1943                        goto out;
1944
1945                key.objectid = logical;
1946                key.type = BTRFS_EXTENT_ITEM_KEY;
1947                key.offset = (u64)0;
1948
1949                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1950                if (ret < 0)
1951                        goto out;
1952                if (ret > 0) {
1953                        ret = btrfs_previous_item(root, path, 0,
1954                                                  BTRFS_EXTENT_ITEM_KEY);
1955                        if (ret < 0)
1956                                goto out;
1957                        if (ret > 0) {
1958                                /* there's no smaller item, so stick with the
1959                                 * larger one */
1960                                btrfs_release_path(path);
1961                                ret = btrfs_search_slot(NULL, root, &key,
1962                                                        path, 0, 0);
1963                                if (ret < 0)
1964                                        goto out;
1965                        }
1966                }
1967
1968                while (1) {
1969                        l = path->nodes[0];
1970                        slot = path->slots[0];
1971                        if (slot >= btrfs_header_nritems(l)) {
1972                                ret = btrfs_next_leaf(root, path);
1973                                if (ret == 0)
1974                                        continue;
1975                                if (ret < 0)
1976                                        goto out;
1977
1978                                break;
1979                        }
1980                        btrfs_item_key_to_cpu(l, &key, slot);
1981
1982                        if (key.objectid + key.offset <= logical)
1983                                goto next;
1984
1985                        if (key.objectid >= logical + map->stripe_len)
1986                                break;
1987
1988                        if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
1989                                goto next;
1990
1991                        extent = btrfs_item_ptr(l, slot,
1992                                                struct btrfs_extent_item);
1993                        flags = btrfs_extent_flags(l, extent);
1994                        generation = btrfs_extent_generation(l, extent);
1995
1996                        if (key.objectid < logical &&
1997                            (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
1998                                printk(KERN_ERR
1999                                       "btrfs scrub: tree block %llu spanning "
2000                                       "stripes, ignored. logical=%llu\n",
2001                                       (unsigned long long)key.objectid,
2002                                       (unsigned long long)logical);
2003                                goto next;
2004                        }
2005
2006                        /*
2007                         * trim extent to this stripe
2008                         */
2009                        if (key.objectid < logical) {
2010                                key.offset -= logical - key.objectid;
2011                                key.objectid = logical;
2012                        }
2013                        if (key.objectid + key.offset >
2014                            logical + map->stripe_len) {
2015                                key.offset = logical + map->stripe_len -
2016                                             key.objectid;
2017                        }
2018
2019                        ret = scrub_extent(sdev, key.objectid, key.offset,
2020                                           key.objectid - logical + physical,
2021                                           flags, generation, mirror_num);
2022                        if (ret)
2023                                goto out;
2024
2025next:
2026                        path->slots[0]++;
2027                }
2028                btrfs_release_path(path);
2029                logical += increment;
2030                physical += map->stripe_len;
2031                spin_lock(&sdev->stat_lock);
2032                sdev->stat.last_physical = physical;
2033                spin_unlock(&sdev->stat_lock);
2034        }
2035        /* push queued extents */
2036        scrub_submit(sdev);
2037
2038out:
2039        blk_finish_plug(&plug);
2040        btrfs_free_path(path);
2041        return ret < 0 ? ret : 0;
2042}
2043
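/*
 * note that with DUP both stripes of a chunk live on the same device,
 * which is why scrub_chunk() below matches stripes on the device *and*
 * on the physical offset of the dev extent being scrubbed.
 */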
2044static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
2045        u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length,
2046        u64 dev_offset)
2047{
2048        struct btrfs_mapping_tree *map_tree =
2049                &sdev->dev->dev_root->fs_info->mapping_tree;
2050        struct map_lookup *map;
2051        struct extent_map *em;
2052        int i;
2053        int ret = -EINVAL;
2054
2055        read_lock(&map_tree->map_tree.lock);
2056        em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
2057        read_unlock(&map_tree->map_tree.lock);
2058
2059        if (!em)
2060                return -EINVAL;
2061
2062        map = (struct map_lookup *)em->bdev;
2063        if (em->start != chunk_offset)
2064                goto out;
2065
2066        if (em->len < length)
2067                goto out;
2068
2069        for (i = 0; i < map->num_stripes; ++i) {
2070                if (map->stripes[i].dev == sdev->dev &&
2071                    map->stripes[i].physical == dev_offset) {
2072                        ret = scrub_stripe(sdev, map, i, chunk_offset, length);
2073                        if (ret)
2074                                goto out;
2075                }
2076        }
2077out:
2078        free_extent_map(em);
2079
2080        return ret;
2081}
2082
2083static noinline_for_stack
2084int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
2085{
2086        struct btrfs_dev_extent *dev_extent = NULL;
2087        struct btrfs_path *path;
2088        struct btrfs_root *root = sdev->dev->dev_root;
2089        struct btrfs_fs_info *fs_info = root->fs_info;
2090        u64 length;
2091        u64 chunk_tree;
2092        u64 chunk_objectid;
2093        u64 chunk_offset;
2094        int ret;
2095        int slot;
2096        struct extent_buffer *l;
2097        struct btrfs_key key;
2098        struct btrfs_key found_key;
2099        struct btrfs_block_group_cache *cache;
2100
2101        path = btrfs_alloc_path();
2102        if (!path)
2103                return -ENOMEM;
2104
2105        path->reada = 2;
2106        path->search_commit_root = 1;
2107        path->skip_locking = 1;
2108
2109        key.objectid = sdev->dev->devid;
2110        key.offset = 0ull;
2111        key.type = BTRFS_DEV_EXTENT_KEY;
2112
2113
2114        while (1) {
2115                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2116                if (ret < 0)
2117                        break;
2118                if (ret > 0) {
2119                        if (path->slots[0] >=
2120                            btrfs_header_nritems(path->nodes[0])) {
2121                                ret = btrfs_next_leaf(root, path);
2122                                if (ret)
2123                                        break;
2124                        }
2125                }
2126
2127                l = path->nodes[0];
2128                slot = path->slots[0];
2129
2130                btrfs_item_key_to_cpu(l, &found_key, slot);
2131
2132                if (found_key.objectid != sdev->dev->devid)
2133                        break;
2134
2135                if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
2136                        break;
2137
2138                if (found_key.offset >= end)
2139                        break;
2140
2141                if (found_key.offset < key.offset)
2142                        break;
2143
2144                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2145                length = btrfs_dev_extent_length(l, dev_extent);
2146
2147                if (found_key.offset + length <= start) {
2148                        key.offset = found_key.offset + length;
2149                        btrfs_release_path(path);
2150                        continue;
2151                }
2152
2153                chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
2154                chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
2155                chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2156
2157                /*
2158                 * get a reference on the corresponding block group to prevent
2159                 * the chunk from going away while we scrub it
2160                 */
2161                cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2162                if (!cache) {
2163                        ret = -ENOENT;
2164                        break;
2165                }
2166                ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
2167                                  chunk_offset, length, found_key.offset);
2168                btrfs_put_block_group(cache);
2169                if (ret)
2170                        break;
2171
2172                key.offset = found_key.offset + length;
2173                btrfs_release_path(path);
2174        }
2175
2176        btrfs_free_path(path);
2177
2178        /*
2179         * ret can still be 1 from search_slot or next_leaf,
2180         * that's not an error
2181         */
2182        return ret < 0 ? ret : 0;
2183}
2184
2185static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
2186{
2187        int     i;
2188        u64     bytenr;
2189        u64     gen;
2190        int     ret;
2191        struct btrfs_device *device = sdev->dev;
2192        struct btrfs_root *root = device->dev_root;
2193
2194        if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
2195                return -EIO;
2196
2197        gen = root->fs_info->last_trans_committed;
2198
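        /*
         * btrfs keeps up to BTRFS_SUPER_MIRROR_MAX copies of the super
         * block per device at fixed offsets (64KiB, 64MiB and 256GiB);
         * the total_bytes check below simply skips copies that would
         * lie beyond the end of a small device.
         */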
2199        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2200                bytenr = btrfs_sb_offset(i);
2201                if (bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
2202                        break;
2203
2204                ret = scrub_pages(sdev, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
2205                                     BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
2206                if (ret)
2207                        return ret;
2208        }
2209        wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
2210
2211        return 0;
2212}
2213
2214/*
2215 * get a reference on fs_info->scrub_workers; start the workers if necessary
2216 */
2217static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
2218{
2219        struct btrfs_fs_info *fs_info = root->fs_info;
2220        int ret = 0;
2221
2222        mutex_lock(&fs_info->scrub_lock);
2223        if (fs_info->scrub_workers_refcnt == 0) {
2224                btrfs_init_workers(&fs_info->scrub_workers, "scrub",
2225                           fs_info->thread_pool_size, &fs_info->generic_worker);
2226                fs_info->scrub_workers.idle_thresh = 4;
2227                ret = btrfs_start_workers(&fs_info->scrub_workers);
2228                if (ret)
2229                        goto out;
2230        }
2231        ++fs_info->scrub_workers_refcnt;
2232out:
2233        mutex_unlock(&fs_info->scrub_lock);
2234
2235        return ret;
2236}
2237
2238static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
2239{
2240        struct btrfs_fs_info *fs_info = root->fs_info;
2241
2242        mutex_lock(&fs_info->scrub_lock);
2243        if (--fs_info->scrub_workers_refcnt == 0)
2244                btrfs_stop_workers(&fs_info->scrub_workers);
2245        WARN_ON(fs_info->scrub_workers_refcnt < 0);
2246        mutex_unlock(&fs_info->scrub_lock);
2247}
2248
2249
2250int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
2251                    struct btrfs_scrub_progress *progress, int readonly)
2252{
2253        struct scrub_dev *sdev;
2254        struct btrfs_fs_info *fs_info = root->fs_info;
2255        int ret;
2256        struct btrfs_device *dev;
2257
2258        if (btrfs_fs_closing(root->fs_info))
2259                return -EINVAL;
2260
2261        /*
2262         * check some assumptions
2263         */
2264        if (root->nodesize != root->leafsize) {
2265                printk(KERN_ERR
2266                       "btrfs_scrub: size assumption nodesize == leafsize (%d == %d) fails\n",
2267                       root->nodesize, root->leafsize);
2268                return -EINVAL;
2269        }
2270
2271        if (root->nodesize > BTRFS_STRIPE_LEN) {
2272                /*
2273                 * the way scrub is implemented, it cannot checksum
2274                 * tree blocks larger than one stripe. Do not handle
2275                 * this situation at all because it won't ever happen.
2276                 */
2277                printk(KERN_ERR
2278                       "btrfs_scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails\n",
2279                       root->nodesize, BTRFS_STRIPE_LEN);
2280                return -EINVAL;
2281        }
2282
2283        if (root->sectorsize != PAGE_SIZE) {
2284                /* not supported for data w/o checksums */
2285                printk(KERN_ERR
2286                       "btrfs_scrub: size assumption sectorsize != PAGE_SIZE (%d != %llu) fails\n",
2287                       root->sectorsize, (unsigned long long)PAGE_SIZE);
2288                return -EINVAL;
2289        }
2290
2291        ret = scrub_workers_get(root);
2292        if (ret)
2293                return ret;
2294
2295        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2296        dev = btrfs_find_device(root, devid, NULL, NULL);
2297        if (!dev || dev->missing) {
2298                mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2299                scrub_workers_put(root);
2300                return -ENODEV;
2301        }
2302        mutex_lock(&fs_info->scrub_lock);
2303
2304        if (!dev->in_fs_metadata) {
2305                mutex_unlock(&fs_info->scrub_lock);
2306                mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2307                scrub_workers_put(root);
2308                return -ENODEV;
2309        }
2310
2311        if (dev->scrub_device) {
2312                mutex_unlock(&fs_info->scrub_lock);
2313                mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2314                scrub_workers_put(root);
2315                return -EINPROGRESS;
2316        }
2317        sdev = scrub_setup_dev(dev);
2318        if (IS_ERR(sdev)) {
2319                mutex_unlock(&fs_info->scrub_lock);
2320                mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2321                scrub_workers_put(root);
2322                return PTR_ERR(sdev);
2323        }
2324        sdev->readonly = readonly;
2325        dev->scrub_device = sdev;
2326
2327        atomic_inc(&fs_info->scrubs_running);
2328        mutex_unlock(&fs_info->scrub_lock);
2329        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2330
2331        down_read(&fs_info->scrub_super_lock);
2332        ret = scrub_supers(sdev);
2333        up_read(&fs_info->scrub_super_lock);
2334
2335        if (!ret)
2336                ret = scrub_enumerate_chunks(sdev, start, end);
2337
2338        wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
2339        atomic_dec(&fs_info->scrubs_running);
2340        wake_up(&fs_info->scrub_pause_wait);
2341
2342        wait_event(sdev->list_wait, atomic_read(&sdev->fixup_cnt) == 0);
2343
2344        if (progress)
2345                memcpy(progress, &sdev->stat, sizeof(*progress));
2346
2347        mutex_lock(&fs_info->scrub_lock);
2348        dev->scrub_device = NULL;
2349        mutex_unlock(&fs_info->scrub_lock);
2350
2351        scrub_free_dev(sdev);
2352        scrub_workers_put(root);
2353
2354        return ret;
2355}
2356
2357void btrfs_scrub_pause(struct btrfs_root *root)
2358{
2359        struct btrfs_fs_info *fs_info = root->fs_info;
2360
2361        mutex_lock(&fs_info->scrub_lock);
2362        atomic_inc(&fs_info->scrub_pause_req);
2363        while (atomic_read(&fs_info->scrubs_paused) !=
2364               atomic_read(&fs_info->scrubs_running)) {
2365                mutex_unlock(&fs_info->scrub_lock);
2366                wait_event(fs_info->scrub_pause_wait,
2367                           atomic_read(&fs_info->scrubs_paused) ==
2368                           atomic_read(&fs_info->scrubs_running));
2369                mutex_lock(&fs_info->scrub_lock);
2370        }
2371        mutex_unlock(&fs_info->scrub_lock);
2372}
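
/*
 * pause protocol: a caller such as the transaction commit raises
 * scrub_pause_req and waits until every running scrub has parked itself
 * (scrubs_paused equals scrubs_running); the scrubs notice the request
 * in the wait loops in scrub_stripe() above, and btrfs_scrub_continue()
 * below drops the request and wakes them up again.
 */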
2373
2374void btrfs_scrub_continue(struct btrfs_root *root)
2375{
2376        struct btrfs_fs_info *fs_info = root->fs_info;
2377
2378        atomic_dec(&fs_info->scrub_pause_req);
2379        wake_up(&fs_info->scrub_pause_wait);
2380}
2381
2382void btrfs_scrub_pause_super(struct btrfs_root *root)
2383{
2384        down_write(&root->fs_info->scrub_super_lock);
2385}
2386
2387void btrfs_scrub_continue_super(struct btrfs_root *root)
2388{
2389        up_write(&root->fs_info->scrub_super_lock);
2390}
2391
2392int __btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
2393{
2394
2395        mutex_lock(&fs_info->scrub_lock);
2396        if (!atomic_read(&fs_info->scrubs_running)) {
2397                mutex_unlock(&fs_info->scrub_lock);
2398                return -ENOTCONN;
2399        }
2400
2401        atomic_inc(&fs_info->scrub_cancel_req);
2402        while (atomic_read(&fs_info->scrubs_running)) {
2403                mutex_unlock(&fs_info->scrub_lock);
2404                wait_event(fs_info->scrub_pause_wait,
2405                           atomic_read(&fs_info->scrubs_running) == 0);
2406                mutex_lock(&fs_info->scrub_lock);
2407        }
2408        atomic_dec(&fs_info->scrub_cancel_req);
2409        mutex_unlock(&fs_info->scrub_lock);
2410
2411        return 0;
2412}
2413
2414int btrfs_scrub_cancel(struct btrfs_root *root)
2415{
2416        return __btrfs_scrub_cancel(root->fs_info);
2417}
2418
2419int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
2420{
2421        struct btrfs_fs_info *fs_info = root->fs_info;
2422        struct scrub_dev *sdev;
2423
2424        mutex_lock(&fs_info->scrub_lock);
2425        sdev = dev->scrub_device;
2426        if (!sdev) {
2427                mutex_unlock(&fs_info->scrub_lock);
2428                return -ENOTCONN;
2429        }
2430        atomic_inc(&sdev->cancel_req);
2431        while (dev->scrub_device) {
2432                mutex_unlock(&fs_info->scrub_lock);
2433                wait_event(fs_info->scrub_pause_wait,
2434                           dev->scrub_device == NULL);
2435                mutex_lock(&fs_info->scrub_lock);
2436        }
2437        mutex_unlock(&fs_info->scrub_lock);
2438
2439        return 0;
2440}
2441
2442int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
2443{
2444        struct btrfs_fs_info *fs_info = root->fs_info;
2445        struct btrfs_device *dev;
2446        int ret;
2447
2448        /*
2449         * we have to hold the device_list_mutex here so the device
2450         * does not go away in cancel_dev. FIXME: find a better solution
2451         */
2452        mutex_lock(&fs_info->fs_devices->device_list_mutex);
2453        dev = btrfs_find_device(root, devid, NULL, NULL);
2454        if (!dev) {
2455                mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2456                return -ENODEV;
2457        }
2458        ret = btrfs_scrub_cancel_dev(root, dev);
2459        mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2460
2461        return ret;
2462}
2463
2464int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
2465                         struct btrfs_scrub_progress *progress)
2466{
2467        struct btrfs_device *dev;
2468        struct scrub_dev *sdev = NULL;
2469
2470        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2471        dev = btrfs_find_device(root, devid, NULL, NULL);
2472        if (dev)
2473                sdev = dev->scrub_device;
2474        if (sdev)
2475                memcpy(progress, &sdev->stat, sizeof(*progress));
2476        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2477
2478        return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
2479}
2480