linux/drivers/md/raid5.c
   1/*
   2 * raid5.c : Multiple Devices driver for Linux
   3 *         Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
   4 *         Copyright (C) 1999, 2000 Ingo Molnar
   5 *         Copyright (C) 2002, 2003 H. Peter Anvin
   6 *
   7 * RAID-4/5/6 management functions.
   8 * Thanks to Penguin Computing for making the RAID-6 development possible
   9 * by donating a test server!
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of the GNU General Public License as published by
  13 * the Free Software Foundation; either version 2, or (at your option)
  14 * any later version.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * (for example /usr/src/linux/COPYING); if not, write to the Free
  18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  19 */
  20
  21/*
  22 * BITMAP UNPLUGGING:
  23 *
  24 * The sequencing for updating the bitmap reliably is a little
  25 * subtle (and I got it wrong the first time) so it deserves some
  26 * explanation.
  27 *
  28 * We group bitmap updates into batches.  Each batch has a number.
  29 * We may write out several batches at once, but that isn't very important.
  30 * conf->seq_write is the number of the last batch successfully written.
  31 * conf->seq_flush is the number of the last batch that was closed to
  32 *    new additions.
  33 * When we discover that we will need to write to any block in a stripe
  34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
  35 * the number of the batch it will be in. This is seq_flush+1.
  36 * When we are ready to do a write, if that batch hasn't been written yet,
  37 *   we plug the array and queue the stripe for later.
   38 * When an unplug happens, we increment seq_flush, thus closing the current
   39 *   batch.
   40 * When we notice that seq_flush > seq_write, we write out all pending updates
   41 * to the bitmap, and advance seq_write to where seq_flush was.
  42 * This may occasionally write a bit out twice, but is sure never to
  43 * miss any bits.
  44 */
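/*
 * Illustrative example (hypothetical numbers): if conf->seq_write is 10 and
 * conf->seq_flush is 12, batches 11 and 12 are closed but not yet written,
 * so a stripe with STRIPE_BIT_DELAY set and bm_seq > 10 is parked on
 * conf->bitmap_list (see __release_stripe()) until the bitmap has been
 * written out and seq_write has caught up.
 */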
  45
  46#include <linux/blkdev.h>
  47#include <linux/kthread.h>
  48#include <linux/raid/pq.h>
  49#include <linux/async_tx.h>
  50#include <linux/async.h>
  51#include <linux/seq_file.h>
  52#include <linux/cpu.h>
  53#include <linux/slab.h>
  54#include <linux/ratelimit.h>
  55#include "md.h"
  56#include "raid5.h"
  57#include "raid0.h"
  58#include "bitmap.h"
  59
  60/*
  61 * Stripe cache
  62 */
  63
  64#define NR_STRIPES              256
  65#define STRIPE_SIZE             PAGE_SIZE
  66#define STRIPE_SHIFT            (PAGE_SHIFT - 9)
  67#define STRIPE_SECTORS          (STRIPE_SIZE>>9)
  68#define IO_THRESHOLD            1
  69#define BYPASS_THRESHOLD        1
  70#define NR_HASH                 (PAGE_SIZE / sizeof(struct hlist_head))
  71#define HASH_MASK               (NR_HASH - 1)
  72
  73#define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
  74
  75/* bio's attached to a stripe+device for I/O are linked together in bi_sector
  76 * order without overlap.  There may be several bio's per stripe+device, and
  77 * a bio could span several devices.
  78 * When walking this list for a particular stripe+device, we must never proceed
  79 * beyond a bio that extends past this device, as the next bio might no longer
  80 * be valid.
  81 * This macro is used to determine the 'next' bio in the list, given the sector
  82 * of the current stripe+device
  83 */
  84#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
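/*
 * Example, assuming 4K pages (STRIPE_SECTORS == 8): when walking the chunk
 * that starts at sector 96, a bio at sector 100 with bi_size 1024 (2 sectors)
 * ends inside the chunk, so r5_next_bio() returns its bi_next; a bio there
 * with bi_size 8192 (16 sectors) extends past sector 104, so the walk stops.
 */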
  85/*
  86 * The following can be used to debug the driver
  87 */
  88#define RAID5_PARANOIA  1
  89#if RAID5_PARANOIA && defined(CONFIG_SMP)
  90# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
  91#else
  92# define CHECK_DEVLOCK()
  93#endif
  94
  95#ifdef DEBUG
  96#define inline
  97#define __inline__
  98#endif
  99
 100/*
 101 * We maintain a biased count of active stripes in the bottom 16 bits of
 102 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 103 */
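/*
 * For example (illustrative value only): bi_phys_segments == 0x00030002
 * means raid5_bi_phys_segments() == 2 (active references) and
 * raid5_bi_hw_segments() == 3 (processed stripes).
 */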
 104static inline int raid5_bi_phys_segments(struct bio *bio)
 105{
 106        return bio->bi_phys_segments & 0xffff;
 107}
 108
 109static inline int raid5_bi_hw_segments(struct bio *bio)
 110{
 111        return (bio->bi_phys_segments >> 16) & 0xffff;
 112}
 113
 114static inline int raid5_dec_bi_phys_segments(struct bio *bio)
 115{
 116        --bio->bi_phys_segments;
 117        return raid5_bi_phys_segments(bio);
 118}
 119
 120static inline int raid5_dec_bi_hw_segments(struct bio *bio)
 121{
 122        unsigned short val = raid5_bi_hw_segments(bio);
 123
 124        --val;
 125        bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
 126        return val;
 127}
 128
 129static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
 130{
 131        bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
 132}
 133
 134/* Find first data disk in a raid6 stripe */
 135static inline int raid6_d0(struct stripe_head *sh)
 136{
 137        if (sh->ddf_layout)
  138                /* ddf always starts from the first device */
 139                return 0;
 140        /* md starts just after Q block */
 141        if (sh->qd_idx == sh->disks - 1)
 142                return 0;
 143        else
 144                return sh->qd_idx + 1;
 145}
 146static inline int raid6_next_disk(int disk, int raid_disks)
 147{
 148        disk++;
 149        return (disk < raid_disks) ? disk : 0;
 150}
 151
  152/* When walking through the disks in a raid6 stripe, starting at raid6_d0,
  153 * we need to map each disk to a 'slot', where the data disks are slots
  154 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
  155 * is raid_disks-1.  This helper does that mapping.
  156 */
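/*
 * e.g. (hypothetical layout) for a 5-device md-style RAID6 stripe with
 * pd_idx == 3 and qd_idx == 4: raid6_d0() is 0, data disks 0-2 map to
 * slots 0-2, the parity disk maps to slot 3 (syndrome_disks) and the Q
 * disk to slot 4 (syndrome_disks + 1).
 */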
 157static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
 158                             int *count, int syndrome_disks)
 159{
 160        int slot = *count;
 161
 162        if (sh->ddf_layout)
 163                (*count)++;
 164        if (idx == sh->pd_idx)
 165                return syndrome_disks;
 166        if (idx == sh->qd_idx)
 167                return syndrome_disks + 1;
 168        if (!sh->ddf_layout)
 169                (*count)++;
 170        return slot;
 171}
 172
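/*
 * Complete a chain of bios whose stripe processing has finished: each bio
 * is unlinked, its residual size cleared, and ended with bio_endio().
 */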
 173static void return_io(struct bio *return_bi)
 174{
 175        struct bio *bi = return_bi;
 176        while (bi) {
 177
 178                return_bi = bi->bi_next;
 179                bi->bi_next = NULL;
 180                bi->bi_size = 0;
 181                bio_endio(bi, 0);
 182                bi = return_bi;
 183        }
 184}
 185
 186static void print_raid5_conf (raid5_conf_t *conf);
 187
 188static int stripe_operations_active(struct stripe_head *sh)
 189{
 190        return sh->check_state || sh->reconstruct_state ||
 191               test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
 192               test_bit(STRIPE_COMPUTE_RUN, &sh->state);
 193}
 194
 195static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 196{
 197        if (atomic_dec_and_test(&sh->count)) {
 198                BUG_ON(!list_empty(&sh->lru));
 199                BUG_ON(atomic_read(&conf->active_stripes)==0);
 200                if (test_bit(STRIPE_HANDLE, &sh->state)) {
 201                        if (test_bit(STRIPE_DELAYED, &sh->state))
 202                                list_add_tail(&sh->lru, &conf->delayed_list);
 203                        else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
 204                                   sh->bm_seq - conf->seq_write > 0)
 205                                list_add_tail(&sh->lru, &conf->bitmap_list);
 206                        else {
 207                                clear_bit(STRIPE_BIT_DELAY, &sh->state);
 208                                list_add_tail(&sh->lru, &conf->handle_list);
 209                        }
 210                        md_wakeup_thread(conf->mddev->thread);
 211                } else {
 212                        BUG_ON(stripe_operations_active(sh));
 213                        if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
 214                                atomic_dec(&conf->preread_active_stripes);
 215                                if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
 216                                        md_wakeup_thread(conf->mddev->thread);
 217                        }
 218                        atomic_dec(&conf->active_stripes);
 219                        if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
 220                                list_add_tail(&sh->lru, &conf->inactive_list);
 221                                wake_up(&conf->wait_for_stripe);
 222                                if (conf->retry_read_aligned)
 223                                        md_wakeup_thread(conf->mddev->thread);
 224                        }
 225                }
 226        }
 227}
 228
 229static void release_stripe(struct stripe_head *sh)
 230{
 231        raid5_conf_t *conf = sh->raid_conf;
 232        unsigned long flags;
 233
 234        spin_lock_irqsave(&conf->device_lock, flags);
 235        __release_stripe(conf, sh);
 236        spin_unlock_irqrestore(&conf->device_lock, flags);
 237}
 238
 239static inline void remove_hash(struct stripe_head *sh)
 240{
 241        pr_debug("remove_hash(), stripe %llu\n",
 242                (unsigned long long)sh->sector);
 243
 244        hlist_del_init(&sh->hash);
 245}
 246
 247static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
 248{
 249        struct hlist_head *hp = stripe_hash(conf, sh->sector);
 250
 251        pr_debug("insert_hash(), stripe %llu\n",
 252                (unsigned long long)sh->sector);
 253
 254        CHECK_DEVLOCK();
 255        hlist_add_head(&sh->hash, hp);
 256}
 257
 258
 259/* find an idle stripe, make sure it is unhashed, and return it. */
 260static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
 261{
 262        struct stripe_head *sh = NULL;
 263        struct list_head *first;
 264
 265        CHECK_DEVLOCK();
 266        if (list_empty(&conf->inactive_list))
 267                goto out;
 268        first = conf->inactive_list.next;
 269        sh = list_entry(first, struct stripe_head, lru);
 270        list_del_init(first);
 271        remove_hash(sh);
 272        atomic_inc(&conf->active_stripes);
 273out:
 274        return sh;
 275}
 276
 277static void shrink_buffers(struct stripe_head *sh)
 278{
 279        struct page *p;
 280        int i;
 281        int num = sh->raid_conf->pool_size;
 282
 283        for (i = 0; i < num ; i++) {
 284                p = sh->dev[i].page;
 285                if (!p)
 286                        continue;
 287                sh->dev[i].page = NULL;
 288                put_page(p);
 289        }
 290}
 291
 292static int grow_buffers(struct stripe_head *sh)
 293{
 294        int i;
 295        int num = sh->raid_conf->pool_size;
 296
 297        for (i = 0; i < num; i++) {
 298                struct page *page;
 299
 300                if (!(page = alloc_page(GFP_KERNEL))) {
 301                        return 1;
 302                }
 303                sh->dev[i].page = page;
 304        }
 305        return 0;
 306}
 307
 308static void raid5_build_block(struct stripe_head *sh, int i, int previous);
 309static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
 310                            struct stripe_head *sh);
 311
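/*
 * (Re)initialise an idle stripe_head to cover 'sector' and re-hash it.
 * Called with conf->device_lock held and with the reference count at zero;
 * every r5dev is rebuilt for the (possibly pre-reshape) geometry.
 */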
 312static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
 313{
 314        raid5_conf_t *conf = sh->raid_conf;
 315        int i;
 316
 317        BUG_ON(atomic_read(&sh->count) != 0);
 318        BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
 319        BUG_ON(stripe_operations_active(sh));
 320
 321        CHECK_DEVLOCK();
 322        pr_debug("init_stripe called, stripe %llu\n",
 323                (unsigned long long)sh->sector);
 324
 325        remove_hash(sh);
 326
 327        sh->generation = conf->generation - previous;
 328        sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
 329        sh->sector = sector;
 330        stripe_set_idx(sector, conf, previous, sh);
 331        sh->state = 0;
 332
 333
 334        for (i = sh->disks; i--; ) {
 335                struct r5dev *dev = &sh->dev[i];
 336
 337                if (dev->toread || dev->read || dev->towrite || dev->written ||
 338                    test_bit(R5_LOCKED, &dev->flags)) {
 339                        printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
 340                               (unsigned long long)sh->sector, i, dev->toread,
 341                               dev->read, dev->towrite, dev->written,
 342                               test_bit(R5_LOCKED, &dev->flags));
 343                        WARN_ON(1);
 344                }
 345                dev->flags = 0;
 346                raid5_build_block(sh, i, previous);
 347        }
 348        insert_hash(conf, sh);
 349}
 350
 351static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
 352                                         short generation)
 353{
 354        struct stripe_head *sh;
 355        struct hlist_node *hn;
 356
 357        CHECK_DEVLOCK();
 358        pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
 359        hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
 360                if (sh->sector == sector && sh->generation == generation)
 361                        return sh;
 362        pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
 363        return NULL;
 364}
 365
 366/*
 367 * Need to check if array has failed when deciding whether to:
 368 *  - start an array
 369 *  - remove non-faulty devices
 370 *  - add a spare
 371 *  - allow a reshape
 372 * This determination is simple when no reshape is happening.
 373 * However if there is a reshape, we need to carefully check
 374 * both the before and after sections.
 375 * This is because some failed devices may only affect one
 376 * of the two sections, and some non-in_sync devices may
  377 * be in_sync in the section most affected by failed devices.
 378 */
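/*
 * For example, while the array is being grown (raid_disks >
 * previous_raid_disks), a device that is neither Faulty nor In_sync counts
 * as failed for the 'previous' section below but not for the new one,
 * since the reshape itself is what brings it into sync.
 */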
 379static int has_failed(raid5_conf_t *conf)
 380{
 381        int degraded;
 382        int i;
 383        if (conf->mddev->reshape_position == MaxSector)
 384                return conf->mddev->degraded > conf->max_degraded;
 385
 386        rcu_read_lock();
 387        degraded = 0;
 388        for (i = 0; i < conf->previous_raid_disks; i++) {
 389                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 390                if (!rdev || test_bit(Faulty, &rdev->flags))
 391                        degraded++;
 392                else if (test_bit(In_sync, &rdev->flags))
 393                        ;
 394                else
 395                        /* not in-sync or faulty.
 396                         * If the reshape increases the number of devices,
 397                         * this is being recovered by the reshape, so
 398                         * this 'previous' section is not in_sync.
 399                         * If the number of devices is being reduced however,
 400                         * the device can only be part of the array if
 401                         * we are reverting a reshape, so this section will
 402                         * be in-sync.
 403                         */
 404                        if (conf->raid_disks >= conf->previous_raid_disks)
 405                                degraded++;
 406        }
 407        rcu_read_unlock();
 408        if (degraded > conf->max_degraded)
 409                return 1;
 410        rcu_read_lock();
 411        degraded = 0;
 412        for (i = 0; i < conf->raid_disks; i++) {
 413                mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 414                if (!rdev || test_bit(Faulty, &rdev->flags))
 415                        degraded++;
 416                else if (test_bit(In_sync, &rdev->flags))
 417                        ;
 418                else
 419                        /* not in-sync or faulty.
 420                         * If reshape increases the number of devices, this
 421                         * section has already been recovered, else it
 422                         * almost certainly hasn't.
 423                         */
 424                        if (conf->raid_disks <= conf->previous_raid_disks)
 425                                degraded++;
 426        }
 427        rcu_read_unlock();
 428        if (degraded > conf->max_degraded)
 429                return 1;
 430        return 0;
 431}
 432
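/*
 * Find the stripe_head covering 'sector' in the cache, or claim and
 * initialise a free one.  Unless 'noblock' is set this may sleep until a
 * stripe becomes available; 'noquiesce' allows progress while the array is
 * quiesced.  A successful return holds a reference on the stripe.
 */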
 433static struct stripe_head *
 434get_active_stripe(raid5_conf_t *conf, sector_t sector,
 435                  int previous, int noblock, int noquiesce)
 436{
 437        struct stripe_head *sh;
 438
 439        pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);
 440
 441        spin_lock_irq(&conf->device_lock);
 442
 443        do {
 444                wait_event_lock_irq(conf->wait_for_stripe,
 445                                    conf->quiesce == 0 || noquiesce,
 446                                    conf->device_lock, /* nothing */);
 447                sh = __find_stripe(conf, sector, conf->generation - previous);
 448                if (!sh) {
 449                        if (!conf->inactive_blocked)
 450                                sh = get_free_stripe(conf);
 451                        if (noblock && sh == NULL)
 452                                break;
 453                        if (!sh) {
 454                                conf->inactive_blocked = 1;
 455                                wait_event_lock_irq(conf->wait_for_stripe,
 456                                                    !list_empty(&conf->inactive_list) &&
 457                                                    (atomic_read(&conf->active_stripes)
 458                                                     < (conf->max_nr_stripes *3/4)
 459                                                     || !conf->inactive_blocked),
 460                                                    conf->device_lock,
 461                                                    );
 462                                conf->inactive_blocked = 0;
 463                        } else
 464                                init_stripe(sh, sector, previous);
 465                } else {
 466                        if (atomic_read(&sh->count)) {
 467                                BUG_ON(!list_empty(&sh->lru)
 468                                    && !test_bit(STRIPE_EXPANDING, &sh->state));
 469                        } else {
 470                                if (!test_bit(STRIPE_HANDLE, &sh->state))
 471                                        atomic_inc(&conf->active_stripes);
 472                                if (list_empty(&sh->lru) &&
 473                                    !test_bit(STRIPE_EXPANDING, &sh->state))
 474                                        BUG();
 475                                list_del_init(&sh->lru);
 476                        }
 477                }
 478        } while (sh == NULL);
 479
 480        if (sh)
 481                atomic_inc(&sh->count);
 482
 483        spin_unlock_irq(&conf->device_lock);
 484        return sh;
 485}
 486
 487static void
 488raid5_end_read_request(struct bio *bi, int error);
 489static void
 490raid5_end_write_request(struct bio *bi, int error);
 491
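/*
 * Issue the I/O requested for this stripe: for each device flagged
 * R5_Wantread or R5_Wantwrite, build the embedded per-device bio and submit
 * it, skipping (and, for writes, marking the stripe degraded) when the
 * member device is missing, faulty, or sits on an acknowledged bad block.
 */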
 492static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 493{
 494        raid5_conf_t *conf = sh->raid_conf;
 495        int i, disks = sh->disks;
 496
 497        might_sleep();
 498
 499        for (i = disks; i--; ) {
 500                int rw;
 501                struct bio *bi;
 502                mdk_rdev_t *rdev;
 503                if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
 504                        if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
 505                                rw = WRITE_FUA;
 506                        else
 507                                rw = WRITE;
 508                } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
 509                        rw = READ;
 510                else
 511                        continue;
 512
 513                bi = &sh->dev[i].req;
 514
 515                bi->bi_rw = rw;
 516                if (rw & WRITE)
 517                        bi->bi_end_io = raid5_end_write_request;
 518                else
 519                        bi->bi_end_io = raid5_end_read_request;
 520
 521                rcu_read_lock();
 522                rdev = rcu_dereference(conf->disks[i].rdev);
 523                if (rdev && test_bit(Faulty, &rdev->flags))
 524                        rdev = NULL;
 525                if (rdev)
 526                        atomic_inc(&rdev->nr_pending);
 527                rcu_read_unlock();
 528
  529                /* We have already checked bad blocks for reads.  Now we
 530                 * need to check for writes.
 531                 */
 532                while ((rw & WRITE) && rdev &&
 533                       test_bit(WriteErrorSeen, &rdev->flags)) {
 534                        sector_t first_bad;
 535                        int bad_sectors;
 536                        int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
 537                                              &first_bad, &bad_sectors);
 538                        if (!bad)
 539                                break;
 540
 541                        if (bad < 0) {
 542                                set_bit(BlockedBadBlocks, &rdev->flags);
 543                                if (!conf->mddev->external &&
 544                                    conf->mddev->flags) {
 545                                        /* It is very unlikely, but we might
 546                                         * still need to write out the
 547                                         * bad block log - better give it
  548                                         * a chance */
 549                                        md_check_recovery(conf->mddev);
 550                                }
 551                                md_wait_for_blocked_rdev(rdev, conf->mddev);
 552                        } else {
 553                                /* Acknowledged bad block - skip the write */
 554                                rdev_dec_pending(rdev, conf->mddev);
 555                                rdev = NULL;
 556                        }
 557                }
 558
 559                if (rdev) {
 560                        if (s->syncing || s->expanding || s->expanded)
 561                                md_sync_acct(rdev->bdev, STRIPE_SECTORS);
 562
 563                        set_bit(STRIPE_IO_STARTED, &sh->state);
 564
 565                        bi->bi_bdev = rdev->bdev;
 566                        pr_debug("%s: for %llu schedule op %ld on disc %d\n",
 567                                __func__, (unsigned long long)sh->sector,
 568                                bi->bi_rw, i);
 569                        atomic_inc(&sh->count);
 570                        bi->bi_sector = sh->sector + rdev->data_offset;
 571                        bi->bi_flags = 1 << BIO_UPTODATE;
 572                        bi->bi_vcnt = 1;
 573                        bi->bi_max_vecs = 1;
 574                        bi->bi_idx = 0;
 575                        bi->bi_io_vec = &sh->dev[i].vec;
 576                        bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 577                        bi->bi_io_vec[0].bv_offset = 0;
 578                        bi->bi_size = STRIPE_SIZE;
 579                        bi->bi_next = NULL;
 580                        generic_make_request(bi);
 581                } else {
 582                        if (rw & WRITE)
 583                                set_bit(STRIPE_DEGRADED, &sh->state);
 584                        pr_debug("skip op %ld on disc %d for sector %llu\n",
 585                                bi->bi_rw, i, (unsigned long long)sh->sector);
 586                        clear_bit(R5_LOCKED, &sh->dev[i].flags);
 587                        set_bit(STRIPE_HANDLE, &sh->state);
 588                }
 589        }
 590}
 591
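/*
 * Copy data between a bio and a stripe cache page using the async_tx API.
 * 'frombio' selects the direction (bio -> page for drains, page -> bio for
 * biofills); each copy is chained onto 'tx' and the final descriptor is
 * returned so callers can keep extending the chain.
 */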
 592static struct dma_async_tx_descriptor *
 593async_copy_data(int frombio, struct bio *bio, struct page *page,
 594        sector_t sector, struct dma_async_tx_descriptor *tx)
 595{
 596        struct bio_vec *bvl;
 597        struct page *bio_page;
 598        int i;
 599        int page_offset;
 600        struct async_submit_ctl submit;
 601        enum async_tx_flags flags = 0;
 602
 603        if (bio->bi_sector >= sector)
 604                page_offset = (signed)(bio->bi_sector - sector) * 512;
 605        else
 606                page_offset = (signed)(sector - bio->bi_sector) * -512;
 607
 608        if (frombio)
 609                flags |= ASYNC_TX_FENCE;
 610        init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
 611
 612        bio_for_each_segment(bvl, bio, i) {
 613                int len = bvl->bv_len;
 614                int clen;
 615                int b_offset = 0;
 616
 617                if (page_offset < 0) {
 618                        b_offset = -page_offset;
 619                        page_offset += b_offset;
 620                        len -= b_offset;
 621                }
 622
 623                if (len > 0 && page_offset + len > STRIPE_SIZE)
 624                        clen = STRIPE_SIZE - page_offset;
 625                else
 626                        clen = len;
 627
 628                if (clen > 0) {
 629                        b_offset += bvl->bv_offset;
 630                        bio_page = bvl->bv_page;
 631                        if (frombio)
 632                                tx = async_memcpy(page, bio_page, page_offset,
 633                                                  b_offset, clen, &submit);
 634                        else
 635                                tx = async_memcpy(bio_page, page, b_offset,
 636                                                  page_offset, clen, &submit);
 637                }
 638                /* chain the operations */
 639                submit.depend_tx = tx;
 640
 641                if (clen < len) /* hit end of page */
 642                        break;
 643                page_offset +=  len;
 644        }
 645
 646        return tx;
 647}
 648
 649static void ops_complete_biofill(void *stripe_head_ref)
 650{
 651        struct stripe_head *sh = stripe_head_ref;
 652        struct bio *return_bi = NULL;
 653        raid5_conf_t *conf = sh->raid_conf;
 654        int i;
 655
 656        pr_debug("%s: stripe %llu\n", __func__,
 657                (unsigned long long)sh->sector);
 658
 659        /* clear completed biofills */
 660        spin_lock_irq(&conf->device_lock);
 661        for (i = sh->disks; i--; ) {
 662                struct r5dev *dev = &sh->dev[i];
 663
  664                /* acknowledge completion of a biofill operation and
  665                 * check if we need to reply to a read request;
  666                 * new R5_Wantfill requests are held off until
  667                 * !STRIPE_BIOFILL_RUN
  668                 */
 669                if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
 670                        struct bio *rbi, *rbi2;
 671
 672                        BUG_ON(!dev->read);
 673                        rbi = dev->read;
 674                        dev->read = NULL;
 675                        while (rbi && rbi->bi_sector <
 676                                dev->sector + STRIPE_SECTORS) {
 677                                rbi2 = r5_next_bio(rbi, dev->sector);
 678                                if (!raid5_dec_bi_phys_segments(rbi)) {
 679                                        rbi->bi_next = return_bi;
 680                                        return_bi = rbi;
 681                                }
 682                                rbi = rbi2;
 683                        }
 684                }
 685        }
 686        spin_unlock_irq(&conf->device_lock);
 687        clear_bit(STRIPE_BIOFILL_RUN, &sh->state);
 688
 689        return_io(return_bi);
 690
 691        set_bit(STRIPE_HANDLE, &sh->state);
 692        release_stripe(sh);
 693}
 694
 695static void ops_run_biofill(struct stripe_head *sh)
 696{
 697        struct dma_async_tx_descriptor *tx = NULL;
 698        raid5_conf_t *conf = sh->raid_conf;
 699        struct async_submit_ctl submit;
 700        int i;
 701
 702        pr_debug("%s: stripe %llu\n", __func__,
 703                (unsigned long long)sh->sector);
 704
 705        for (i = sh->disks; i--; ) {
 706                struct r5dev *dev = &sh->dev[i];
 707                if (test_bit(R5_Wantfill, &dev->flags)) {
 708                        struct bio *rbi;
 709                        spin_lock_irq(&conf->device_lock);
 710                        dev->read = rbi = dev->toread;
 711                        dev->toread = NULL;
 712                        spin_unlock_irq(&conf->device_lock);
 713                        while (rbi && rbi->bi_sector <
 714                                dev->sector + STRIPE_SECTORS) {
 715                                tx = async_copy_data(0, rbi, dev->page,
 716                                        dev->sector, tx);
 717                                rbi = r5_next_bio(rbi, dev->sector);
 718                        }
 719                }
 720        }
 721
 722        atomic_inc(&sh->count);
 723        init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
 724        async_trigger_callback(&submit);
 725}
 726
 727static void mark_target_uptodate(struct stripe_head *sh, int target)
 728{
 729        struct r5dev *tgt;
 730
 731        if (target < 0)
 732                return;
 733
 734        tgt = &sh->dev[target];
 735        set_bit(R5_UPTODATE, &tgt->flags);
 736        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 737        clear_bit(R5_Wantcompute, &tgt->flags);
 738}
 739
 740static void ops_complete_compute(void *stripe_head_ref)
 741{
 742        struct stripe_head *sh = stripe_head_ref;
 743
 744        pr_debug("%s: stripe %llu\n", __func__,
 745                (unsigned long long)sh->sector);
 746
 747        /* mark the computed target(s) as uptodate */
 748        mark_target_uptodate(sh, sh->ops.target);
 749        mark_target_uptodate(sh, sh->ops.target2);
 750
 751        clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
 752        if (sh->check_state == check_state_compute_run)
 753                sh->check_state = check_state_compute_result;
 754        set_bit(STRIPE_HANDLE, &sh->state);
 755        release_stripe(sh);
 756}
 757
 758/* return a pointer to the address conversion region of the scribble buffer */
 759static addr_conv_t *to_addr_conv(struct stripe_head *sh,
 760                                 struct raid5_percpu *percpu)
 761{
 762        return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
 763}
 764
 765static struct dma_async_tx_descriptor *
 766ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
 767{
 768        int disks = sh->disks;
 769        struct page **xor_srcs = percpu->scribble;
 770        int target = sh->ops.target;
 771        struct r5dev *tgt = &sh->dev[target];
 772        struct page *xor_dest = tgt->page;
 773        int count = 0;
 774        struct dma_async_tx_descriptor *tx;
 775        struct async_submit_ctl submit;
 776        int i;
 777
 778        pr_debug("%s: stripe %llu block: %d\n",
 779                __func__, (unsigned long long)sh->sector, target);
 780        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 781
 782        for (i = disks; i--; )
 783                if (i != target)
 784                        xor_srcs[count++] = sh->dev[i].page;
 785
 786        atomic_inc(&sh->count);
 787
 788        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
 789                          ops_complete_compute, sh, to_addr_conv(sh, percpu));
 790        if (unlikely(count == 1))
 791                tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
 792        else
 793                tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
 794
 795        return tx;
 796}
 797
 798/* set_syndrome_sources - populate source buffers for gen_syndrome
 799 * @srcs - (struct page *) array of size sh->disks
 800 * @sh - stripe_head to parse
 801 *
 802 * Populates srcs in proper layout order for the stripe and returns the
 803 * 'count' of sources to be used in a call to async_gen_syndrome.  The P
 804 * destination buffer is recorded in srcs[count] and the Q destination
  805 * is recorded in srcs[count+1].
 806 */
 807static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
 808{
 809        int disks = sh->disks;
 810        int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
 811        int d0_idx = raid6_d0(sh);
 812        int count;
 813        int i;
 814
 815        for (i = 0; i < disks; i++)
 816                srcs[i] = NULL;
 817
 818        count = 0;
 819        i = d0_idx;
 820        do {
 821                int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
 822
 823                srcs[slot] = sh->dev[i].page;
 824                i = raid6_next_disk(i, disks);
 825        } while (i != d0_idx);
 826
 827        return syndrome_disks;
 828}
 829
 830static struct dma_async_tx_descriptor *
 831ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
 832{
 833        int disks = sh->disks;
 834        struct page **blocks = percpu->scribble;
 835        int target;
 836        int qd_idx = sh->qd_idx;
 837        struct dma_async_tx_descriptor *tx;
 838        struct async_submit_ctl submit;
 839        struct r5dev *tgt;
 840        struct page *dest;
 841        int i;
 842        int count;
 843
 844        if (sh->ops.target < 0)
 845                target = sh->ops.target2;
 846        else if (sh->ops.target2 < 0)
 847                target = sh->ops.target;
 848        else
 849                /* we should only have one valid target */
 850                BUG();
 851        BUG_ON(target < 0);
 852        pr_debug("%s: stripe %llu block: %d\n",
 853                __func__, (unsigned long long)sh->sector, target);
 854
 855        tgt = &sh->dev[target];
 856        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 857        dest = tgt->page;
 858
 859        atomic_inc(&sh->count);
 860
 861        if (target == qd_idx) {
 862                count = set_syndrome_sources(blocks, sh);
 863                blocks[count] = NULL; /* regenerating p is not necessary */
 864                BUG_ON(blocks[count+1] != dest); /* q should already be set */
 865                init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
 866                                  ops_complete_compute, sh,
 867                                  to_addr_conv(sh, percpu));
 868                tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
 869        } else {
 870                /* Compute any data- or p-drive using XOR */
 871                count = 0;
 872                for (i = disks; i-- ; ) {
 873                        if (i == target || i == qd_idx)
 874                                continue;
 875                        blocks[count++] = sh->dev[i].page;
 876                }
 877
 878                init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
 879                                  NULL, ops_complete_compute, sh,
 880                                  to_addr_conv(sh, percpu));
 881                tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
 882        }
 883
 884        return tx;
 885}
 886
 887static struct dma_async_tx_descriptor *
 888ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu)
 889{
 890        int i, count, disks = sh->disks;
 891        int syndrome_disks = sh->ddf_layout ? disks : disks-2;
 892        int d0_idx = raid6_d0(sh);
 893        int faila = -1, failb = -1;
 894        int target = sh->ops.target;
 895        int target2 = sh->ops.target2;
 896        struct r5dev *tgt = &sh->dev[target];
 897        struct r5dev *tgt2 = &sh->dev[target2];
 898        struct dma_async_tx_descriptor *tx;
 899        struct page **blocks = percpu->scribble;
 900        struct async_submit_ctl submit;
 901
 902        pr_debug("%s: stripe %llu block1: %d block2: %d\n",
 903                 __func__, (unsigned long long)sh->sector, target, target2);
 904        BUG_ON(target < 0 || target2 < 0);
 905        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 906        BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags));
 907
 908        /* we need to open-code set_syndrome_sources to handle the
 909         * slot number conversion for 'faila' and 'failb'
 910         */
 911        for (i = 0; i < disks ; i++)
 912                blocks[i] = NULL;
 913        count = 0;
 914        i = d0_idx;
 915        do {
 916                int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);
 917
 918                blocks[slot] = sh->dev[i].page;
 919
 920                if (i == target)
 921                        faila = slot;
 922                if (i == target2)
 923                        failb = slot;
 924                i = raid6_next_disk(i, disks);
 925        } while (i != d0_idx);
 926
 927        BUG_ON(faila == failb);
 928        if (failb < faila)
 929                swap(faila, failb);
 930        pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
 931                 __func__, (unsigned long long)sh->sector, faila, failb);
 932
 933        atomic_inc(&sh->count);
 934
 935        if (failb == syndrome_disks+1) {
 936                /* Q disk is one of the missing disks */
 937                if (faila == syndrome_disks) {
 938                        /* Missing P+Q, just recompute */
 939                        init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
 940                                          ops_complete_compute, sh,
 941                                          to_addr_conv(sh, percpu));
 942                        return async_gen_syndrome(blocks, 0, syndrome_disks+2,
 943                                                  STRIPE_SIZE, &submit);
 944                } else {
 945                        struct page *dest;
 946                        int data_target;
 947                        int qd_idx = sh->qd_idx;
 948
 949                        /* Missing D+Q: recompute D from P, then recompute Q */
 950                        if (target == qd_idx)
 951                                data_target = target2;
 952                        else
 953                                data_target = target;
 954
 955                        count = 0;
 956                        for (i = disks; i-- ; ) {
 957                                if (i == data_target || i == qd_idx)
 958                                        continue;
 959                                blocks[count++] = sh->dev[i].page;
 960                        }
 961                        dest = sh->dev[data_target].page;
 962                        init_async_submit(&submit,
 963                                          ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
 964                                          NULL, NULL, NULL,
 965                                          to_addr_conv(sh, percpu));
 966                        tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE,
 967                                       &submit);
 968
 969                        count = set_syndrome_sources(blocks, sh);
 970                        init_async_submit(&submit, ASYNC_TX_FENCE, tx,
 971                                          ops_complete_compute, sh,
 972                                          to_addr_conv(sh, percpu));
 973                        return async_gen_syndrome(blocks, 0, count+2,
 974                                                  STRIPE_SIZE, &submit);
 975                }
 976        } else {
 977                init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
 978                                  ops_complete_compute, sh,
 979                                  to_addr_conv(sh, percpu));
 980                if (failb == syndrome_disks) {
 981                        /* We're missing D+P. */
 982                        return async_raid6_datap_recov(syndrome_disks+2,
 983                                                       STRIPE_SIZE, faila,
 984                                                       blocks, &submit);
 985                } else {
 986                        /* We're missing D+D. */
 987                        return async_raid6_2data_recov(syndrome_disks+2,
 988                                                       STRIPE_SIZE, faila, failb,
 989                                                       blocks, &submit);
 990                }
 991        }
 992}
 993
 994
 995static void ops_complete_prexor(void *stripe_head_ref)
 996{
 997        struct stripe_head *sh = stripe_head_ref;
 998
 999        pr_debug("%s: stripe %llu\n", __func__,
1000                (unsigned long long)sh->sector);
1001}
1002
1003static struct dma_async_tx_descriptor *
1004ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
1005               struct dma_async_tx_descriptor *tx)
1006{
1007        int disks = sh->disks;
1008        struct page **xor_srcs = percpu->scribble;
1009        int count = 0, pd_idx = sh->pd_idx, i;
1010        struct async_submit_ctl submit;
1011
1012        /* existing parity data subtracted */
1013        struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1014
1015        pr_debug("%s: stripe %llu\n", __func__,
1016                (unsigned long long)sh->sector);
1017
1018        for (i = disks; i--; ) {
1019                struct r5dev *dev = &sh->dev[i];
1020                /* Only process blocks that are known to be uptodate */
1021                if (test_bit(R5_Wantdrain, &dev->flags))
1022                        xor_srcs[count++] = dev->page;
1023        }
1024
1025        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
1026                          ops_complete_prexor, sh, to_addr_conv(sh, percpu));
1027        tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1028
1029        return tx;
1030}
1031
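/*
 * Drain queued write bios into the stripe cache: each dev->towrite list is
 * moved to dev->written, REQ_FUA requests are noted via R5_WantFUA, and the
 * data is copied into the cache pages so parity can be recomputed from them.
 */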
1032static struct dma_async_tx_descriptor *
1033ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1034{
1035        int disks = sh->disks;
1036        int i;
1037
1038        pr_debug("%s: stripe %llu\n", __func__,
1039                (unsigned long long)sh->sector);
1040
1041        for (i = disks; i--; ) {
1042                struct r5dev *dev = &sh->dev[i];
1043                struct bio *chosen;
1044
1045                if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
1046                        struct bio *wbi;
1047
1048                        spin_lock_irq(&sh->raid_conf->device_lock);
1049                        chosen = dev->towrite;
1050                        dev->towrite = NULL;
1051                        BUG_ON(dev->written);
1052                        wbi = dev->written = chosen;
1053                        spin_unlock_irq(&sh->raid_conf->device_lock);
1054
1055                        while (wbi && wbi->bi_sector <
1056                                dev->sector + STRIPE_SECTORS) {
1057                                if (wbi->bi_rw & REQ_FUA)
1058                                        set_bit(R5_WantFUA, &dev->flags);
1059                                tx = async_copy_data(1, wbi, dev->page,
1060                                        dev->sector, tx);
1061                                wbi = r5_next_bio(wbi, dev->sector);
1062                        }
1063                }
1064        }
1065
1066        return tx;
1067}
1068
1069static void ops_complete_reconstruct(void *stripe_head_ref)
1070{
1071        struct stripe_head *sh = stripe_head_ref;
1072        int disks = sh->disks;
1073        int pd_idx = sh->pd_idx;
1074        int qd_idx = sh->qd_idx;
1075        int i;
1076        bool fua = false;
1077
1078        pr_debug("%s: stripe %llu\n", __func__,
1079                (unsigned long long)sh->sector);
1080
1081        for (i = disks; i--; )
1082                fua |= test_bit(R5_WantFUA, &sh->dev[i].flags);
1083
1084        for (i = disks; i--; ) {
1085                struct r5dev *dev = &sh->dev[i];
1086
1087                if (dev->written || i == pd_idx || i == qd_idx) {
1088                        set_bit(R5_UPTODATE, &dev->flags);
1089                        if (fua)
1090                                set_bit(R5_WantFUA, &dev->flags);
1091                }
1092        }
1093
1094        if (sh->reconstruct_state == reconstruct_state_drain_run)
1095                sh->reconstruct_state = reconstruct_state_drain_result;
1096        else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
1097                sh->reconstruct_state = reconstruct_state_prexor_drain_result;
1098        else {
1099                BUG_ON(sh->reconstruct_state != reconstruct_state_run);
1100                sh->reconstruct_state = reconstruct_state_result;
1101        }
1102
1103        set_bit(STRIPE_HANDLE, &sh->state);
1104        release_stripe(sh);
1105}
1106
1107static void
1108ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
1109                     struct dma_async_tx_descriptor *tx)
1110{
1111        int disks = sh->disks;
1112        struct page **xor_srcs = percpu->scribble;
1113        struct async_submit_ctl submit;
1114        int count = 0, pd_idx = sh->pd_idx, i;
1115        struct page *xor_dest;
1116        int prexor = 0;
1117        unsigned long flags;
1118
1119        pr_debug("%s: stripe %llu\n", __func__,
1120                (unsigned long long)sh->sector);
1121
1122        /* check if prexor is active which means only process blocks
1123         * that are part of a read-modify-write (written)
1124         */
1125        if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
1126                prexor = 1;
1127                xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
1128                for (i = disks; i--; ) {
1129                        struct r5dev *dev = &sh->dev[i];
1130                        if (dev->written)
1131                                xor_srcs[count++] = dev->page;
1132                }
1133        } else {
1134                xor_dest = sh->dev[pd_idx].page;
1135                for (i = disks; i--; ) {
1136                        struct r5dev *dev = &sh->dev[i];
1137                        if (i != pd_idx)
1138                                xor_srcs[count++] = dev->page;
1139                }
1140        }
1141
1142        /* 1/ if we prexor'd then the dest is reused as a source
 1143         * 2/ if we did not prexor then we are redoing the parity.
 1144         * Set ASYNC_TX_XOR_DROP_DST or ASYNC_TX_XOR_ZERO_DST accordingly
 1145         * for the synchronous xor case.
1146         */
1147        flags = ASYNC_TX_ACK |
1148                (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);
1149
1150        atomic_inc(&sh->count);
1151
1152        init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh,
1153                          to_addr_conv(sh, percpu));
1154        if (unlikely(count == 1))
1155                tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
1156        else
1157                tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
1158}
1159
1160static void
1161ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu,
1162                     struct dma_async_tx_descriptor *tx)
1163{
1164        struct async_submit_ctl submit;
1165        struct page **blocks = percpu->scribble;
1166        int count;
1167
1168        pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);
1169
1170        count = set_syndrome_sources(blocks, sh);
1171
1172        atomic_inc(&sh->count);
1173
1174        init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct,
1175                          sh, to_addr_conv(sh, percpu));
1176        async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE,  &submit);
1177}
1178
1179static void ops_complete_check(void *stripe_head_ref)
1180{
1181        struct stripe_head *sh = stripe_head_ref;
1182
1183        pr_debug("%s: stripe %llu\n", __func__,
1184                (unsigned long long)sh->sector);
1185
1186        sh->check_state = check_state_check_result;
1187        set_bit(STRIPE_HANDLE, &sh->state);
1188        release_stripe(sh);
1189}
1190
1191static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu)
1192{
1193        int disks = sh->disks;
1194        int pd_idx = sh->pd_idx;
1195        int qd_idx = sh->qd_idx;
1196        struct page *xor_dest;
1197        struct page **xor_srcs = percpu->scribble;
1198        struct dma_async_tx_descriptor *tx;
1199        struct async_submit_ctl submit;
1200        int count;
1201        int i;
1202
1203        pr_debug("%s: stripe %llu\n", __func__,
1204                (unsigned long long)sh->sector);
1205
1206        count = 0;
1207        xor_dest = sh->dev[pd_idx].page;
1208        xor_srcs[count++] = xor_dest;
1209        for (i = disks; i--; ) {
1210                if (i == pd_idx || i == qd_idx)
1211                        continue;
1212                xor_srcs[count++] = sh->dev[i].page;
1213        }
1214
1215        init_async_submit(&submit, 0, NULL, NULL, NULL,
1216                          to_addr_conv(sh, percpu));
1217        tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
1218                           &sh->ops.zero_sum_result, &submit);
1219
1220        atomic_inc(&sh->count);
1221        init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
1222        tx = async_trigger_callback(&submit);
1223}
1224
1225static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp)
1226{
1227        struct page **srcs = percpu->scribble;
1228        struct async_submit_ctl submit;
1229        int count;
1230
1231        pr_debug("%s: stripe %llu checkp: %d\n", __func__,
1232                (unsigned long long)sh->sector, checkp);
1233
1234        count = set_syndrome_sources(srcs, sh);
1235        if (!checkp)
1236                srcs[count] = NULL;
1237
1238        atomic_inc(&sh->count);
1239        init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check,
1240                          sh, to_addr_conv(sh, percpu));
1241        async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE,
1242                           &sh->ops.zero_sum_result, percpu->spare_page, &submit);
1243}
1244
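/*
 * Run the stripe operations requested in 'ops_request' (biofill, compute,
 * prexor, biodrain, reconstruct, check), chaining their descriptors
 * together using this CPU's percpu scribble resources.
 */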
1245static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1246{
1247        int overlap_clear = 0, i, disks = sh->disks;
1248        struct dma_async_tx_descriptor *tx = NULL;
1249        raid5_conf_t *conf = sh->raid_conf;
1250        int level = conf->level;
1251        struct raid5_percpu *percpu;
1252        unsigned long cpu;
1253
1254        cpu = get_cpu();
1255        percpu = per_cpu_ptr(conf->percpu, cpu);
1256        if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
1257                ops_run_biofill(sh);
1258                overlap_clear++;
1259        }
1260
1261        if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
1262                if (level < 6)
1263                        tx = ops_run_compute5(sh, percpu);
1264                else {
1265                        if (sh->ops.target2 < 0 || sh->ops.target < 0)
1266                                tx = ops_run_compute6_1(sh, percpu);
1267                        else
1268                                tx = ops_run_compute6_2(sh, percpu);
1269                }
1270                /* terminate the chain if reconstruct is not set to be run */
1271                if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request))
1272                        async_tx_ack(tx);
1273        }
1274
1275        if (test_bit(STRIPE_OP_PREXOR, &ops_request))
1276                tx = ops_run_prexor(sh, percpu, tx);
1277
1278        if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
1279                tx = ops_run_biodrain(sh, tx);
1280                overlap_clear++;
1281        }
1282
1283        if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) {
1284                if (level < 6)
1285                        ops_run_reconstruct5(sh, percpu, tx);
1286                else
1287                        ops_run_reconstruct6(sh, percpu, tx);
1288        }
1289
1290        if (test_bit(STRIPE_OP_CHECK, &ops_request)) {
1291                if (sh->check_state == check_state_run)
1292                        ops_run_check_p(sh, percpu);
1293                else if (sh->check_state == check_state_run_q)
1294                        ops_run_check_pq(sh, percpu, 0);
1295                else if (sh->check_state == check_state_run_pq)
1296                        ops_run_check_pq(sh, percpu, 1);
1297                else
1298                        BUG();
1299        }
1300
1301        if (overlap_clear)
1302                for (i = disks; i--; ) {
1303                        struct r5dev *dev = &sh->dev[i];
1304                        if (test_and_clear_bit(R5_Overlap, &dev->flags))
1305                                wake_up(&sh->raid_conf->wait_for_overlap);
1306                }
1307        put_cpu();
1308}
1309
1310#ifdef CONFIG_MULTICORE_RAID456
1311static void async_run_ops(void *param, async_cookie_t cookie)
1312{
1313        struct stripe_head *sh = param;
1314        unsigned long ops_request = sh->ops.request;
1315
1316        clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state);
1317        wake_up(&sh->ops.wait_for_ops);
1318
1319        __raid_run_ops(sh, ops_request);
1320        release_stripe(sh);
1321}
1322
1323static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
1324{
1325        /* since handle_stripe can be called outside of raid5d context
1326         * we need to ensure sh->ops.request is de-staged before another
1327         * request arrives
1328         */
1329        wait_event(sh->ops.wait_for_ops,
1330                   !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state));
1331        sh->ops.request = ops_request;
1332
1333        atomic_inc(&sh->count);
1334        async_schedule(async_run_ops, sh);
1335}
1336#else
1337#define raid_run_ops __raid_run_ops
1338#endif
1339
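/*
 * Allocate one stripe_head plus its per-device pages and release it onto
 * the inactive list.  Returns 1 on success, 0 on allocation failure.
 */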
1340static int grow_one_stripe(raid5_conf_t *conf)
1341{
1342        struct stripe_head *sh;
1343        sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
1344        if (!sh)
1345                return 0;
1346
1347        sh->raid_conf = conf;
1348        #ifdef CONFIG_MULTICORE_RAID456
1349        init_waitqueue_head(&sh->ops.wait_for_ops);
1350        #endif
1351
1352        if (grow_buffers(sh)) {
1353                shrink_buffers(sh);
1354                kmem_cache_free(conf->slab_cache, sh);
1355                return 0;
1356        }
1357        /* we just created an active stripe so... */
1358        atomic_set(&sh->count, 1);
1359        atomic_inc(&conf->active_stripes);
1360        INIT_LIST_HEAD(&sh->lru);
1361        release_stripe(sh);
1362        return 1;
1363}
1364
1365static int grow_stripes(raid5_conf_t *conf, int num)
1366{
1367        struct kmem_cache *sc;
1368        int devs = max(conf->raid_disks, conf->previous_raid_disks);
1369
1370        if (conf->mddev->gendisk)
1371                sprintf(conf->cache_name[0],
1372                        "raid%d-%s", conf->level, mdname(conf->mddev));
1373        else
1374                sprintf(conf->cache_name[0],
1375                        "raid%d-%p", conf->level, conf->mddev);
1376        sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);
1377
1378        conf->active_name = 0;
1379        sc = kmem_cache_create(conf->cache_name[conf->active_name],
1380                               sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
1381                               0, 0, NULL);
1382        if (!sc)
1383                return 1;
1384        conf->slab_cache = sc;
1385        conf->pool_size = devs;
1386        while (num--)
1387                if (!grow_one_stripe(conf))
1388                        return 1;
1389        return 0;
1390}
1391
1392/**
1393 * scribble_len - return the required size of the scribble region
1394 * @num: total number of disks in the array
1395 *
1396 * The size must be enough to contain:
1397 * 1/ a struct page pointer for each device in the array +2
1398 * 2/ room to convert each entry in (1) to its corresponding dma
1399 *    (dma_map_page()) or page (page_address()) address.
1400 *
1401 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
1402 * calculate over all devices (not just the data blocks), using zeros in place
1403 * of the P and Q blocks.
1404 */
1405static size_t scribble_len(int num)
1406{
1407        size_t len;
1408
1409        len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);
1410
1411        return len;
1412}
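
/*
 * Worked example (illustrative only): on a 64-bit build where a struct
 * page pointer and an addr_conv_t are both 8 bytes (this is configuration
 * dependent), a 6-device array gives scribble_len(6) ==
 * (6 + 2) * 8 + (6 + 2) * 8 == 128 bytes of per-CPU scratch space: one
 * page-list slot plus one address-conversion slot for each device and
 * for the two extra destination buffers mentioned above.
 */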
1413
1414static int resize_stripes(raid5_conf_t *conf, int newsize)
1415{
1416        /* Make all the stripes able to hold 'newsize' devices.
1417         * New slots in each stripe get 'page' set to a new page.
1418         *
1419         * This happens in stages:
1420         * 1/ create a new kmem_cache and allocate the required number of
1421         *    stripe_heads.
1422         * 2/ gather all the old stripe_heads and transfer the pages across
1423         *    to the new stripe_heads.  This will have the side effect of
1424         *    freezing the array as once all stripe_heads have been collected,
1425         *    no IO will be possible.  Old stripe heads are freed once their
1426         *    pages have been transferred over, and the old kmem_cache is
1427         *    freed when all stripes are done.
1428         * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
1429         *    we simply return a failure status - no need to clean anything up.
1430         * 4/ allocate new pages for the new slots in the new stripe_heads.
1431         *    If this fails, we don't bother trying to shrink the
1432         *    stripe_heads down again, we just leave them as they are.
1433         *    As each stripe_head is processed the new one is released into
1434         *    active service.
1435         *
1436         * Once step2 is started, we cannot afford to wait for a write,
1437         * so we use GFP_NOIO allocations.
1438         */
1439        struct stripe_head *osh, *nsh;
1440        LIST_HEAD(newstripes);
1441        struct disk_info *ndisks;
1442        unsigned long cpu;
1443        int err;
1444        struct kmem_cache *sc;
1445        int i;
1446
1447        if (newsize <= conf->pool_size)
1448                return 0; /* never bother to shrink */
1449
1450        err = md_allow_write(conf->mddev);
1451        if (err)
1452                return err;
1453
1454        /* Step 1 */
1455        sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
1456                               sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
1457                               0, 0, NULL);
1458        if (!sc)
1459                return -ENOMEM;
1460
1461        for (i = conf->max_nr_stripes; i; i--) {
1462                nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
1463                if (!nsh)
1464                        break;
1465
1466                nsh->raid_conf = conf;
1467                #ifdef CONFIG_MULTICORE_RAID456
1468                init_waitqueue_head(&nsh->ops.wait_for_ops);
1469                #endif
1470
1471                list_add(&nsh->lru, &newstripes);
1472        }
1473        if (i) {
1474                /* didn't get enough, give up */
1475                while (!list_empty(&newstripes)) {
1476                        nsh = list_entry(newstripes.next, struct stripe_head, lru);
1477                        list_del(&nsh->lru);
1478                        kmem_cache_free(sc, nsh);
1479                }
1480                kmem_cache_destroy(sc);
1481                return -ENOMEM;
1482        }
1483        /* Step 2 - Must use GFP_NOIO now.
1484         * OK, we have enough stripes, start collecting inactive
1485         * stripes and copying them over
1486         */
1487        list_for_each_entry(nsh, &newstripes, lru) {
1488                spin_lock_irq(&conf->device_lock);
1489                wait_event_lock_irq(conf->wait_for_stripe,
1490                                    !list_empty(&conf->inactive_list),
1491                                    conf->device_lock,
1492                                    );
1493                osh = get_free_stripe(conf);
1494                spin_unlock_irq(&conf->device_lock);
1495                atomic_set(&nsh->count, 1);
1496                for(i=0; i<conf->pool_size; i++)
1497                        nsh->dev[i].page = osh->dev[i].page;
1498                for( ; i<newsize; i++)
1499                        nsh->dev[i].page = NULL;
1500                kmem_cache_free(conf->slab_cache, osh);
1501        }
1502        kmem_cache_destroy(conf->slab_cache);
1503
1504        /* Step 3.
1505         * At this point, we are holding all the stripes, so the array
1506         * is completely stalled; now is a good time to resize
1507         * conf->disks and the scribble region
1508         */
1509        ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
1510        if (ndisks) {
1511                for (i=0; i<conf->raid_disks; i++)
1512                        ndisks[i] = conf->disks[i];
1513                kfree(conf->disks);
1514                conf->disks = ndisks;
1515        } else
1516                err = -ENOMEM;
1517
1518        get_online_cpus();
1519        conf->scribble_len = scribble_len(newsize);
1520        for_each_present_cpu(cpu) {
1521                struct raid5_percpu *percpu;
1522                void *scribble;
1523
1524                percpu = per_cpu_ptr(conf->percpu, cpu);
1525                scribble = kmalloc(conf->scribble_len, GFP_NOIO);
1526
1527                if (scribble) {
1528                        kfree(percpu->scribble);
1529                        percpu->scribble = scribble;
1530                } else {
1531                        err = -ENOMEM;
1532                        break;
1533                }
1534        }
1535        put_online_cpus();
1536
1537        /* Step 4, return new stripes to service */
1538        while(!list_empty(&newstripes)) {
1539                nsh = list_entry(newstripes.next, struct stripe_head, lru);
1540                list_del_init(&nsh->lru);
1541
1542                for (i=conf->raid_disks; i < newsize; i++)
1543                        if (nsh->dev[i].page == NULL) {
1544                                struct page *p = alloc_page(GFP_NOIO);
1545                                nsh->dev[i].page = p;
1546                                if (!p)
1547                                        err = -ENOMEM;
1548                        }
1549                release_stripe(nsh);
1550        }
1551        /* critical section passed, GFP_NOIO no longer needed */
1552
1553        conf->slab_cache = sc;
1554        conf->active_name = 1-conf->active_name;
1555        conf->pool_size = newsize;
1556        return err;
1557}
1558
1559static int drop_one_stripe(raid5_conf_t *conf)
1560{
1561        struct stripe_head *sh;
1562
1563        spin_lock_irq(&conf->device_lock);
1564        sh = get_free_stripe(conf);
1565        spin_unlock_irq(&conf->device_lock);
1566        if (!sh)
1567                return 0;
1568        BUG_ON(atomic_read(&sh->count));
1569        shrink_buffers(sh);
1570        kmem_cache_free(conf->slab_cache, sh);
1571        atomic_dec(&conf->active_stripes);
1572        return 1;
1573}
1574
1575static void shrink_stripes(raid5_conf_t *conf)
1576{
1577        while (drop_one_stripe(conf))
1578                ;
1579
1580        if (conf->slab_cache)
1581                kmem_cache_destroy(conf->slab_cache);
1582        conf->slab_cache = NULL;
1583}
1584
1585static void raid5_end_read_request(struct bio * bi, int error)
1586{
1587        struct stripe_head *sh = bi->bi_private;
1588        raid5_conf_t *conf = sh->raid_conf;
1589        int disks = sh->disks, i;
1590        int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1591        char b[BDEVNAME_SIZE];
1592        mdk_rdev_t *rdev;
1593
1594
1595        for (i=0 ; i<disks; i++)
1596                if (bi == &sh->dev[i].req)
1597                        break;
1598
1599        pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
1600                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1601                uptodate);
1602        if (i == disks) {
1603                BUG();
1604                return;
1605        }
1606
1607        if (uptodate) {
1608                set_bit(R5_UPTODATE, &sh->dev[i].flags);
1609                if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
1610                        rdev = conf->disks[i].rdev;
1611                        printk_ratelimited(
1612                                KERN_INFO
1613                                "md/raid:%s: read error corrected"
1614                                " (%lu sectors at %llu on %s)\n",
1615                                mdname(conf->mddev), STRIPE_SECTORS,
1616                                (unsigned long long)(sh->sector
1617                                                     + rdev->data_offset),
1618                                bdevname(rdev->bdev, b));
1619                        atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
1620                        clear_bit(R5_ReadError, &sh->dev[i].flags);
1621                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
1622                }
1623                if (atomic_read(&conf->disks[i].rdev->read_errors))
1624                        atomic_set(&conf->disks[i].rdev->read_errors, 0);
1625        } else {
1626                const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
1627                int retry = 0;
1628                rdev = conf->disks[i].rdev;
1629
1630                clear_bit(R5_UPTODATE, &sh->dev[i].flags);
1631                atomic_inc(&rdev->read_errors);
1632                if (conf->mddev->degraded >= conf->max_degraded)
1633                        printk_ratelimited(
1634                                KERN_WARNING
1635                                "md/raid:%s: read error not correctable "
1636                                "(sector %llu on %s).\n",
1637                                mdname(conf->mddev),
1638                                (unsigned long long)(sh->sector
1639                                                     + rdev->data_offset),
1640                                bdn);
1641                else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
1642                        /* Oh, no!!! */
1643                        printk_ratelimited(
1644                                KERN_WARNING
1645                                "md/raid:%s: read error NOT corrected!! "
1646                                "(sector %llu on %s).\n",
1647                                mdname(conf->mddev),
1648                                (unsigned long long)(sh->sector
1649                                                     + rdev->data_offset),
1650                                bdn);
1651                else if (atomic_read(&rdev->read_errors)
1652                         > conf->max_nr_stripes)
1653                        printk(KERN_WARNING
1654                               "md/raid:%s: Too many read errors, failing device %s.\n",
1655                               mdname(conf->mddev), bdn);
1656                else
1657                        retry = 1;
1658                if (retry)
1659                        set_bit(R5_ReadError, &sh->dev[i].flags);
1660                else {
1661                        clear_bit(R5_ReadError, &sh->dev[i].flags);
1662                        clear_bit(R5_ReWrite, &sh->dev[i].flags);
1663                        md_error(conf->mddev, rdev);
1664                }
1665        }
1666        rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1667        clear_bit(R5_LOCKED, &sh->dev[i].flags);
1668        set_bit(STRIPE_HANDLE, &sh->state);
1669        release_stripe(sh);
1670}
1671
1672static void raid5_end_write_request(struct bio *bi, int error)
1673{
1674        struct stripe_head *sh = bi->bi_private;
1675        raid5_conf_t *conf = sh->raid_conf;
1676        int disks = sh->disks, i;
1677        int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
1678        sector_t first_bad;
1679        int bad_sectors;
1680
1681        for (i=0 ; i<disks; i++)
1682                if (bi == &sh->dev[i].req)
1683                        break;
1684
1685        pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
1686                (unsigned long long)sh->sector, i, atomic_read(&sh->count),
1687                uptodate);
1688        if (i == disks) {
1689                BUG();
1690                return;
1691        }
1692
1693        if (!uptodate) {
1694                set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags);
1695                set_bit(R5_WriteError, &sh->dev[i].flags);
1696        } else if (is_badblock(conf->disks[i].rdev, sh->sector, STRIPE_SECTORS,
1697                               &first_bad, &bad_sectors))
1698                set_bit(R5_MadeGood, &sh->dev[i].flags);
1699
1700        rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
1701
1702        clear_bit(R5_LOCKED, &sh->dev[i].flags);
1703        set_bit(STRIPE_HANDLE, &sh->state);
1704        release_stripe(sh);
1705}
1706
1707
1708static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
1709
1710static void raid5_build_block(struct stripe_head *sh, int i, int previous)
1711{
1712        struct r5dev *dev = &sh->dev[i];
1713
1714        bio_init(&dev->req);
1715        dev->req.bi_io_vec = &dev->vec;
1716        dev->req.bi_vcnt++;
1717        dev->req.bi_max_vecs++;
1718        dev->vec.bv_page = dev->page;
1719        dev->vec.bv_len = STRIPE_SIZE;
1720        dev->vec.bv_offset = 0;
1721
1722        dev->req.bi_sector = sh->sector;
1723        dev->req.bi_private = sh;
1724
1725        dev->flags = 0;
1726        dev->sector = compute_blocknr(sh, i, previous);
1727}
1728
1729static void error(mddev_t *mddev, mdk_rdev_t *rdev)
1730{
1731        char b[BDEVNAME_SIZE];
1732        raid5_conf_t *conf = mddev->private;
1733        pr_debug("raid456: error called\n");
1734
1735        if (test_and_clear_bit(In_sync, &rdev->flags)) {
1736                unsigned long flags;
1737                spin_lock_irqsave(&conf->device_lock, flags);
1738                mddev->degraded++;
1739                spin_unlock_irqrestore(&conf->device_lock, flags);
1740                /*
1741                 * if recovery was running, make sure it aborts.
1742                 */
1743                set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1744        }
1745        set_bit(Blocked, &rdev->flags);
1746        set_bit(Faulty, &rdev->flags);
1747        set_bit(MD_CHANGE_DEVS, &mddev->flags);
1748        printk(KERN_ALERT
1749               "md/raid:%s: Disk failure on %s, disabling device.\n"
1750               "md/raid:%s: Operation continuing on %d devices.\n",
1751               mdname(mddev),
1752               bdevname(rdev->bdev, b),
1753               mdname(mddev),
1754               conf->raid_disks - mddev->degraded);
1755}
1756
1757/*
1758 * Input: a 'big' sector number,
1759 * Output: index of the data and parity disk, and the sector # in them.
1760 */
1761static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector,
1762                                     int previous, int *dd_idx,
1763                                     struct stripe_head *sh)
1764{
1765        sector_t stripe, stripe2;
1766        sector_t chunk_number;
1767        unsigned int chunk_offset;
1768        int pd_idx, qd_idx;
1769        int ddf_layout = 0;
1770        sector_t new_sector;
1771        int algorithm = previous ? conf->prev_algo
1772                                 : conf->algorithm;
1773        int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1774                                         : conf->chunk_sectors;
1775        int raid_disks = previous ? conf->previous_raid_disks
1776                                  : conf->raid_disks;
1777        int data_disks = raid_disks - conf->max_degraded;
1778
1779        /* First compute the information on this sector */
1780
1781        /*
1782         * Compute the chunk number and the sector offset inside the chunk
1783         */
1784        chunk_offset = sector_div(r_sector, sectors_per_chunk);
1785        chunk_number = r_sector;
1786
1787        /*
1788         * Compute the stripe number
1789         */
1790        stripe = chunk_number;
1791        *dd_idx = sector_div(stripe, data_disks);
1792        stripe2 = stripe;
1793        /*
1794         * Select the parity disk based on the user selected algorithm.
1795         */
1796        pd_idx = qd_idx = -1;
1797        switch(conf->level) {
1798        case 4:
1799                pd_idx = data_disks;
1800                break;
1801        case 5:
1802                switch (algorithm) {
1803                case ALGORITHM_LEFT_ASYMMETRIC:
1804                        pd_idx = data_disks - sector_div(stripe2, raid_disks);
1805                        if (*dd_idx >= pd_idx)
1806                                (*dd_idx)++;
1807                        break;
1808                case ALGORITHM_RIGHT_ASYMMETRIC:
1809                        pd_idx = sector_div(stripe2, raid_disks);
1810                        if (*dd_idx >= pd_idx)
1811                                (*dd_idx)++;
1812                        break;
1813                case ALGORITHM_LEFT_SYMMETRIC:
1814                        pd_idx = data_disks - sector_div(stripe2, raid_disks);
1815                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1816                        break;
1817                case ALGORITHM_RIGHT_SYMMETRIC:
1818                        pd_idx = sector_div(stripe2, raid_disks);
1819                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1820                        break;
1821                case ALGORITHM_PARITY_0:
1822                        pd_idx = 0;
1823                        (*dd_idx)++;
1824                        break;
1825                case ALGORITHM_PARITY_N:
1826                        pd_idx = data_disks;
1827                        break;
1828                default:
1829                        BUG();
1830                }
1831                break;
1832        case 6:
1833
1834                switch (algorithm) {
1835                case ALGORITHM_LEFT_ASYMMETRIC:
1836                        pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1837                        qd_idx = pd_idx + 1;
1838                        if (pd_idx == raid_disks-1) {
1839                                (*dd_idx)++;    /* Q D D D P */
1840                                qd_idx = 0;
1841                        } else if (*dd_idx >= pd_idx)
1842                                (*dd_idx) += 2; /* D D P Q D */
1843                        break;
1844                case ALGORITHM_RIGHT_ASYMMETRIC:
1845                        pd_idx = sector_div(stripe2, raid_disks);
1846                        qd_idx = pd_idx + 1;
1847                        if (pd_idx == raid_disks-1) {
1848                                (*dd_idx)++;    /* Q D D D P */
1849                                qd_idx = 0;
1850                        } else if (*dd_idx >= pd_idx)
1851                                (*dd_idx) += 2; /* D D P Q D */
1852                        break;
1853                case ALGORITHM_LEFT_SYMMETRIC:
1854                        pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1855                        qd_idx = (pd_idx + 1) % raid_disks;
1856                        *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1857                        break;
1858                case ALGORITHM_RIGHT_SYMMETRIC:
1859                        pd_idx = sector_div(stripe2, raid_disks);
1860                        qd_idx = (pd_idx + 1) % raid_disks;
1861                        *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
1862                        break;
1863
1864                case ALGORITHM_PARITY_0:
1865                        pd_idx = 0;
1866                        qd_idx = 1;
1867                        (*dd_idx) += 2;
1868                        break;
1869                case ALGORITHM_PARITY_N:
1870                        pd_idx = data_disks;
1871                        qd_idx = data_disks + 1;
1872                        break;
1873
1874                case ALGORITHM_ROTATING_ZERO_RESTART:
1875                        /* Exactly the same as RIGHT_ASYMMETRIC, but the
1876                         * order of blocks used for computing Q is different.
1877                         */
1878                        pd_idx = sector_div(stripe2, raid_disks);
1879                        qd_idx = pd_idx + 1;
1880                        if (pd_idx == raid_disks-1) {
1881                                (*dd_idx)++;    /* Q D D D P */
1882                                qd_idx = 0;
1883                        } else if (*dd_idx >= pd_idx)
1884                                (*dd_idx) += 2; /* D D P Q D */
1885                        ddf_layout = 1;
1886                        break;
1887
1888                case ALGORITHM_ROTATING_N_RESTART:
1889                        /* Same as left_asymmetric, but the first stripe is
1890                         * D D D P Q  rather than
1891                         * Q D D D P
1892                         */
1893                        stripe2 += 1;
1894                        pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1895                        qd_idx = pd_idx + 1;
1896                        if (pd_idx == raid_disks-1) {
1897                                (*dd_idx)++;    /* Q D D D P */
1898                                qd_idx = 0;
1899                        } else if (*dd_idx >= pd_idx)
1900                                (*dd_idx) += 2; /* D D P Q D */
1901                        ddf_layout = 1;
1902                        break;
1903
1904                case ALGORITHM_ROTATING_N_CONTINUE:
1905                        /* Same as left_symmetric but Q is before P */
1906                        pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
1907                        qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
1908                        *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
1909                        ddf_layout = 1;
1910                        break;
1911
1912                case ALGORITHM_LEFT_ASYMMETRIC_6:
1913                        /* RAID5 left_asymmetric, with Q on last device */
1914                        pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1915                        if (*dd_idx >= pd_idx)
1916                                (*dd_idx)++;
1917                        qd_idx = raid_disks - 1;
1918                        break;
1919
1920                case ALGORITHM_RIGHT_ASYMMETRIC_6:
1921                        pd_idx = sector_div(stripe2, raid_disks-1);
1922                        if (*dd_idx >= pd_idx)
1923                                (*dd_idx)++;
1924                        qd_idx = raid_disks - 1;
1925                        break;
1926
1927                case ALGORITHM_LEFT_SYMMETRIC_6:
1928                        pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
1929                        *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1930                        qd_idx = raid_disks - 1;
1931                        break;
1932
1933                case ALGORITHM_RIGHT_SYMMETRIC_6:
1934                        pd_idx = sector_div(stripe2, raid_disks-1);
1935                        *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
1936                        qd_idx = raid_disks - 1;
1937                        break;
1938
1939                case ALGORITHM_PARITY_0_6:
1940                        pd_idx = 0;
1941                        (*dd_idx)++;
1942                        qd_idx = raid_disks - 1;
1943                        break;
1944
1945                default:
1946                        BUG();
1947                }
1948                break;
1949        }
1950
1951        if (sh) {
1952                sh->pd_idx = pd_idx;
1953                sh->qd_idx = qd_idx;
1954                sh->ddf_layout = ddf_layout;
1955        }
1956        /*
1957         * Finally, compute the new sector number
1958         */
1959        new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
1960        return new_sector;
1961}
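
/*
 * Worked example (illustrative, hypothetical geometry): on a 3-disk RAID5
 * using ALGORITHM_LEFT_SYMMETRIC with 64-sector chunks, array sector 200
 * falls in chunk 3 at offset 8, i.e. stripe row 1, data index 1.  Parity
 * for row 1 rotates onto disk 1, so the data lands on disk
 * (1 + 1 + 1) % 3 == 0, and
 *
 *	raid5_compute_sector(conf, 200, 0, &dd_idx, NULL)
 *
 * returns device sector 1 * 64 + 8 == 72 with dd_idx == 0 (passing a NULL
 * stripe_head is fine when only the sector and data-disk index are needed).
 */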
1962
1963
1964static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
1965{
1966        raid5_conf_t *conf = sh->raid_conf;
1967        int raid_disks = sh->disks;
1968        int data_disks = raid_disks - conf->max_degraded;
1969        sector_t new_sector = sh->sector, check;
1970        int sectors_per_chunk = previous ? conf->prev_chunk_sectors
1971                                         : conf->chunk_sectors;
1972        int algorithm = previous ? conf->prev_algo
1973                                 : conf->algorithm;
1974        sector_t stripe;
1975        int chunk_offset;
1976        sector_t chunk_number;
1977        int dummy1, dd_idx = i;
1978        sector_t r_sector;
1979        struct stripe_head sh2;
1980
1981
1982        chunk_offset = sector_div(new_sector, sectors_per_chunk);
1983        stripe = new_sector;
1984
1985        if (i == sh->pd_idx)
1986                return 0;
1987        switch(conf->level) {
1988        case 4: break;
1989        case 5:
1990                switch (algorithm) {
1991                case ALGORITHM_LEFT_ASYMMETRIC:
1992                case ALGORITHM_RIGHT_ASYMMETRIC:
1993                        if (i > sh->pd_idx)
1994                                i--;
1995                        break;
1996                case ALGORITHM_LEFT_SYMMETRIC:
1997                case ALGORITHM_RIGHT_SYMMETRIC:
1998                        if (i < sh->pd_idx)
1999                                i += raid_disks;
2000                        i -= (sh->pd_idx + 1);
2001                        break;
2002                case ALGORITHM_PARITY_0:
2003                        i -= 1;
2004                        break;
2005                case ALGORITHM_PARITY_N:
2006                        break;
2007                default:
2008                        BUG();
2009                }
2010                break;
2011        case 6:
2012                if (i == sh->qd_idx)
2013                        return 0; /* It is the Q disk */
2014                switch (algorithm) {
2015                case ALGORITHM_LEFT_ASYMMETRIC:
2016                case ALGORITHM_RIGHT_ASYMMETRIC:
2017                case ALGORITHM_ROTATING_ZERO_RESTART:
2018                case ALGORITHM_ROTATING_N_RESTART:
2019                        if (sh->pd_idx == raid_disks-1)
2020                                i--;    /* Q D D D P */
2021                        else if (i > sh->pd_idx)
2022                                i -= 2; /* D D P Q D */
2023                        break;
2024                case ALGORITHM_LEFT_SYMMETRIC:
2025                case ALGORITHM_RIGHT_SYMMETRIC:
2026                        if (sh->pd_idx == raid_disks-1)
2027                                i--; /* Q D D D P */
2028                        else {
2029                                /* D D P Q D */
2030                                if (i < sh->pd_idx)
2031                                        i += raid_disks;
2032                                i -= (sh->pd_idx + 2);
2033                        }
2034                        break;
2035                case ALGORITHM_PARITY_0:
2036                        i -= 2;
2037                        break;
2038                case ALGORITHM_PARITY_N:
2039                        break;
2040                case ALGORITHM_ROTATING_N_CONTINUE:
2041                        /* Like left_symmetric, but P is before Q */
2042                        if (sh->pd_idx == 0)
2043                                i--;    /* P D D D Q */
2044                        else {
2045                                /* D D Q P D */
2046                                if (i < sh->pd_idx)
2047                                        i += raid_disks;
2048                                i -= (sh->pd_idx + 1);
2049                        }
2050                        break;
2051                case ALGORITHM_LEFT_ASYMMETRIC_6:
2052                case ALGORITHM_RIGHT_ASYMMETRIC_6:
2053                        if (i > sh->pd_idx)
2054                                i--;
2055                        break;
2056                case ALGORITHM_LEFT_SYMMETRIC_6:
2057                case ALGORITHM_RIGHT_SYMMETRIC_6:
2058                        if (i < sh->pd_idx)
2059                                i += data_disks + 1;
2060                        i -= (sh->pd_idx + 1);
2061                        break;
2062                case ALGORITHM_PARITY_0_6:
2063                        i -= 1;
2064                        break;
2065                default:
2066                        BUG();
2067                }
2068                break;
2069        }
2070
2071        chunk_number = stripe * data_disks + i;
2072        r_sector = chunk_number * sectors_per_chunk + chunk_offset;
2073
2074        check = raid5_compute_sector(conf, r_sector,
2075                                     previous, &dummy1, &sh2);
2076        if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx
2077                || sh2.qd_idx != sh->qd_idx) {
2078                printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n",
2079                       mdname(conf->mddev));
2080                return 0;
2081        }
2082        return r_sector;
2083}
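
/*
 * Continuing the hypothetical example above: for a stripe_head at device
 * sector 72 on that 3-disk left-symmetric array (pd_idx == 1),
 * compute_blocknr(sh, 0, 0) maps disk 0 back to data index 1 of stripe
 * row 1, i.e. array sector (1 * 2 + 1) * 64 + 8 == 200, and the
 * raid5_compute_sector() cross-check above confirms the round trip.
 */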
2084
2085
2086static void
2087schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
2088                         int rcw, int expand)
2089{
2090        int i, pd_idx = sh->pd_idx, disks = sh->disks;
2091        raid5_conf_t *conf = sh->raid_conf;
2092        int level = conf->level;
2093
2094        if (rcw) {
2095                /* if we are not expanding this is a proper write request, and
2096                 * there will be bios with new data to be drained into the
2097                 * stripe cache
2098                 */
2099                if (!expand) {
2100                        sh->reconstruct_state = reconstruct_state_drain_run;
2101                        set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2102                } else
2103                        sh->reconstruct_state = reconstruct_state_run;
2104
2105                set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2106
2107                for (i = disks; i--; ) {
2108                        struct r5dev *dev = &sh->dev[i];
2109
2110                        if (dev->towrite) {
2111                                set_bit(R5_LOCKED, &dev->flags);
2112                                set_bit(R5_Wantdrain, &dev->flags);
2113                                if (!expand)
2114                                        clear_bit(R5_UPTODATE, &dev->flags);
2115                                s->locked++;
2116                        }
2117                }
2118                if (s->locked + conf->max_degraded == disks)
2119                        if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
2120                                atomic_inc(&conf->pending_full_writes);
2121        } else {
2122                BUG_ON(level == 6);
2123                BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
2124                        test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
2125
2126                sh->reconstruct_state = reconstruct_state_prexor_drain_run;
2127                set_bit(STRIPE_OP_PREXOR, &s->ops_request);
2128                set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
2129                set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
2130
2131                for (i = disks; i--; ) {
2132                        struct r5dev *dev = &sh->dev[i];
2133                        if (i == pd_idx)
2134                                continue;
2135
2136                        if (dev->towrite &&
2137                            (test_bit(R5_UPTODATE, &dev->flags) ||
2138                             test_bit(R5_Wantcompute, &dev->flags))) {
2139                                set_bit(R5_Wantdrain, &dev->flags);
2140                                set_bit(R5_LOCKED, &dev->flags);
2141                                clear_bit(R5_UPTODATE, &dev->flags);
2142                                s->locked++;
2143                        }
2144                }
2145        }
2146
2147        /* keep the parity disk(s) locked while asynchronous operations
2148         * are in flight
2149         */
2150        set_bit(R5_LOCKED, &sh->dev[pd_idx].flags);
2151        clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2152        s->locked++;
2153
2154        if (level == 6) {
2155                int qd_idx = sh->qd_idx;
2156                struct r5dev *dev = &sh->dev[qd_idx];
2157
2158                set_bit(R5_LOCKED, &dev->flags);
2159                clear_bit(R5_UPTODATE, &dev->flags);
2160                s->locked++;
2161        }
2162
2163        pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2164                __func__, (unsigned long long)sh->sector,
2165                s->locked, s->ops_request);
2166}
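
/*
 * Example of the accounting above (illustrative): a full-stripe write on
 * a 4-disk RAID5 taking the rcw branch with expand == 0 locks the three
 * data devices that have ->towrite and flags them R5_Wantdrain
 * (s->locked == 3), which satisfies locked + max_degraded == disks and
 * sets STRIPE_FULL_WRITE; the parity device is then locked as well,
 * leaving s->locked == 4 while the drain/reconstruct operations run.
 */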
2167
2168/*
2169 * Each stripe/dev can have one or more bios attached.
2170 * toread/towrite point to the first in a chain.
2171 * The bi_next chain must be in order.
2172 */
2173static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
2174{
2175        struct bio **bip;
2176        raid5_conf_t *conf = sh->raid_conf;
2177        int firstwrite=0;
2178
2179        pr_debug("adding bi b#%llu to stripe s#%llu\n",
2180                (unsigned long long)bi->bi_sector,
2181                (unsigned long long)sh->sector);
2182
2183
2184        spin_lock_irq(&conf->device_lock);
2185        if (forwrite) {
2186                bip = &sh->dev[dd_idx].towrite;
2187                if (*bip == NULL && sh->dev[dd_idx].written == NULL)
2188                        firstwrite = 1;
2189        } else
2190                bip = &sh->dev[dd_idx].toread;
2191        while (*bip && (*bip)->bi_sector < bi->bi_sector) {
2192                if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
2193                        goto overlap;
2194                bip = & (*bip)->bi_next;
2195        }
2196        if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
2197                goto overlap;
2198
2199        BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
2200        if (*bip)
2201                bi->bi_next = *bip;
2202        *bip = bi;
2203        bi->bi_phys_segments++;
2204
2205        if (forwrite) {
2206                /* check if page is covered */
2207                sector_t sector = sh->dev[dd_idx].sector;
2208                for (bi=sh->dev[dd_idx].towrite;
2209                     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
2210                             bi && bi->bi_sector <= sector;
2211                     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
2212                        if (bi->bi_sector + (bi->bi_size>>9) >= sector)
2213                                sector = bi->bi_sector + (bi->bi_size>>9);
2214                }
2215                if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
2216                        set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
2217        }
2218        spin_unlock_irq(&conf->device_lock);
2219
2220        pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2221                (unsigned long long)(*bip)->bi_sector,
2222                (unsigned long long)sh->sector, dd_idx);
2223
2224        if (conf->mddev->bitmap && firstwrite) {
2225                bitmap_startwrite(conf->mddev->bitmap, sh->sector,
2226                                  STRIPE_SECTORS, 0);
2227                sh->bm_seq = conf->seq_flush+1;
2228                set_bit(STRIPE_BIT_DELAY, &sh->state);
2229        }
2230        return 1;
2231
2232 overlap:
2233        set_bit(R5_Overlap, &sh->dev[dd_idx].flags);
2234        spin_unlock_irq(&conf->device_lock);
2235        return 0;
2236}
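
/*
 * Example of the coverage scan above (illustrative; STRIPE_SECTORS is 8
 * when pages are 4KiB): two write bios queued on the same stripe+device
 * covering dev sectors [s, s+4) and [s+4, s+8) are kept in bi_sector
 * order, and the loop advances 'sector' to s+4 and then to s+8, so
 * R5_OVERWRITE is set just as it would be for a single bio covering the
 * whole page.
 */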
2237
2238static void end_reshape(raid5_conf_t *conf);
2239
2240static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
2241                            struct stripe_head *sh)
2242{
2243        int sectors_per_chunk =
2244                previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
2245        int dd_idx;
2246        int chunk_offset = sector_div(stripe, sectors_per_chunk);
2247        int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
2248
2249        raid5_compute_sector(conf,
2250                             stripe * (disks - conf->max_degraded)
2251                             *sectors_per_chunk + chunk_offset,
2252                             previous,
2253                             &dd_idx, sh);
2254}
2255
2256static void
2257handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
2258                                struct stripe_head_state *s, int disks,
2259                                struct bio **return_bi)
2260{
2261        int i;
2262        for (i = disks; i--; ) {
2263                struct bio *bi;
2264                int bitmap_end = 0;
2265
2266                if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
2267                        mdk_rdev_t *rdev;
2268                        rcu_read_lock();
2269                        rdev = rcu_dereference(conf->disks[i].rdev);
2270                        if (rdev && test_bit(In_sync, &rdev->flags))
2271                                atomic_inc(&rdev->nr_pending);
2272                        else
2273                                rdev = NULL;
2274                        rcu_read_unlock();
2275                        if (rdev) {
2276                                if (!rdev_set_badblocks(
2277                                            rdev,
2278                                            sh->sector,
2279                                            STRIPE_SECTORS, 0))
2280                                        md_error(conf->mddev, rdev);
2281                                rdev_dec_pending(rdev, conf->mddev);
2282                        }
2283                }
2284                spin_lock_irq(&conf->device_lock);
2285                /* fail all writes first */
2286                bi = sh->dev[i].towrite;
2287                sh->dev[i].towrite = NULL;
2288                if (bi) {
2289                        s->to_write--;
2290                        bitmap_end = 1;
2291                }
2292
2293                if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2294                        wake_up(&conf->wait_for_overlap);
2295
2296                while (bi && bi->bi_sector <
2297                        sh->dev[i].sector + STRIPE_SECTORS) {
2298                        struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2299                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
2300                        if (!raid5_dec_bi_phys_segments(bi)) {
2301                                md_write_end(conf->mddev);
2302                                bi->bi_next = *return_bi;
2303                                *return_bi = bi;
2304                        }
2305                        bi = nextbi;
2306                }
2307                /* and fail all 'written' */
2308                bi = sh->dev[i].written;
2309                sh->dev[i].written = NULL;
2310                if (bi) bitmap_end = 1;
2311                while (bi && bi->bi_sector <
2312                       sh->dev[i].sector + STRIPE_SECTORS) {
2313                        struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
2314                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
2315                        if (!raid5_dec_bi_phys_segments(bi)) {
2316                                md_write_end(conf->mddev);
2317                                bi->bi_next = *return_bi;
2318                                *return_bi = bi;
2319                        }
2320                        bi = bi2;
2321                }
2322
2323                /* fail any reads if this device is non-operational and
2324                 * the data has not reached the cache yet.
2325                 */
2326                if (!test_bit(R5_Wantfill, &sh->dev[i].flags) &&
2327                    (!test_bit(R5_Insync, &sh->dev[i].flags) ||
2328                      test_bit(R5_ReadError, &sh->dev[i].flags))) {
2329                        bi = sh->dev[i].toread;
2330                        sh->dev[i].toread = NULL;
2331                        if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2332                                wake_up(&conf->wait_for_overlap);
2333                        if (bi) s->to_read--;
2334                        while (bi && bi->bi_sector <
2335                               sh->dev[i].sector + STRIPE_SECTORS) {
2336                                struct bio *nextbi =
2337                                        r5_next_bio(bi, sh->dev[i].sector);
2338                                clear_bit(BIO_UPTODATE, &bi->bi_flags);
2339                                if (!raid5_dec_bi_phys_segments(bi)) {
2340                                        bi->bi_next = *return_bi;
2341                                        *return_bi = bi;
2342                                }
2343                                bi = nextbi;
2344                        }
2345                }
2346                spin_unlock_irq(&conf->device_lock);
2347                if (bitmap_end)
2348                        bitmap_endwrite(conf->mddev->bitmap, sh->sector,
2349                                        STRIPE_SECTORS, 0, 0);
2350                /* If we were in the middle of a write the parity block might
2351                 * still be locked - so just clear all R5_LOCKED flags
2352                 */
2353                clear_bit(R5_LOCKED, &sh->dev[i].flags);
2354        }
2355
2356        if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2357                if (atomic_dec_and_test(&conf->pending_full_writes))
2358                        md_wakeup_thread(conf->mddev->thread);
2359}
2360
2361static void
2362handle_failed_sync(raid5_conf_t *conf, struct stripe_head *sh,
2363                   struct stripe_head_state *s)
2364{
2365        int abort = 0;
2366        int i;
2367
2368        md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
2369        clear_bit(STRIPE_SYNCING, &sh->state);
2370        s->syncing = 0;
2371        /* There is nothing more to do for sync/check/repair.
2372         * For recovery we need to record a bad block on all
2373         * non-sync devices, or abort the recovery
2374         */
2375        if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery))
2376                return;
2377        /* During recovery devices cannot be removed, so locking and
2378         * refcounting of rdevs is not needed
2379         */
2380        for (i = 0; i < conf->raid_disks; i++) {
2381                mdk_rdev_t *rdev = conf->disks[i].rdev;
2382                if (!rdev
2383                    || test_bit(Faulty, &rdev->flags)
2384                    || test_bit(In_sync, &rdev->flags))
2385                        continue;
2386                if (!rdev_set_badblocks(rdev, sh->sector,
2387                                        STRIPE_SECTORS, 0))
2388                        abort = 1;
2389        }
2390        if (abort) {
2391                conf->recovery_disabled = conf->mddev->recovery_disabled;
2392                set_bit(MD_RECOVERY_INTR, &conf->mddev->recovery);
2393        }
2394}
2395
2396/* fetch_block - checks the given member device to see if its data needs
2397 * to be read or computed to satisfy a request.
2398 *
2399 * Returns 1 when no more member devices need to be checked, otherwise returns
2400 * 0 to tell the loop in handle_stripe_fill to continue
2401 */
2402static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
2403                       int disk_idx, int disks)
2404{
2405        struct r5dev *dev = &sh->dev[disk_idx];
2406        struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]],
2407                                  &sh->dev[s->failed_num[1]] };
2408
2409        /* is the data in this block needed, and can we get it? */
2410        if (!test_bit(R5_LOCKED, &dev->flags) &&
2411            !test_bit(R5_UPTODATE, &dev->flags) &&
2412            (dev->toread ||
2413             (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
2414             s->syncing || s->expanding ||
2415             (s->failed >= 1 && fdev[0]->toread) ||
2416             (s->failed >= 2 && fdev[1]->toread) ||
2417             (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
2418              !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
2419             (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
2420                /* we would like to get this block, possibly by computing it,
2421                 * otherwise read it if the backing disk is insync
2422                 */
2423                BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
2424                BUG_ON(test_bit(R5_Wantread, &dev->flags));
2425                if ((s->uptodate == disks - 1) &&
2426                    (s->failed && (disk_idx == s->failed_num[0] ||
2427                                   disk_idx == s->failed_num[1]))) {
2428                        /* the disk has failed and we've been asked to fetch
2429                         * its block; compute it instead
2430                         */
2431                        pr_debug("Computing stripe %llu block %d\n",
2432                               (unsigned long long)sh->sector, disk_idx);
2433                        set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2434                        set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2435                        set_bit(R5_Wantcompute, &dev->flags);
2436                        sh->ops.target = disk_idx;
2437                        sh->ops.target2 = -1; /* no 2nd target */
2438                        s->req_compute = 1;
2439                        /* Careful: from this point on 'uptodate' is in the eye
2440                         * of raid_run_ops which services 'compute' operations
2441                         * before writes. R5_Wantcompute flags a block that will
2442                         * be R5_UPTODATE by the time it is needed for a
2443                         * subsequent operation.
2444                         */
2445                        s->uptodate++;
2446                        return 1;
2447                } else if (s->uptodate == disks-2 && s->failed >= 2) {
2448                        /* Computing 2-failure is *very* expensive; only
2449                         * do it if failed >= 2
2450                         */
2451                        int other;
2452                        for (other = disks; other--; ) {
2453                                if (other == disk_idx)
2454                                        continue;
2455                                if (!test_bit(R5_UPTODATE,
2456                                      &sh->dev[other].flags))
2457                                        break;
2458                        }
2459                        BUG_ON(other < 0);
2460                        pr_debug("Computing stripe %llu blocks %d,%d\n",
2461                               (unsigned long long)sh->sector,
2462                               disk_idx, other);
2463                        set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2464                        set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2465                        set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags);
2466                        set_bit(R5_Wantcompute, &sh->dev[other].flags);
2467                        sh->ops.target = disk_idx;
2468                        sh->ops.target2 = other;
2469                        s->uptodate += 2;
2470                        s->req_compute = 1;
2471                        return 1;
2472                } else if (test_bit(R5_Insync, &dev->flags)) {
2473                        set_bit(R5_LOCKED, &dev->flags);
2474                        set_bit(R5_Wantread, &dev->flags);
2475                        s->locked++;
2476                        pr_debug("Reading block %d (sync=%d)\n",
2477                                disk_idx, s->syncing);
2478                }
2479        }
2480
2481        return 0;
2482}
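
/*
 * Example of the decision above (illustrative): with a single failed
 * device whose block is wanted and every other block already up to date
 * (s->uptodate == disks - 1), the block is flagged R5_Wantcompute and
 * generated by the compute engine instead of being read; with two
 * failures on RAID6 the two missing blocks are only computed together
 * once uptodate == disks - 2, since the double-failure computation is
 * far more expensive.
 */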
2483
2484/**
2485 * handle_stripe_fill - read or compute data to satisfy pending requests.
2486 */
2487static void handle_stripe_fill(struct stripe_head *sh,
2488                               struct stripe_head_state *s,
2489                               int disks)
2490{
2491        int i;
2492
2493        /* look for blocks to read/compute, skip this if a compute
2494         * is already in flight, or if the stripe contents are in the
2495         * midst of changing due to a write
2496         */
2497        if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state &&
2498            !sh->reconstruct_state)
2499                for (i = disks; i--; )
2500                        if (fetch_block(sh, s, i, disks))
2501                                break;
2502        set_bit(STRIPE_HANDLE, &sh->state);
2503}
2504
2505
2506/* handle_stripe_clean_event
2507 * any written block on an uptodate or failed drive can be returned.
2508 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
2509 * never LOCKED, so we don't need to test 'failed' directly.
2510 */
2511static void handle_stripe_clean_event(raid5_conf_t *conf,
2512        struct stripe_head *sh, int disks, struct bio **return_bi)
2513{
2514        int i;
2515        struct r5dev *dev;
2516
2517        for (i = disks; i--; )
2518                if (sh->dev[i].written) {
2519                        dev = &sh->dev[i];
2520                        if (!test_bit(R5_LOCKED, &dev->flags) &&
2521                                test_bit(R5_UPTODATE, &dev->flags)) {
2522                                /* We can return any write requests */
2523                                struct bio *wbi, *wbi2;
2524                                int bitmap_end = 0;
2525                                pr_debug("Return write for disc %d\n", i);
2526                                spin_lock_irq(&conf->device_lock);
2527                                wbi = dev->written;
2528                                dev->written = NULL;
2529                                while (wbi && wbi->bi_sector <
2530                                        dev->sector + STRIPE_SECTORS) {
2531                                        wbi2 = r5_next_bio(wbi, dev->sector);
2532                                        if (!raid5_dec_bi_phys_segments(wbi)) {
2533                                                md_write_end(conf->mddev);
2534                                                wbi->bi_next = *return_bi;
2535                                                *return_bi = wbi;
2536                                        }
2537                                        wbi = wbi2;
2538                                }
2539                                if (dev->towrite == NULL)
2540                                        bitmap_end = 1;
2541                                spin_unlock_irq(&conf->device_lock);
2542                                if (bitmap_end)
2543                                        bitmap_endwrite(conf->mddev->bitmap,
2544                                                        sh->sector,
2545                                                        STRIPE_SECTORS,
2546                                         !test_bit(STRIPE_DEGRADED, &sh->state),
2547                                                        0);
2548                        }
2549                }
2550
2551        if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
2552                if (atomic_dec_and_test(&conf->pending_full_writes))
2553                        md_wakeup_thread(conf->mddev->thread);
2554}
2555
2556static void handle_stripe_dirtying(raid5_conf_t *conf,
2557                                   struct stripe_head *sh,
2558                                   struct stripe_head_state *s,
2559                                   int disks)
2560{
2561        int rmw = 0, rcw = 0, i;
2562        if (conf->max_degraded == 2) {
2563                /* RAID6 requires 'rcw' in current implementation
2564                 * Calculate the real rcw later - for now fake it to
2565                 * look like rcw is cheaper
2566                 */
2567                rcw = 1; rmw = 2;
2568        } else for (i = disks; i--; ) {
2569                /* would I have to read this buffer for read_modify_write */
2570                struct r5dev *dev = &sh->dev[i];
2571                if ((dev->towrite || i == sh->pd_idx) &&
2572                    !test_bit(R5_LOCKED, &dev->flags) &&
2573                    !(test_bit(R5_UPTODATE, &dev->flags) ||
2574                      test_bit(R5_Wantcompute, &dev->flags))) {
2575                        if (test_bit(R5_Insync, &dev->flags))
2576                                rmw++;
2577                        else
2578                                rmw += 2*disks;  /* cannot read it */
2579                }
2580                /* Would I have to read this buffer for reconstruct_write */
2581                if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx &&
2582                    !test_bit(R5_LOCKED, &dev->flags) &&
2583                    !(test_bit(R5_UPTODATE, &dev->flags) ||
2584                    test_bit(R5_Wantcompute, &dev->flags))) {
2585                        if (test_bit(R5_Insync, &dev->flags)) rcw++;
2586                        else
2587                                rcw += 2*disks;
2588                }
2589        }
2590        pr_debug("for sector %llu, rmw=%d rcw=%d\n",
2591                (unsigned long long)sh->sector, rmw, rcw);
2592        set_bit(STRIPE_HANDLE, &sh->state);
2593        if (rmw < rcw && rmw > 0)
2594                /* prefer read-modify-write, but need to get some data */
2595                for (i = disks; i--; ) {
2596                        struct r5dev *dev = &sh->dev[i];
2597                        if ((dev->towrite || i == sh->pd_idx) &&
2598                            !test_bit(R5_LOCKED, &dev->flags) &&
2599                            !(test_bit(R5_UPTODATE, &dev->flags) ||
2600                            test_bit(R5_Wantcompute, &dev->flags)) &&
2601                            test_bit(R5_Insync, &dev->flags)) {
2602                                if (
2603                                  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2604                                        pr_debug("Read_old block "
2605                                                "%d for r-m-w\n", i);
2606                                        set_bit(R5_LOCKED, &dev->flags);
2607                                        set_bit(R5_Wantread, &dev->flags);
2608                                        s->locked++;
2609                                } else {
2610                                        set_bit(STRIPE_DELAYED, &sh->state);
2611                                        set_bit(STRIPE_HANDLE, &sh->state);
2612                                }
2613                        }
2614                }
2615        if (rcw <= rmw && rcw > 0) {
2616                /* want reconstruct write, but need to get some data */
2617                rcw = 0;
2618                for (i = disks; i--; ) {
2619                        struct r5dev *dev = &sh->dev[i];
2620                        if (!test_bit(R5_OVERWRITE, &dev->flags) &&
2621                            i != sh->pd_idx && i != sh->qd_idx &&
2622                            !test_bit(R5_LOCKED, &dev->flags) &&
2623                            !(test_bit(R5_UPTODATE, &dev->flags) ||
2624                              test_bit(R5_Wantcompute, &dev->flags))) {
2625                                rcw++;
2626                                if (!test_bit(R5_Insync, &dev->flags))
2627                                        continue; /* it's a failed drive */
2628                                if (
2629                                  test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
2630                                        pr_debug("Read_old block "
2631                                                "%d for Reconstruct\n", i);
2632                                        set_bit(R5_LOCKED, &dev->flags);
2633                                        set_bit(R5_Wantread, &dev->flags);
2634                                        s->locked++;
2635                                } else {
2636                                        set_bit(STRIPE_DELAYED, &sh->state);
2637                                        set_bit(STRIPE_HANDLE, &sh->state);
2638                                }
2639                        }
2640                }
2641        }
2642        /* now if nothing is locked, and if we have enough data,
2643         * we can start a write request
2644         */
2645        /* since handle_stripe can be called at any time we need to handle the
2646         * case where a compute block operation has been submitted and then a
2647         * subsequent call wants to start a write request.  raid_run_ops only
2648         * handles the case where compute block and reconstruct are requested
2649         * simultaneously.  If this is not the case then new writes need to be
2650         * held off until the compute completes.
2651         */
2652        if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) &&
2653            (s->locked == 0 && (rcw == 0 || rmw == 0) &&
2654            !test_bit(STRIPE_BIT_DELAY, &sh->state)))
2655                schedule_reconstruction(sh, s, rcw == 0, 0);
2656}
2657
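    /* Rough map of the RAID5 check_state machine driven below:
     * idle -> run (async parity check queued) -> check_result; on a
     * mismatch, and unless MD_RECOVERY_CHECK forbids repair,
     * check_result -> compute_run (recompute P) -> compute_result -> idle,
     * writing the recomputed block back out.  The *_run states simply
     * return and wait to be called again when the async op completes.
     */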
2658static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh,
2659                                struct stripe_head_state *s, int disks)
2660{
2661        struct r5dev *dev = NULL;
2662
2663        set_bit(STRIPE_HANDLE, &sh->state);
2664
2665        switch (sh->check_state) {
2666        case check_state_idle:
2667                /* start a new check operation if there are no failures */
2668                if (s->failed == 0) {
2669                        BUG_ON(s->uptodate != disks);
2670                        sh->check_state = check_state_run;
2671                        set_bit(STRIPE_OP_CHECK, &s->ops_request);
2672                        clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
2673                        s->uptodate--;
2674                        break;
2675                }
2676                dev = &sh->dev[s->failed_num[0]];
2677                /* fall through */
2678        case check_state_compute_result:
2679                sh->check_state = check_state_idle;
2680                if (!dev)
2681                        dev = &sh->dev[sh->pd_idx];
2682
2683                /* check that a write has not made the stripe insync */
2684                if (test_bit(STRIPE_INSYNC, &sh->state))
2685                        break;
2686
2687                /* either failed parity check, or recovery is happening */
2688                BUG_ON(!test_bit(R5_UPTODATE, &dev->flags));
2689                BUG_ON(s->uptodate != disks);
2690
2691                set_bit(R5_LOCKED, &dev->flags);
2692                s->locked++;
2693                set_bit(R5_Wantwrite, &dev->flags);
2694
2695                clear_bit(STRIPE_DEGRADED, &sh->state);
2696                set_bit(STRIPE_INSYNC, &sh->state);
2697                break;
2698        case check_state_run:
2699                break; /* we will be called again upon completion */
2700        case check_state_check_result:
2701                sh->check_state = check_state_idle;
2702
2703                /* if a failure occurred during the check operation, leave
2704                 * STRIPE_INSYNC not set and let the stripe be handled again
2705                 */
2706                if (s->failed)
2707                        break;
2708
2709                /* handle a successful check operation, if parity is correct
2710                 * we are done.  Otherwise update the mismatch count and repair
2711                 * parity if !MD_RECOVERY_CHECK
2712                 */
2713                if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0)
2714                        /* parity is correct (on disc,
2715                         * not in buffer any more)
2716                         */
2717                        set_bit(STRIPE_INSYNC, &sh->state);
2718                else {
2719                        conf->mddev->resync_mismatches += STRIPE_SECTORS;
2720                        if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2721                                /* don't try to repair!! */
2722                                set_bit(STRIPE_INSYNC, &sh->state);
2723                        else {
2724                                sh->check_state = check_state_compute_run;
2725                                set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2726                                set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2727                                set_bit(R5_Wantcompute,
2728                                        &sh->dev[sh->pd_idx].flags);
2729                                sh->ops.target = sh->pd_idx;
2730                                sh->ops.target2 = -1;
2731                                s->uptodate++;
2732                        }
2733                }
2734                break;
2735        case check_state_compute_run:
2736                break;
2737        default:
2738                printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2739                       __func__, sh->check_state,
2740                       (unsigned long long) sh->sector);
2741                BUG();
2742        }
2743}
2744
2745
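    /* The RAID6 variant can check P, Q or both, hence the extra states:
     * check_state_run checks P alone (used when the one failed device is
     * Q itself), check_state_run_q checks Q alone (P was consumed
     * regenerating a failed block, or is itself the failed device), and
     * check_state_run_pq checks both when nothing has failed.  Otherwise
     * the flow mirrors handle_parity_checks5() above.
     */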
2746static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh,
2747                                  struct stripe_head_state *s,
2748                                  int disks)
2749{
2750        int pd_idx = sh->pd_idx;
2751        int qd_idx = sh->qd_idx;
2752        struct r5dev *dev;
2753
2754        set_bit(STRIPE_HANDLE, &sh->state);
2755
2756        BUG_ON(s->failed > 2);
2757
2758        /* Want to check and possibly repair P and Q.
2759         * However there could be one 'failed' device, in which
2760         * case we can only check one of them, possibly using the
2761         * other to generate missing data
2762         */
2763
2764        switch (sh->check_state) {
2765        case check_state_idle:
2766                /* start a new check operation if there are < 2 failures */
2767                if (s->failed == s->q_failed) {
2768                        /* The only possible failed device holds Q, so it
2769                         * makes sense to check P (If anything else were failed,
2770                         * we would have used P to recreate it).
2771                         */
2772                        sh->check_state = check_state_run;
2773                }
2774                if (!s->q_failed && s->failed < 2) {
2775                        /* Q is not failed, and we didn't use it to generate
2776                         * anything, so it makes sense to check it
2777                         */
2778                        if (sh->check_state == check_state_run)
2779                                sh->check_state = check_state_run_pq;
2780                        else
2781                                sh->check_state = check_state_run_q;
2782                }
2783
2784                /* discard potentially stale zero_sum_result */
2785                sh->ops.zero_sum_result = 0;
2786
2787                if (sh->check_state == check_state_run) {
2788                        /* async_xor_zero_sum destroys the contents of P */
2789                        clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags);
2790                        s->uptodate--;
2791                }
2792                if (sh->check_state >= check_state_run &&
2793                    sh->check_state <= check_state_run_pq) {
2794                        /* async_syndrome_zero_sum preserves P and Q, so
2795                         * no need to mark them !uptodate here
2796                         */
2797                        set_bit(STRIPE_OP_CHECK, &s->ops_request);
2798                        break;
2799                }
2800
2801                /* we have 2-disk failure */
2802                BUG_ON(s->failed != 2);
2803                /* fall through */
2804        case check_state_compute_result:
2805                sh->check_state = check_state_idle;
2806
2807                /* check that a write has not made the stripe insync */
2808                if (test_bit(STRIPE_INSYNC, &sh->state))
2809                        break;
2810
2811                /* now write out any block on a failed drive,
2812                 * or P or Q if they were recomputed
2813                 */
2814                BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */
2815                if (s->failed == 2) {
2816                        dev = &sh->dev[s->failed_num[1]];
2817                        s->locked++;
2818                        set_bit(R5_LOCKED, &dev->flags);
2819                        set_bit(R5_Wantwrite, &dev->flags);
2820                }
2821                if (s->failed >= 1) {
2822                        dev = &sh->dev[s->failed_num[0]];
2823                        s->locked++;
2824                        set_bit(R5_LOCKED, &dev->flags);
2825                        set_bit(R5_Wantwrite, &dev->flags);
2826                }
2827                if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
2828                        dev = &sh->dev[pd_idx];
2829                        s->locked++;
2830                        set_bit(R5_LOCKED, &dev->flags);
2831                        set_bit(R5_Wantwrite, &dev->flags);
2832                }
2833                if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
2834                        dev = &sh->dev[qd_idx];
2835                        s->locked++;
2836                        set_bit(R5_LOCKED, &dev->flags);
2837                        set_bit(R5_Wantwrite, &dev->flags);
2838                }
2839                clear_bit(STRIPE_DEGRADED, &sh->state);
2840
2841                set_bit(STRIPE_INSYNC, &sh->state);
2842                break;
2843        case check_state_run:
2844        case check_state_run_q:
2845        case check_state_run_pq:
2846                break; /* we will be called again upon completion */
2847        case check_state_check_result:
2848                sh->check_state = check_state_idle;
2849
2850                /* handle a successful check operation, if parity is correct
2851                 * we are done.  Otherwise update the mismatch count and repair
2852                 * parity if !MD_RECOVERY_CHECK
2853                 */
2854                if (sh->ops.zero_sum_result == 0) {
2855                        /* both parities are correct */
2856                        if (!s->failed)
2857                                set_bit(STRIPE_INSYNC, &sh->state);
2858                        else {
2859                                /* in contrast to the raid5 case we can validate
2860                                 * parity, but still have a failure to write
2861                                 * back
2862                                 */
2863                                sh->check_state = check_state_compute_result;
2864                                /* Returning at this point means that we may go
2865                                 * off and bring p and/or q uptodate again so
2866                                 * we make sure to check zero_sum_result again
2867                                 * to verify if p or q need writeback
2868                                 */
2869                        }
2870                } else {
2871                        conf->mddev->resync_mismatches += STRIPE_SECTORS;
2872                        if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery))
2873                                /* don't try to repair!! */
2874                                set_bit(STRIPE_INSYNC, &sh->state);
2875                        else {
2876                                int *target = &sh->ops.target;
2877
2878                                sh->ops.target = -1;
2879                                sh->ops.target2 = -1;
2880                                sh->check_state = check_state_compute_run;
2881                                set_bit(STRIPE_COMPUTE_RUN, &sh->state);
2882                                set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
2883                                if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) {
2884                                        set_bit(R5_Wantcompute,
2885                                                &sh->dev[pd_idx].flags);
2886                                        *target = pd_idx;
2887                                        target = &sh->ops.target2;
2888                                        s->uptodate++;
2889                                }
2890                                if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) {
2891                                        set_bit(R5_Wantcompute,
2892                                                &sh->dev[qd_idx].flags);
2893                                        *target = qd_idx;
2894                                        s->uptodate++;
2895                                }
2896                        }
2897                }
2898                break;
2899        case check_state_compute_run:
2900                break;
2901        default:
2902                printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n",
2903                       __func__, sh->check_state,
2904                       (unsigned long long) sh->sector);
2905                BUG();
2906        }
2907}
2908
2909static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh)
2910{
2911        int i;
2912
2913        /* We have read all the blocks in this stripe and now we need to
2914         * copy some of them into a target stripe for expand.
2915         */
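            /* The remapping works in two steps: compute_blocknr() converts
             * (this stripe, slot i) back into an array sector using the
             * pre-reshape geometry (the '1' argument), then
             * raid5_compute_sector() locates that sector's stripe and
             * device slot in the new geometry, which is the destination of
             * the copy queued below.
             */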
2916        struct dma_async_tx_descriptor *tx = NULL;
2917        clear_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2918        for (i = 0; i < sh->disks; i++)
2919                if (i != sh->pd_idx && i != sh->qd_idx) {
2920                        int dd_idx, j;
2921                        struct stripe_head *sh2;
2922                        struct async_submit_ctl submit;
2923
2924                        sector_t bn = compute_blocknr(sh, i, 1);
2925                        sector_t s = raid5_compute_sector(conf, bn, 0,
2926                                                          &dd_idx, NULL);
2927                        sh2 = get_active_stripe(conf, s, 0, 1, 1);
2928                        if (sh2 == NULL)
2929                                /* so far only the early blocks of this stripe
2930                                 * have been requested.  When later blocks
2931                                 * get requested, we will try again
2932                                 */
2933                                continue;
2934                        if (!test_bit(STRIPE_EXPANDING, &sh2->state) ||
2935                           test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) {
2936                                /* must have already done this block */
2937                                release_stripe(sh2);
2938                                continue;
2939                        }
2940
2941                        /* place all the copies on one channel */
2942                        init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
2943                        tx = async_memcpy(sh2->dev[dd_idx].page,
2944                                          sh->dev[i].page, 0, 0, STRIPE_SIZE,
2945                                          &submit);
2946
2947                        set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
2948                        set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
2949                        for (j = 0; j < conf->raid_disks; j++)
2950                                if (j != sh2->pd_idx &&
2951                                    j != sh2->qd_idx &&
2952                                    !test_bit(R5_Expanded, &sh2->dev[j].flags))
2953                                        break;
2954                        if (j == conf->raid_disks) {
2955                                set_bit(STRIPE_EXPAND_READY, &sh2->state);
2956                                set_bit(STRIPE_HANDLE, &sh2->state);
2957                        }
2958                        release_stripe(sh2);
2959
2960                }
2961        /* done submitting copies, wait for them to complete */
2962        if (tx) {
2963                async_tx_ack(tx);
2964                dma_wait_for_async_tx(tx);
2965        }
2966}
2967
2968
2969/*
2970 * handle_stripe - do things to a stripe.
2971 *
2972 * We lock the stripe and then examine the state of various bits
2973 * to see what needs to be done.
2974 * Possible results:
2975 *    return some read requests which now have data
2976 *    return some write requests which are safely on disc
2977 *    schedule a read on some buffers
2978 *    schedule a write of some buffers
2979 *    return confirmation of parity correctness
2980 *
2981 * buffers are taken off read_list or write_list, and bh_cache buffers
2982 * get BH_Lock set before the stripe lock is released.
2983 *
2984 */
2985
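    /* analyse_stripe() summarises one stripe into *s: per-device counts of
     * locked/uptodate/compute blocks, pending reads and writes, the failed
     * slots, plus any rdev that is blocked or needs bad-block handling.
     * It runs under rcu_read_lock() and conf->device_lock so that the rdev
     * pointers it samples remain valid for the duration.
     */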
2986static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
2987{
2988        raid5_conf_t *conf = sh->raid_conf;
2989        int disks = sh->disks;
2990        struct r5dev *dev;
2991        int i;
2992
2993        memset(s, 0, sizeof(*s));
2994
2995        s->syncing = test_bit(STRIPE_SYNCING, &sh->state);
2996        s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
2997        s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
2998        s->failed_num[0] = -1;
2999        s->failed_num[1] = -1;
3000
3001        /* Now to look around and see what can be done */
3002        rcu_read_lock();
3003        spin_lock_irq(&conf->device_lock);
3004        for (i=disks; i--; ) {
3005                mdk_rdev_t *rdev;
3006                sector_t first_bad;
3007                int bad_sectors;
3008                int is_bad = 0;
3009
3010                dev = &sh->dev[i];
3011
3012                pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
3013                        i, dev->flags, dev->toread, dev->towrite, dev->written);
3014                /* maybe we can reply to a read
3015                 *
3016                 * new wantfill requests are only permitted while
3017                 * ops_complete_biofill is guaranteed to be inactive
3018                 */
3019                if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread &&
3020                    !test_bit(STRIPE_BIOFILL_RUN, &sh->state))
3021                        set_bit(R5_Wantfill, &dev->flags);
3022
3023                /* now count some things */
3024                if (test_bit(R5_LOCKED, &dev->flags))
3025                        s->locked++;
3026                if (test_bit(R5_UPTODATE, &dev->flags))
3027                        s->uptodate++;
3028                if (test_bit(R5_Wantcompute, &dev->flags)) {
3029                        s->compute++;
3030                        BUG_ON(s->compute > 2);
3031                }
3032
3033                if (test_bit(R5_Wantfill, &dev->flags))
3034                        s->to_fill++;
3035                else if (dev->toread)
3036                        s->to_read++;
3037                if (dev->towrite) {
3038                        s->to_write++;
3039                        if (!test_bit(R5_OVERWRITE, &dev->flags))
3040                                s->non_overwrite++;
3041                }
3042                if (dev->written)
3043                        s->written++;
3044                rdev = rcu_dereference(conf->disks[i].rdev);
3045                if (rdev) {
3046                        is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
3047                                             &first_bad, &bad_sectors);
3048                        if (s->blocked_rdev == NULL
3049                            && (test_bit(Blocked, &rdev->flags)
3050                                || is_bad < 0)) {
3051                                if (is_bad < 0)
3052                                        set_bit(BlockedBadBlocks,
3053                                                &rdev->flags);
3054                                s->blocked_rdev = rdev;
3055                                atomic_inc(&rdev->nr_pending);
3056                        }
3057                }
3058                clear_bit(R5_Insync, &dev->flags);
3059                if (!rdev)
3060                        /* Not in-sync */;
3061                else if (is_bad) {
3062                        /* also not in-sync */
3063                        if (!test_bit(WriteErrorSeen, &rdev->flags)) {
3064                                /* treat as in-sync, but with a read error
3065                                 * which we can now try to correct
3066                                 */
3067                                set_bit(R5_Insync, &dev->flags);
3068                                set_bit(R5_ReadError, &dev->flags);
3069                        }
3070                } else if (test_bit(In_sync, &rdev->flags))
3071                        set_bit(R5_Insync, &dev->flags);
3072                else if (!test_bit(Faulty, &rdev->flags)) {
3073                        /* in sync if before recovery_offset */
3074                        if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
3075                                set_bit(R5_Insync, &dev->flags);
3076                }
3077                if (test_bit(R5_WriteError, &dev->flags)) {
3078                        clear_bit(R5_Insync, &dev->flags);
3079                        if (!test_bit(Faulty, &rdev->flags)) {
3080                                s->handle_bad_blocks = 1;
3081                                atomic_inc(&rdev->nr_pending);
3082                        } else
3083                                clear_bit(R5_WriteError, &dev->flags);
3084                }
3085                if (test_bit(R5_MadeGood, &dev->flags)) {
3086                        if (!test_bit(Faulty, &rdev->flags)) {
3087                                s->handle_bad_blocks = 1;
3088                                atomic_inc(&rdev->nr_pending);
3089                        } else
3090                                clear_bit(R5_MadeGood, &dev->flags);
3091                }
3092                if (!test_bit(R5_Insync, &dev->flags)) {
3093                        /* The ReadError flag will just be confusing now */
3094                        clear_bit(R5_ReadError, &dev->flags);
3095                        clear_bit(R5_ReWrite, &dev->flags);
3096                }
3097                if (test_bit(R5_ReadError, &dev->flags))
3098                        clear_bit(R5_Insync, &dev->flags);
3099                if (!test_bit(R5_Insync, &dev->flags)) {
3100                        if (s->failed < 2)
3101                                s->failed_num[s->failed] = i;
3102                        s->failed++;
3103                }
3104        }
3105        spin_unlock_irq(&conf->device_lock);
3106        rcu_read_unlock();
3107}
3108
3109static void handle_stripe(struct stripe_head *sh)
3110{
3111        struct stripe_head_state s;
3112        raid5_conf_t *conf = sh->raid_conf;
3113        int i;
3114        int prexor;
3115        int disks = sh->disks;
3116        struct r5dev *pdev, *qdev;
3117
3118        clear_bit(STRIPE_HANDLE, &sh->state);
3119        if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) {
3120                /* already being handled, ensure it gets handled
3121                 * again when current action finishes */
3122                set_bit(STRIPE_HANDLE, &sh->state);
3123                return;
3124        }
3125
3126        if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
3127                set_bit(STRIPE_SYNCING, &sh->state);
3128                clear_bit(STRIPE_INSYNC, &sh->state);
3129        }
3130        clear_bit(STRIPE_DELAYED, &sh->state);
3131
3132        pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3133                "pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n",
3134               (unsigned long long)sh->sector, sh->state,
3135               atomic_read(&sh->count), sh->pd_idx, sh->qd_idx,
3136               sh->check_state, sh->reconstruct_state);
3137
3138        analyse_stripe(sh, &s);
3139
3140        if (s.handle_bad_blocks) {
3141                set_bit(STRIPE_HANDLE, &sh->state);
3142                goto finish;
3143        }
3144
3145        if (unlikely(s.blocked_rdev)) {
3146                if (s.syncing || s.expanding || s.expanded ||
3147                    s.to_write || s.written) {
3148                        set_bit(STRIPE_HANDLE, &sh->state);
3149                        goto finish;
3150                }
3151                /* There is nothing for the blocked_rdev to block */
3152                rdev_dec_pending(s.blocked_rdev, conf->mddev);
3153                s.blocked_rdev = NULL;
3154        }
3155
3156        if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) {
3157                set_bit(STRIPE_OP_BIOFILL, &s.ops_request);
3158                set_bit(STRIPE_BIOFILL_RUN, &sh->state);
3159        }
3160
3161        pr_debug("locked=%d uptodate=%d to_read=%d"
3162               " to_write=%d failed=%d failed_num=%d,%d\n",
3163               s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
3164               s.failed_num[0], s.failed_num[1]);
3165        /* check if the array has lost more than max_degraded devices and,
3166         * if so, some requests might need to be failed.
3167         */
3168        if (s.failed > conf->max_degraded) {
3169                sh->check_state = 0;
3170                sh->reconstruct_state = 0;
3171                if (s.to_read+s.to_write+s.written)
3172                        handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
3173                if (s.syncing)
3174                        handle_failed_sync(conf, sh, &s);
3175        }
3176
3177        /*
3178         * might be able to return some write requests if the parity blocks
3179         * are safe, or on a failed drive
3180         */
3181        pdev = &sh->dev[sh->pd_idx];
3182        s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
3183                || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
3184        qdev = &sh->dev[sh->qd_idx];
3185        s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
3186                || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
3187                || conf->level < 6;
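            /* For RAID5 (conf->level < 6) there is no Q block, so q_failed
             * is forced true above, which short-circuits the qdev half of
             * the completion test below.
             */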
3188
3189        if (s.written &&
3190            (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
3191                             && !test_bit(R5_LOCKED, &pdev->flags)
3192                             && test_bit(R5_UPTODATE, &pdev->flags)))) &&
3193            (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
3194                             && !test_bit(R5_LOCKED, &qdev->flags)
3195                             && test_bit(R5_UPTODATE, &qdev->flags)))))
3196                handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
3197
3198        /* Now we might consider reading some blocks, either to check/generate
3199         * parity, or to satisfy requests
3200         * or to load a block that is being partially written.
3201         */
3202        if (s.to_read || s.non_overwrite
3203            || (conf->level == 6 && s.to_write && s.failed)
3204            || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
3205                handle_stripe_fill(sh, &s, disks);
3206
3207        /* Now we check to see if any write operations have recently
3208         * completed
3209         */
3210        prexor = 0;
3211        if (sh->reconstruct_state == reconstruct_state_prexor_drain_result)
3212                prexor = 1;
3213        if (sh->reconstruct_state == reconstruct_state_drain_result ||
3214            sh->reconstruct_state == reconstruct_state_prexor_drain_result) {
3215                sh->reconstruct_state = reconstruct_state_idle;
3216
3217                /* All the 'written' buffers and the parity block are ready to
3218                 * be written back to disk
3219                 */
3220                BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags));
3221                BUG_ON(sh->qd_idx >= 0 &&
3222                       !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags));
3223                for (i = disks; i--; ) {
3224                        struct r5dev *dev = &sh->dev[i];
3225                        if (test_bit(R5_LOCKED, &dev->flags) &&
3226                                (i == sh->pd_idx || i == sh->qd_idx ||
3227                                 dev->written)) {
3228                                pr_debug("Writing block %d\n", i);
3229                                set_bit(R5_Wantwrite, &dev->flags);
3230                                if (prexor)
3231                                        continue;
3232                                if (!test_bit(R5_Insync, &dev->flags) ||
3233                                    ((i == sh->pd_idx || i == sh->qd_idx)  &&
3234                                     s.failed == 0))
3235                                        set_bit(STRIPE_INSYNC, &sh->state);
3236                        }
3237                }
3238                if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3239                        s.dec_preread_active = 1;
3240        }
3241
3242        /* Now to consider new write requests and what else, if anything
3243         * should be read.  We do not handle new writes when:
3244         * 1/ A 'write' operation (copy+xor) is already in flight.
3245         * 2/ A 'check' operation is in flight, as it may clobber the parity
3246         *    block.
3247         */
3248        if (s.to_write && !sh->reconstruct_state && !sh->check_state)
3249                handle_stripe_dirtying(conf, sh, &s, disks);
3250
3251        /* maybe we need to check and possibly fix the parity for this stripe
3252         * Any reads will already have been scheduled, so we just see if enough
3253         * data is available.  The parity check is held off while parity
3254         * dependent operations are in flight.
3255         */
3256        if (sh->check_state ||
3257            (s.syncing && s.locked == 0 &&
3258             !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
3259             !test_bit(STRIPE_INSYNC, &sh->state))) {
3260                if (conf->level == 6)
3261                        handle_parity_checks6(conf, sh, &s, disks);
3262                else
3263                        handle_parity_checks5(conf, sh, &s, disks);
3264        }
3265
3266        if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
3267                md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3268                clear_bit(STRIPE_SYNCING, &sh->state);
3269        }
3270
3271        /* If the failed drives are just a ReadError, then we might need
3272         * to progress the repair/check process
3273         */
3274        if (s.failed <= conf->max_degraded && !conf->mddev->ro)
3275                for (i = 0; i < s.failed; i++) {
3276                        struct r5dev *dev = &sh->dev[s.failed_num[i]];
3277                        if (test_bit(R5_ReadError, &dev->flags)
3278                            && !test_bit(R5_LOCKED, &dev->flags)
3279                            && test_bit(R5_UPTODATE, &dev->flags)
3280                                ) {
3281                                if (!test_bit(R5_ReWrite, &dev->flags)) {
3282                                        set_bit(R5_Wantwrite, &dev->flags);
3283                                        set_bit(R5_ReWrite, &dev->flags);
3284                                        set_bit(R5_LOCKED, &dev->flags);
3285                                        s.locked++;
3286                                } else {
3287                                        /* let's read it back */
3288                                        set_bit(R5_Wantread, &dev->flags);
3289                                        set_bit(R5_LOCKED, &dev->flags);
3290                                        s.locked++;
3291                                }
3292                        }
3293                }
3294
3295
3296        /* Finish reconstruct operations initiated by the expansion process */
3297        if (sh->reconstruct_state == reconstruct_state_result) {
3298                struct stripe_head *sh_src
3299                        = get_active_stripe(conf, sh->sector, 1, 1, 1);
3300                if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) {
3301                        /* sh cannot be written until sh_src has been read.
3302                         * so arrange for sh to be delayed a little
3303                         */
3304                        set_bit(STRIPE_DELAYED, &sh->state);
3305                        set_bit(STRIPE_HANDLE, &sh->state);
3306                        if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE,
3307                                              &sh_src->state))
3308                                atomic_inc(&conf->preread_active_stripes);
3309                        release_stripe(sh_src);
3310                        goto finish;
3311                }
3312                if (sh_src)
3313                        release_stripe(sh_src);
3314
3315                sh->reconstruct_state = reconstruct_state_idle;
3316                clear_bit(STRIPE_EXPANDING, &sh->state);
3317                for (i = conf->raid_disks; i--; ) {
3318                        set_bit(R5_Wantwrite, &sh->dev[i].flags);
3319                        set_bit(R5_LOCKED, &sh->dev[i].flags);
3320                        s.locked++;
3321                }
3322        }
3323
3324        if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) &&
3325            !sh->reconstruct_state) {
3326                /* Need to write out all blocks after computing parity */
3327                sh->disks = conf->raid_disks;
3328                stripe_set_idx(sh->sector, conf, 0, sh);
3329                schedule_reconstruction(sh, &s, 1, 1);
3330        } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) {
3331                clear_bit(STRIPE_EXPAND_READY, &sh->state);
3332                atomic_dec(&conf->reshape_stripes);
3333                wake_up(&conf->wait_for_overlap);
3334                md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
3335        }
3336
3337        if (s.expanding && s.locked == 0 &&
3338            !test_bit(STRIPE_COMPUTE_RUN, &sh->state))
3339                handle_stripe_expansion(conf, sh);
3340
3341finish:
3342        /* wait for this device to become unblocked */
3343        if (conf->mddev->external && unlikely(s.blocked_rdev))
3344                md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
3345
3346        if (s.handle_bad_blocks)
3347                for (i = disks; i--; ) {
3348                        mdk_rdev_t *rdev;
3349                        struct r5dev *dev = &sh->dev[i];
3350                        if (test_and_clear_bit(R5_WriteError, &dev->flags)) {
3351                                /* We own a safe reference to the rdev */
3352                                rdev = conf->disks[i].rdev;
3353                                if (!rdev_set_badblocks(rdev, sh->sector,
3354                                                        STRIPE_SECTORS, 0))
3355                                        md_error(conf->mddev, rdev);
3356                                rdev_dec_pending(rdev, conf->mddev);
3357                        }
3358                        if (test_and_clear_bit(R5_MadeGood, &dev->flags)) {
3359                                rdev = conf->disks[i].rdev;
3360                                rdev_clear_badblocks(rdev, sh->sector,
3361                                                     STRIPE_SECTORS);
3362                                rdev_dec_pending(rdev, conf->mddev);
3363                        }
3364                }
3365
3366        if (s.ops_request)
3367                raid_run_ops(sh, s.ops_request);
3368
3369        ops_run_io(sh, &s);
3370
3371        if (s.dec_preread_active) {
3372                /* We delay this until after ops_run_io so that if make_request
3373                 * is waiting on a flush, it won't continue until the writes
3374                 * have actually been submitted.
3375                 */
3376                atomic_dec(&conf->preread_active_stripes);
3377                if (atomic_read(&conf->preread_active_stripes) <
3378                    IO_THRESHOLD)
3379                        md_wakeup_thread(conf->mddev->thread);
3380        }
3381
3382        return_io(s.return_bi);
3383
3384        clear_bit_unlock(STRIPE_ACTIVE, &sh->state);
3385}
3386
3387static void raid5_activate_delayed(raid5_conf_t *conf)
3388{
3389        if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
3390                while (!list_empty(&conf->delayed_list)) {
3391                        struct list_head *l = conf->delayed_list.next;
3392                        struct stripe_head *sh;
3393                        sh = list_entry(l, struct stripe_head, lru);
3394                        list_del_init(l);
3395                        clear_bit(STRIPE_DELAYED, &sh->state);
3396                        if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3397                                atomic_inc(&conf->preread_active_stripes);
3398                        list_add_tail(&sh->lru, &conf->hold_list);
3399                }
3400        }
3401}
3402
3403static void activate_bit_delay(raid5_conf_t *conf)
3404{
3405        /* device_lock is held */
3406        struct list_head head;
3407        list_add(&head, &conf->bitmap_list);
3408        list_del_init(&conf->bitmap_list);
3409        while (!list_empty(&head)) {
3410                struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru);
3411                list_del_init(&sh->lru);
3412                atomic_inc(&sh->count);
3413                __release_stripe(conf, sh);
3414        }
3415}
3416
3417int md_raid5_congested(mddev_t *mddev, int bits)
3418{
3419        raid5_conf_t *conf = mddev->private;
3420
3421        /* No difference between reads and writes.  Just check
3422         * how busy the stripe_cache is
3423         */
3424
3425        if (conf->inactive_blocked)
3426                return 1;
3427        if (conf->quiesce)
3428                return 1;
3429        if (list_empty_careful(&conf->inactive_list))
3430                return 1;
3431
3432        return 0;
3433}
3434EXPORT_SYMBOL_GPL(md_raid5_congested);
3435
3436static int raid5_congested(void *data, int bits)
3437{
3438        mddev_t *mddev = data;
3439
3440        return mddev_congested(mddev, bits) ||
3441                md_raid5_congested(mddev, bits);
3442}
3443
3444/* We want read requests to align with chunks where possible,
3445 * but write requests don't need to.
3446 */
3447static int raid5_mergeable_bvec(struct request_queue *q,
3448                                struct bvec_merge_data *bvm,
3449                                struct bio_vec *biovec)
3450{
3451        mddev_t *mddev = q->queuedata;
3452        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
3453        int max;
3454        unsigned int chunk_sectors = mddev->chunk_sectors;
3455        unsigned int bio_sectors = bvm->bi_size >> 9;
3456
3457        if ((bvm->bi_rw & 1) == WRITE)
3458                return biovec->bv_len; /* always allow writes to be mergeable */
3459
3460        if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3461                chunk_sectors = mddev->new_chunk_sectors;
3462        max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
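            /* Worked example (illustrative): with 64KiB chunks
             * (chunk_sectors = 128), a read sitting 120 sectors into its
             * chunk and already carrying 4 sectors may grow by at most
             * (128 - (120 + 4)) << 9 = 2048 bytes before crossing into the
             * next chunk.
             */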
3463        if (max < 0) max = 0;
3464        if (max <= biovec->bv_len && bio_sectors == 0)
3465                return biovec->bv_len;
3466        else
3467                return max;
3468}
3469
3470
3471static int in_chunk_boundary(mddev_t *mddev, struct bio *bio)
3472{
3473        sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
3474        unsigned int chunk_sectors = mddev->chunk_sectors;
3475        unsigned int bio_sectors = bio->bi_size >> 9;
3476
3477        if (mddev->new_chunk_sectors < mddev->chunk_sectors)
3478                chunk_sectors = mddev->new_chunk_sectors;
3479        return  chunk_sectors >=
3480                ((sector & (chunk_sectors - 1)) + bio_sectors);
3481}
3482
3483/*
3484 *  add bio to the retry LIFO (in O(1) ... we are in interrupt)
3485 *  later sampled by raid5d.
3486 */
3487static void add_bio_to_retry(struct bio *bi, raid5_conf_t *conf)
3488{
3489        unsigned long flags;
3490
3491        spin_lock_irqsave(&conf->device_lock, flags);
3492
3493        bi->bi_next = conf->retry_read_aligned_list;
3494        conf->retry_read_aligned_list = bi;
3495
3496        spin_unlock_irqrestore(&conf->device_lock, flags);
3497        md_wakeup_thread(conf->mddev->thread);
3498}
3499
3500
3501static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
3502{
3503        struct bio *bi;
3504
3505        bi = conf->retry_read_aligned;
3506        if (bi) {
3507                conf->retry_read_aligned = NULL;
3508                return bi;
3509        }
3510        bi = conf->retry_read_aligned_list;
3511        if (bi) {
3512                conf->retry_read_aligned_list = bi->bi_next;
3513                bi->bi_next = NULL;
3514                /*
3515                 * this sets the active stripe count to 1 and the processed
3516                 * stripe count to zero (upper 8 bits)
3517                 */
3518                bi->bi_phys_segments = 1; /* biased count of active stripes */
3519        }
3520
3521        return bi;
3522}
3523
3524
3525/*
3526 *  The "raid5_align_endio" should check if the read succeeded and if it
3527 *  did, call bio_endio on the original bio (having bio_put the new bio
3528 *  first).
3529 *  If the read failed..
3530 */
3531static void raid5_align_endio(struct bio *bi, int error)
3532{
3533        struct bio* raid_bi  = bi->bi_private;
3534        mddev_t *mddev;
3535        raid5_conf_t *conf;
3536        int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
3537        mdk_rdev_t *rdev;
3538
3539        bio_put(bi);
3540
3541        rdev = (void*)raid_bi->bi_next;
3542        raid_bi->bi_next = NULL;
3543        mddev = rdev->mddev;
3544        conf = mddev->private;
3545
3546        rdev_dec_pending(rdev, conf->mddev);
3547
3548        if (!error && uptodate) {
3549                bio_endio(raid_bi, 0);
3550                if (atomic_dec_and_test(&conf->active_aligned_reads))
3551                        wake_up(&conf->wait_for_stripe);
3552                return;
3553        }
3554
3555
3556        pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3557
3558        add_bio_to_retry(raid_bi, conf);
3559}
3560
3561static int bio_fits_rdev(struct bio *bi)
3562{
3563        struct request_queue *q = bdev_get_queue(bi->bi_bdev);
3564
3565        if ((bi->bi_size>>9) > queue_max_sectors(q))
3566                return 0;
3567        blk_recount_segments(q, bi);
3568        if (bi->bi_phys_segments > queue_max_segments(q))
3569                return 0;
3570
3571        if (q->merge_bvec_fn)
3572                /* it's too hard to apply the merge_bvec_fn at this stage,
3573                 * just give up
3574                 */
3575                return 0;
3576
3577        return 1;
3578}
3579
3580
3581static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
3582{
3583        raid5_conf_t *conf = mddev->private;
3584        int dd_idx;
3585        struct bio* align_bi;
3586        mdk_rdev_t *rdev;
3587
3588        if (!in_chunk_boundary(mddev, raid_bio)) {
3589                pr_debug("chunk_aligned_read : non aligned\n");
3590                return 0;
3591        }
3592        /*
3593         * use bio_clone_mddev to make a copy of the bio
3594         */
3595        align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev);
3596        if (!align_bi)
3597                return 0;
3598        /*
3599         *   set bi_end_io to a new function, and set bi_private to the
3600         *     original bio.
3601         */
3602        align_bi->bi_end_io  = raid5_align_endio;
3603        align_bi->bi_private = raid_bio;
3604        /*
3605         *      compute position
3606         */
3607        align_bi->bi_sector =  raid5_compute_sector(conf, raid_bio->bi_sector,
3608                                                    0,
3609                                                    &dd_idx, NULL);
3610
3611        rcu_read_lock();
3612        rdev = rcu_dereference(conf->disks[dd_idx].rdev);
3613        if (rdev && test_bit(In_sync, &rdev->flags)) {
3614                sector_t first_bad;
3615                int bad_sectors;
3616
3617                atomic_inc(&rdev->nr_pending);
3618                rcu_read_unlock();
3619                raid_bio->bi_next = (void*)rdev;
3620                align_bi->bi_bdev =  rdev->bdev;
3621                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
3622                align_bi->bi_sector += rdev->data_offset;
3623
3624                if (!bio_fits_rdev(align_bi) ||
3625                    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
3626                                &first_bad, &bad_sectors)) {
3627                        /* too big in some way, or has a known bad block */
3628                        bio_put(align_bi);
3629                        rdev_dec_pending(rdev, mddev);
3630                        return 0;
3631                }
3632
3633                spin_lock_irq(&conf->device_lock);
3634                wait_event_lock_irq(conf->wait_for_stripe,
3635                                    conf->quiesce == 0,
3636                                    conf->device_lock, /* nothing */);
3637                atomic_inc(&conf->active_aligned_reads);
3638                spin_unlock_irq(&conf->device_lock);
3639
3640                generic_make_request(align_bi);
3641                return 1;
3642        } else {
3643                rcu_read_unlock();
3644                bio_put(align_bi);
3645                return 0;
3646        }
3647}
3648
3649/* __get_priority_stripe - get the next stripe to process
3650 *
3651 * Full stripe writes are allowed to pass preread active stripes up until
3652 * the bypass_threshold is exceeded.  In general the bypass_count
3653 * increments when the handle_list is handled before the hold_list; however, it
3654 * will not be incremented when STRIPE_IO_STARTED is sampled as set, signifying a
3655 * stripe with in-flight i/o.  The bypass_count will be reset when the
3656 * head of the hold_list has changed, i.e. the head was promoted to the
3657 * handle_list.
3658 */
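    /* Concretely: every time a stripe is taken from handle_list while the
     * head of hold_list stays put (and the chosen stripe has no i/o started
     * yet), bypass_count is bumped.  Once handle_list is empty, the held
     * stripe is promoted only if bypass_count has exceeded
     * conf->bypass_threshold or there are no pending full-stripe writes.
     */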
3659static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
3660{
3661        struct stripe_head *sh;
3662
3663        pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
3664                  __func__,
3665                  list_empty(&conf->handle_list) ? "empty" : "busy",
3666                  list_empty(&conf->hold_list) ? "empty" : "busy",
3667                  atomic_read(&conf->pending_full_writes), conf->bypass_count);
3668
3669        if (!list_empty(&conf->handle_list)) {
3670                sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
3671
3672                if (list_empty(&conf->hold_list))
3673                        conf->bypass_count = 0;
3674                else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
3675                        if (conf->hold_list.next == conf->last_hold)
3676                                conf->bypass_count++;
3677                        else {
3678                                conf->last_hold = conf->hold_list.next;
3679                                conf->bypass_count -= conf->bypass_threshold;
3680                                if (conf->bypass_count < 0)
3681                                        conf->bypass_count = 0;
3682                        }
3683                }
3684        } else if (!list_empty(&conf->hold_list) &&
3685                   ((conf->bypass_threshold &&
3686                     conf->bypass_count > conf->bypass_threshold) ||
3687                    atomic_read(&conf->pending_full_writes) == 0)) {
3688                sh = list_entry(conf->hold_list.next,
3689                                typeof(*sh), lru);
3690                conf->bypass_count -= conf->bypass_threshold;
3691                if (conf->bypass_count < 0)
3692                        conf->bypass_count = 0;
3693        } else
3694                return NULL;
3695
3696        list_del_init(&sh->lru);
3697        atomic_inc(&sh->count);
3698        BUG_ON(atomic_read(&sh->count) != 1);
3699        return sh;
3700}
3701
3702static int make_request(mddev_t *mddev, struct bio * bi)
3703{
3704        raid5_conf_t *conf = mddev->private;
3705        int dd_idx;
3706        sector_t new_sector;
3707        sector_t logical_sector, last_sector;
3708        struct stripe_head *sh;
3709        const int rw = bio_data_dir(bi);
3710        int remaining;
3711        int plugged;
3712
3713        if (unlikely(bi->bi_rw & REQ_FLUSH)) {
3714                md_flush_request(mddev, bi);
3715                return 0;
3716        }
3717
3718        md_write_start(mddev, bi);
3719
3720        if (rw == READ &&
3721             mddev->reshape_position == MaxSector &&
3722             chunk_aligned_read(mddev,bi))
3723                return 0;
3724
3725        logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
3726        last_sector = bi->bi_sector + (bi->bi_size>>9);
3727        bi->bi_next = NULL;
3728        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
3729
3730        plugged = mddev_check_plugged(mddev);
3731        for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
3732                DEFINE_WAIT(w);
3733                int disks, data_disks;
3734                int previous;
3735
3736        retry:
3737                previous = 0;
3738                disks = conf->raid_disks;
3739                prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
3740                if (unlikely(conf->reshape_progress != MaxSector)) {
3741                        /* spinlock is needed as reshape_progress may be
3742                         * 64bit on a 32bit platform, and so it might be
3743                         * possible to see a half-updated value
3744                         * Of course reshape_progress could change after
3745                         * the lock is dropped, so once we get a reference
3746                         * to the stripe that we think it is, we will have
3747                         * to check again.
3748                         */
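                            /* In short: sectors the reshape has not reached
                             * keep the previous geometry, sectors already
                             * reshaped and committed via reshape_safe use
                             * the new one, and anything in the window
                             * between reshape_safe and reshape_progress
                             * waits here and retries.
                             */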
3749                        spin_lock_irq(&conf->device_lock);
3750                        if (mddev->delta_disks < 0
3751                            ? logical_sector < conf->reshape_progress
3752                            : logical_sector >= conf->reshape_progress) {
3753                                disks = conf->previous_raid_disks;
3754                                previous = 1;
3755                        } else {
3756                                if (mddev->delta_disks < 0
3757                                    ? logical_sector < conf->reshape_safe
3758                                    : logical_sector >= conf->reshape_safe) {
3759                                        spin_unlock_irq(&conf->device_lock);
3760                                        schedule();
3761                                        goto retry;
3762                                }
3763                        }
3764                        spin_unlock_irq(&conf->device_lock);
3765                }
3766                data_disks = disks - conf->max_degraded;
3767
3768                new_sector = raid5_compute_sector(conf, logical_sector,
3769                                                  previous,
3770                                                  &dd_idx, NULL);
3771                pr_debug("raid456: make_request, sector %llu logical %llu\n",
3772                        (unsigned long long)new_sector, 
3773                        (unsigned long long)logical_sector);
3774
3775                sh = get_active_stripe(conf, new_sector, previous,
3776                                       (bi->bi_rw&RWA_MASK), 0);
3777                if (sh) {
3778                        if (unlikely(previous)) {
3779                                /* expansion might have moved on while waiting for a
3780                                 * stripe, so we must do the range check again.
3781                                 * Expansion could still move past after this
3782                                 * test, but as we are holding a reference to
3783                                 * 'sh', we know that if that happens,
3784                                 *  STRIPE_EXPANDING will get set and the expansion
3785                                 * won't proceed until we finish with the stripe.
3786                                 */
3787                                int must_retry = 0;
3788                                spin_lock_irq(&conf->device_lock);
3789                                if (mddev->delta_disks < 0
3790                                    ? logical_sector >= conf->reshape_progress
3791                                    : logical_sector < conf->reshape_progress)
3792                                        /* mismatch, need to try again */
3793                                        must_retry = 1;
3794                                spin_unlock_irq(&conf->device_lock);
3795                                if (must_retry) {
3796                                        release_stripe(sh);
3797                                        schedule();
3798                                        goto retry;
3799                                }
3800                        }
3801
3802                        if (rw == WRITE &&
3803                            logical_sector >= mddev->suspend_lo &&
3804                            logical_sector < mddev->suspend_hi) {
3805                                release_stripe(sh);
3806                                /* As the suspend_* range is controlled by
3807                                 * userspace, we want an interruptible
3808                                 * wait.
3809                                 */
3810                                flush_signals(current);
3811                                prepare_to_wait(&conf->wait_for_overlap,
3812                                                &w, TASK_INTERRUPTIBLE);
3813                                if (logical_sector >= mddev->suspend_lo &&
3814                                    logical_sector < mddev->suspend_hi)
3815                                        schedule();
3816                                goto retry;
3817                        }
3818
3819                        if (test_bit(STRIPE_EXPANDING, &sh->state) ||
3820                            !add_stripe_bio(sh, bi, dd_idx, rw)) {
3821                                /* Stripe is busy expanding or
3822                                 * add failed due to overlap.  Flush everything
3823                                 * and wait a while
3824                                 */
3825                                md_wakeup_thread(mddev->thread);
3826                                release_stripe(sh);
3827                                schedule();
3828                                goto retry;
3829                        }
3830                        finish_wait(&conf->wait_for_overlap, &w);
3831                        set_bit(STRIPE_HANDLE, &sh->state);
3832                        clear_bit(STRIPE_DELAYED, &sh->state);
3833                        if ((bi->bi_rw & REQ_SYNC) &&
3834                            !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
3835                                atomic_inc(&conf->preread_active_stripes);
3836                        release_stripe(sh);
3837                } else {
3838                        /* cannot get stripe for read-ahead, just give up */
3839                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
3840                        finish_wait(&conf->wait_for_overlap, &w);
3841                        break;
3842                }
3843
3844        }
3845        if (!plugged)
3846                md_wakeup_thread(mddev->thread);
3847
3848        spin_lock_irq(&conf->device_lock);
3849        remaining = raid5_dec_bi_phys_segments(bi);
3850        spin_unlock_irq(&conf->device_lock);
3851        if (remaining == 0) {
3852
3853                if ( rw == WRITE )
3854                        md_write_end(mddev);
3855
3856                bio_endio(bi, 0);
3857        }
3858
3859        return 0;
3860}
3861
3862static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks);
3863
3864static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped)
3865{
3866        /* Reshaping is quite different from recovery/resync, so it is
3867         * handled separately here.
3868         *
3869         * On each call to sync_request, we gather one chunk worth of
3870         * destination stripes and flag them as expanding.
3871         * Then we find all the source stripes and request reads.
3872         * As the reads complete, handle_stripe will copy the data
3873         * into the destination stripe and release that stripe.
3874         */
3875        raid5_conf_t *conf = mddev->private;
3876        struct stripe_head *sh;
3877        sector_t first_sector, last_sector;
3878        int raid_disks = conf->previous_raid_disks;
3879        int data_disks = raid_disks - conf->max_degraded;
3880        int new_data_disks = conf->raid_disks - conf->max_degraded;
3881        int i;
3882        int dd_idx;
3883        sector_t writepos, readpos, safepos;
3884        sector_t stripe_addr;
3885        int reshape_sectors;
3886        struct list_head stripes;
3887
3888        if (sector_nr == 0) {
3889                /* If restarting in the middle, skip the initial sectors */
3890                if (mddev->delta_disks < 0 &&
3891                    conf->reshape_progress < raid5_size(mddev, 0, 0)) {
3892                        sector_nr = raid5_size(mddev, 0, 0)
3893                                - conf->reshape_progress;
3894                } else if (mddev->delta_disks >= 0 &&
3895                           conf->reshape_progress > 0)
3896                        sector_nr = conf->reshape_progress;
3897                sector_div(sector_nr, new_data_disks);
3898                if (sector_nr) {
3899                        mddev->curr_resync_completed = sector_nr;
3900                        sysfs_notify(&mddev->kobj, NULL, "sync_completed");
3901                        *skipped = 1;
3902                        return sector_nr;
3903                }
3904        }
3905
3906        /* We need to process a full chunk at a time.
3907         * If old and new chunk sizes differ, we need to process the
3908         * larger of the two.
3909         */
3910        if (mddev->new_chunk_sectors > mddev->chunk_sectors)
3911                reshape_sectors = mddev->new_chunk_sectors;
3912        else
3913                reshape_sectors = mddev->chunk_sectors;
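            /* Illustrative example (not from the original source): if the
             * old chunk is 512 sectors and the new chunk is 128 sectors,
             * reshape_sectors is 512, so each pass below covers a whole
             * number of chunks in both the old and the new geometry.
             */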
3914
3915        /* We update the metadata at least every 10 seconds (a rather
3916         * arbitrary interval, see below), or sooner when the data about
3917         * to be copied would over-write the source of the data at
3918         * the front of the range,
3919         * i.e. when the block one new_stripe along from reshape_progress,
3920         * mapped through the new layout, lands after where reshape_safe
3921         * maps through the old layout.
3922         */
3923        writepos = conf->reshape_progress;
3924        sector_div(writepos, new_data_disks);
3925        readpos = conf->reshape_progress;
3926        sector_div(readpos, data_disks);
3927        safepos = conf->reshape_safe;
3928        sector_div(safepos, data_disks);
3929        if (mddev->delta_disks < 0) {
3930                writepos -= min_t(sector_t, reshape_sectors, writepos);
3931                readpos += reshape_sectors;
3932                safepos += reshape_sectors;
3933        } else {
3934                writepos += reshape_sectors;
3935                readpos -= min_t(sector_t, reshape_sectors, readpos);
3936                safepos -= min_t(sector_t, reshape_sectors, safepos);
3937        }
3938
3939        /* 'writepos' is the most advanced device address we might write.
3940         * 'readpos' is the least advanced device address we might read.
3941         * 'safepos' is the least address recorded in the metadata as having
3942         *     been reshaped.
3943         * If 'readpos' is behind 'writepos', then there is no way that we can
3944         * ensure safety in the face of a crash - that must be done by userspace
3945         * making a backup of the data.  So in that case there is no particular
3946         * rush to update metadata.
3947         * Otherwise if 'safepos' is behind 'writepos', then we really need to
3948         * update the metadata to advance 'safepos' to match 'readpos' so that
3949         * we can be safe in the event of a crash.
3950         * So we insist on updating metadata if safepos is behind writepos and
3951         * readpos is beyond writepos.
3952         * In any case, update the metadata every 10 seconds.
3953         * Maybe that number should be configurable, but I'm not sure it is
3954         * worth it.... maybe it could be a multiple of safemode_delay???
3955         */
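            /* Worked example (illustrative numbers, not from the original
             * source): growing a 4-device RAID5 (3 data disks) to 5 devices
             * (4 data disks) with 128-sector chunks, so reshape_sectors is
             * 128.  With reshape_progress = 6144 and reshape_safe = 3072:
             *   writepos = 6144/4 + 128 = 1664
             *   readpos  = 6144/3 - 128 = 1920
             *   safepos  = 3072/3 - 128 =  896
             * Here safepos < writepos and readpos > writepos, so the test
             * below forces a metadata update before more stripes are moved.
             */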
3956        if ((mddev->delta_disks < 0
3957             ? (safepos > writepos && readpos < writepos)
3958             : (safepos < writepos && readpos > writepos)) ||
3959            time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
3960                /* Cannot proceed until we've updated the superblock... */
3961                wait_event(conf->wait_for_overlap,
3962                           atomic_read(&conf->reshape_stripes)==0);
3963                mddev->reshape_position = conf->reshape_progress;
3964                mddev->curr_resync_completed = sector_nr;
3965                conf->reshape_checkpoint = jiffies;
3966                set_bit(MD_CHANGE_DEVS, &mddev->flags);
3967                md_wakeup_thread(mddev->thread);
3968                wait_event(mddev->sb_wait, mddev->flags == 0 ||
3969                           kthread_should_stop());
3970                spin_lock_irq(&conf->device_lock);
3971                conf->reshape_safe = mddev->reshape_position;
3972                spin_unlock_irq(&conf->device_lock);
3973                wake_up(&conf->wait_for_overlap);
3974                sysfs_notify(&mddev->kobj, NULL, "sync_completed");
3975        }
3976
3977        if (mddev->delta_disks < 0) {
3978                BUG_ON(conf->reshape_progress == 0);
3979                stripe_addr = writepos;
3980                BUG_ON((mddev->dev_sectors &
3981                        ~((sector_t)reshape_sectors - 1))
3982                       - reshape_sectors - stripe_addr
3983                       != sector_nr);
3984        } else {
3985                BUG_ON(writepos != sector_nr + reshape_sectors);
3986                stripe_addr = sector_nr;
3987        }
3988        INIT_LIST_HEAD(&stripes);
3989        for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
3990                int j;
3991                int skipped_disk = 0;
3992                sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
3993                set_bit(STRIPE_EXPANDING, &sh->state);
3994                atomic_inc(&conf->reshape_stripes);
3995                /* If any of this stripe is beyond the end of the old
3996                 * array, then we need to zero those blocks
3997                 */
3998                for (j=sh->disks; j--;) {
3999                        sector_t s;
4000                        if (j == sh->pd_idx)
4001                                continue;
4002                        if (conf->level == 6 &&
4003                            j == sh->qd_idx)
4004                                continue;
4005                        s = compute_blocknr(sh, j, 0);
4006                        if (s < raid5_size(mddev, 0, 0)) {
4007                                skipped_disk = 1;
4008                                continue;
4009                        }
4010                        memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
4011                        set_bit(R5_Expanded, &sh->dev[j].flags);
4012                        set_bit(R5_UPTODATE, &sh->dev[j].flags);
4013                }
4014                if (!skipped_disk) {
4015                        set_bit(STRIPE_EXPAND_READY, &sh->state);
4016                        set_bit(STRIPE_HANDLE, &sh->state);
4017                }
4018                list_add(&sh->lru, &stripes);
4019        }
4020        spin_lock_irq(&conf->device_lock);
4021        if (mddev->delta_disks < 0)
4022                conf->reshape_progress -= reshape_sectors * new_data_disks;
4023        else
4024                conf->reshape_progress += reshape_sectors * new_data_disks;
4025        spin_unlock_irq(&conf->device_lock);
4026        /* Ok, those stripes are ready. We can start scheduling
4027         * reads on the source stripes.
4028         * The source stripes are determined by mapping the first and last
4029         * block on the destination stripes.
4030         */
4031        first_sector =
4032                raid5_compute_sector(conf, stripe_addr*(new_data_disks),
4033                                     1, &dd_idx, NULL);
4034        last_sector =
4035                raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
4036                                            * new_data_disks - 1),
4037                                     1, &dd_idx, NULL);
4038        if (last_sector >= mddev->dev_sectors)
4039                last_sector = mddev->dev_sectors - 1;
4040        while (first_sector <= last_sector) {
4041                sh = get_active_stripe(conf, first_sector, 1, 0, 1);
4042                set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
4043                set_bit(STRIPE_HANDLE, &sh->state);
4044                release_stripe(sh);
4045                first_sector += STRIPE_SECTORS;
4046        }
4047        /* Now that the sources are clearly marked, we can release
4048         * the destination stripes
4049         */
4050        while (!list_empty(&stripes)) {
4051                sh = list_entry(stripes.next, struct stripe_head, lru);
4052                list_del_init(&sh->lru);
4053                release_stripe(sh);
4054        }
4055        /* If this takes us to the resync_max point where we have to pause,
4056         * then we need to write out the superblock.
4057         */
4058        sector_nr += reshape_sectors;
4059        if ((sector_nr - mddev->curr_resync_completed) * 2
4060            >= mddev->resync_max - mddev->curr_resync_completed) {
4061                /* Cannot proceed until we've updated the superblock... */
4062                wait_event(conf->wait_for_overlap,
4063                           atomic_read(&conf->reshape_stripes) == 0);
4064                mddev->reshape_position = conf->reshape_progress;
4065                mddev->curr_resync_completed = sector_nr;
4066                conf->reshape_checkpoint = jiffies;
4067                set_bit(MD_CHANGE_DEVS, &mddev->flags);
4068                md_wakeup_thread(mddev->thread);
4069                wait_event(mddev->sb_wait,
4070                           !test_bit(MD_CHANGE_DEVS, &mddev->flags)
4071                           || kthread_should_stop());
4072                spin_lock_irq(&conf->device_lock);
4073                conf->reshape_safe = mddev->reshape_position;
4074                spin_unlock_irq(&conf->device_lock);
4075                wake_up(&conf->wait_for_overlap);
4076                sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4077        }
4078        return reshape_sectors;
4079}
4080
4081/* FIXME go_faster isn't used */
4082static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
4083{
4084        raid5_conf_t *conf = mddev->private;
4085        struct stripe_head *sh;
4086        sector_t max_sector = mddev->dev_sectors;
4087        sector_t sync_blocks;
4088        int still_degraded = 0;
4089        int i;
4090
4091        if (sector_nr >= max_sector) {
4092                /* just being told to finish up .. nothing much to do */
4093
4094                if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
4095                        end_reshape(conf);
4096                        return 0;
4097                }
4098
4099                if (mddev->curr_resync < max_sector) /* aborted */
4100                        bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
4101                                        &sync_blocks, 1);
4102                else /* completed sync */
4103                        conf->fullsync = 0;
4104                bitmap_close_sync(mddev->bitmap);
4105
4106                return 0;
4107        }
4108
4109        /* Allow raid5_quiesce to complete */
4110        wait_event(conf->wait_for_overlap, conf->quiesce != 2);
4111
4112        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
4113                return reshape_request(mddev, sector_nr, skipped);
4114
4115        /* No need to check resync_max as we never do more than one
4116         * stripe, and as resync_max will always be on a chunk boundary,
4117         * if the check in md_do_sync didn't fire, there is no chance
4118         * of overstepping resync_max here
4119         */
4120
4121        /* if there are too many failed drives and we are trying
4122         * to resync, then assert that we are finished, because there is
4123         * nothing we can do.
4124         */
4125        if (mddev->degraded >= conf->max_degraded &&
4126            test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
4127                sector_t rv = mddev->dev_sectors - sector_nr;
4128                *skipped = 1;
4129                return rv;
4130        }
4131        if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
4132            !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
4133            !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
4134                /* we can skip this block, and probably more */
4135                sync_blocks /= STRIPE_SECTORS;
4136                *skipped = 1;
4137                return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
4138        }
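            /* Illustrative example (not from the original source): with 4K
             * pages STRIPE_SECTORS is 8, so if the bitmap reports e.g. 1003
             * in-sync sectors ahead we skip 125 whole stripes and return
             * 1000, keeping the resync position stripe-aligned.
             */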
4139
4140
4141        bitmap_cond_end_sync(mddev->bitmap, sector_nr);
4142
4143        sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
4144        if (sh == NULL) {
4145                sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
4146                /* make sure we don't swamp the stripe cache if someone else
4147                 * is trying to get access
4148                 */
4149                schedule_timeout_uninterruptible(1);
4150        }
4151        /* Need to check if the array will still be degraded after recovery/resync.
4152         * We don't need to check the 'failed' flag as when that gets set,
4153         * recovery aborts.
4154         */
4155        for (i = 0; i < conf->raid_disks; i++)
4156                if (conf->disks[i].rdev == NULL)
4157                        still_degraded = 1;
4158
4159        bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
4160
4161        set_bit(STRIPE_SYNCING, &sh->state);
4162
4163        handle_stripe(sh);
4164        release_stripe(sh);
4165
4166        return STRIPE_SECTORS;
4167}
4168
4169static int  retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
4170{
4171        /* We may not be able to submit a whole bio at once as there
4172         * may not be enough stripe_heads available.
4173         * We cannot pre-allocate enough stripe_heads as we may need
4174         * more than exist in the cache (if we allow ever larger chunks).
4175         * So we do one stripe head at a time and record in
4176         * ->bi_hw_segments how many have been done.
4177         *
4178         * We *know* that this entire raid_bio is in one chunk, so
4179         * there will be only one 'dd_idx' and we only need one call to raid5_compute_sector.
4180         */
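            /* Illustrative example (not from the original source): with 4K
             * pages an aligned 64K read spans 16 stripes.  If only five
             * stripe_heads can be obtained we handle stripes 0-4, record 5
             * in ->bi_hw_segments, park the bio on ->retry_read_aligned and
             * resume from stripe 5 on the next call.
             */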
4181        struct stripe_head *sh;
4182        int dd_idx;
4183        sector_t sector, logical_sector, last_sector;
4184        int scnt = 0;
4185        int remaining;
4186        int handled = 0;
4187
4188        logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4189        sector = raid5_compute_sector(conf, logical_sector,
4190                                      0, &dd_idx, NULL);
4191        last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
4192
4193        for (; logical_sector < last_sector;
4194             logical_sector += STRIPE_SECTORS,
4195                     sector += STRIPE_SECTORS,
4196                     scnt++) {
4197
4198                if (scnt < raid5_bi_hw_segments(raid_bio))
4199                        /* already done this stripe */
4200                        continue;
4201
4202                sh = get_active_stripe(conf, sector, 0, 1, 0);
4203
4204                if (!sh) {
4205                        /* failed to get a stripe - must wait */
4206                        raid5_set_bi_hw_segments(raid_bio, scnt);
4207                        conf->retry_read_aligned = raid_bio;
4208                        return handled;
4209                }
4210
4211                set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
4212                if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
4213                        release_stripe(sh);
4214                        raid5_set_bi_hw_segments(raid_bio, scnt);
4215                        conf->retry_read_aligned = raid_bio;
4216                        return handled;
4217                }
4218
4219                handle_stripe(sh);
4220                release_stripe(sh);
4221                handled++;
4222        }
4223        spin_lock_irq(&conf->device_lock);
4224        remaining = raid5_dec_bi_phys_segments(raid_bio);
4225        spin_unlock_irq(&conf->device_lock);
4226        if (remaining == 0)
4227                bio_endio(raid_bio, 0);
4228        if (atomic_dec_and_test(&conf->active_aligned_reads))
4229                wake_up(&conf->wait_for_stripe);
4230        return handled;
4231}
4232
4233
4234/*
4235 * This is our raid5 kernel thread.
4236 *
4237 * We scan the hash table for stripes which can be handled now.
4238 * During the scan, completed stripes are saved for us by the interrupt
4239 * handler, so that they will not have to wait for our next wakeup.
4240 */
4241static void raid5d(mddev_t *mddev)
4242{
4243        struct stripe_head *sh;
4244        raid5_conf_t *conf = mddev->private;
4245        int handled;
4246        struct blk_plug plug;
4247
4248        pr_debug("+++ raid5d active\n");
4249
4250        md_check_recovery(mddev);
4251
4252        blk_start_plug(&plug);
4253        handled = 0;
4254        spin_lock_irq(&conf->device_lock);
4255        while (1) {
4256                struct bio *bio;
4257
4258                if (atomic_read(&mddev->plug_cnt) == 0 &&
4259                    !list_empty(&conf->bitmap_list)) {
4260                        /* Now is a good time to flush some bitmap updates */
4261                        conf->seq_flush++;
4262                        spin_unlock_irq(&conf->device_lock);
4263                        bitmap_unplug(mddev->bitmap);
4264                        spin_lock_irq(&conf->device_lock);
4265                        conf->seq_write = conf->seq_flush;
4266                        activate_bit_delay(conf);
4267                }
4268                if (atomic_read(&mddev->plug_cnt) == 0)
4269                        raid5_activate_delayed(conf);
4270
4271                while ((bio = remove_bio_from_retry(conf))) {
4272                        int ok;
4273                        spin_unlock_irq(&conf->device_lock);
4274                        ok = retry_aligned_read(conf, bio);
4275                        spin_lock_irq(&conf->device_lock);
4276                        if (!ok)
4277                                break;
4278                        handled++;
4279                }
4280
4281                sh = __get_priority_stripe(conf);
4282
4283                if (!sh)
4284                        break;
4285                spin_unlock_irq(&conf->device_lock);
4286
4287                handled++;
4288                handle_stripe(sh);
4289                release_stripe(sh);
4290                cond_resched();
4291
4292                if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
4293                        md_check_recovery(mddev);
4294
4295                spin_lock_irq(&conf->device_lock);
4296        }
4297        pr_debug("%d stripes handled\n", handled);
4298
4299        spin_unlock_irq(&conf->device_lock);
4300
4301        async_tx_issue_pending_all();
4302        blk_finish_plug(&plug);
4303
4304        pr_debug("--- raid5d inactive\n");
4305}
4306
4307static ssize_t
4308raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
4309{
4310        raid5_conf_t *conf = mddev->private;
4311        if (conf)
4312                return sprintf(page, "%d\n", conf->max_nr_stripes);
4313        else
4314                return 0;
4315}
4316
4317int
4318raid5_set_cache_size(mddev_t *mddev, int size)
4319{
4320        raid5_conf_t *conf = mddev->private;
4321        int err;
4322
4323        if (size <= 16 || size > 32768)
4324                return -EINVAL;
4325        while (size < conf->max_nr_stripes) {
4326                if (drop_one_stripe(conf))
4327                        conf->max_nr_stripes--;
4328                else
4329                        break;
4330        }
4331        err = md_allow_write(mddev);
4332        if (err)
4333                return err;
4334        while (size > conf->max_nr_stripes) {
4335                if (grow_one_stripe(conf))
4336                        conf->max_nr_stripes++;
4337                else break;
4338        }
4339        return 0;
4340}
4341EXPORT_SYMBOL(raid5_set_cache_size);
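    /* Rough sizing note (illustrative, not from the original source): each
     * cached stripe pins one page per device, so growing the cache from the
     * default 256 to e.g. 1024 stripes on a 6-device array with 4K pages
     * pins about 1024 * 6 * 4K = 24M of page data, in addition to the
     * stripe_head and bio overhead counted in setup_conf()'s estimate.
     */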
4342
4343static ssize_t
4344raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
4345{
4346        raid5_conf_t *conf = mddev->private;
4347        unsigned long new;
4348        int err;
4349
4350        if (len >= PAGE_SIZE)
4351                return -EINVAL;
4352        if (!conf)
4353                return -ENODEV;
4354
4355        if (strict_strtoul(page, 10, &new))
4356                return -EINVAL;
4357        err = raid5_set_cache_size(mddev, new);
4358        if (err)
4359                return err;
4360        return len;
4361}
4362
4363static struct md_sysfs_entry
4364raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
4365                                raid5_show_stripe_cache_size,
4366                                raid5_store_stripe_cache_size);
4367
4368static ssize_t
4369raid5_show_preread_threshold(mddev_t *mddev, char *page)
4370{
4371        raid5_conf_t *conf = mddev->private;
4372        if (conf)
4373                return sprintf(page, "%d\n", conf->bypass_threshold);
4374        else
4375                return 0;
4376}
4377
4378static ssize_t
4379raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
4380{
4381        raid5_conf_t *conf = mddev->private;
4382        unsigned long new;
4383        if (len >= PAGE_SIZE)
4384                return -EINVAL;
4385        if (!conf)
4386                return -ENODEV;
4387
4388        if (strict_strtoul(page, 10, &new))
4389                return -EINVAL;
4390        if (new > conf->max_nr_stripes)
4391                return -EINVAL;
4392        conf->bypass_threshold = new;
4393        return len;
4394}
4395
4396static struct md_sysfs_entry
4397raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
4398                                        S_IRUGO | S_IWUSR,
4399                                        raid5_show_preread_threshold,
4400                                        raid5_store_preread_threshold);
4401
4402static ssize_t
4403stripe_cache_active_show(mddev_t *mddev, char *page)
4404{
4405        raid5_conf_t *conf = mddev->private;
4406        if (conf)
4407                return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
4408        else
4409                return 0;
4410}
4411
4412static struct md_sysfs_entry
4413raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
4414
4415static struct attribute *raid5_attrs[] =  {
4416        &raid5_stripecache_size.attr,
4417        &raid5_stripecache_active.attr,
4418        &raid5_preread_bypass_threshold.attr,
4419        NULL,
4420};
4421static struct attribute_group raid5_attrs_group = {
4422        .name = NULL,
4423        .attrs = raid5_attrs,
4424};
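    /* These attributes appear in md's sysfs directory, e.g. (illustrative)
     * /sys/block/md0/md/stripe_cache_size and
     * /sys/block/md0/md/preread_bypass_threshold; writing a decimal value
     * to the two writable files ends up in the _store handlers above, while
     * stripe_cache_active is read-only.
     */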
4425
4426static sector_t
4427raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
4428{
4429        raid5_conf_t *conf = mddev->private;
4430
4431        if (!sectors)
4432                sectors = mddev->dev_sectors;
4433        if (!raid_disks)
4434                /* size is defined by the smallest of previous and new size */
4435                raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
4436
4437        sectors &= ~((sector_t)mddev->chunk_sectors - 1);
4438        sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
4439        return sectors * (raid_disks - conf->max_degraded);
4440}
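    /* Worked example (illustrative numbers, not from the original source):
     * a 4-device RAID5 (max_degraded = 1) with 128-sector chunks and
     * 1000000 usable sectors per device: sectors is rounded down to 999936
     * (a multiple of 128) and the reported array size is
     * 999936 * (4 - 1) = 2999808 sectors.
     */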
4441
4442static void raid5_free_percpu(raid5_conf_t *conf)
4443{
4444        struct raid5_percpu *percpu;
4445        unsigned long cpu;
4446
4447        if (!conf->percpu)
4448                return;
4449
4450        get_online_cpus();
4451        for_each_possible_cpu(cpu) {
4452                percpu = per_cpu_ptr(conf->percpu, cpu);
4453                safe_put_page(percpu->spare_page);
4454                kfree(percpu->scribble);
4455        }
4456#ifdef CONFIG_HOTPLUG_CPU
4457        unregister_cpu_notifier(&conf->cpu_notify);
4458#endif
4459        put_online_cpus();
4460
4461        free_percpu(conf->percpu);
4462}
4463
4464static void free_conf(raid5_conf_t *conf)
4465{
4466        shrink_stripes(conf);
4467        raid5_free_percpu(conf);
4468        kfree(conf->disks);
4469        kfree(conf->stripe_hashtbl);
4470        kfree(conf);
4471}
4472
4473#ifdef CONFIG_HOTPLUG_CPU
4474static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
4475                              void *hcpu)
4476{
4477        raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify);
4478        long cpu = (long)hcpu;
4479        struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
4480
4481        switch (action) {
4482        case CPU_UP_PREPARE:
4483        case CPU_UP_PREPARE_FROZEN:
4484                if (conf->level == 6 && !percpu->spare_page)
4485                        percpu->spare_page = alloc_page(GFP_KERNEL);
4486                if (!percpu->scribble)
4487                        percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4488
4489                if (!percpu->scribble ||
4490                    (conf->level == 6 && !percpu->spare_page)) {
4491                        safe_put_page(percpu->spare_page);
4492                        kfree(percpu->scribble);
4493                        pr_err("%s: failed memory allocation for cpu%ld\n",
4494                               __func__, cpu);
4495                        return notifier_from_errno(-ENOMEM);
4496                }
4497                break;
4498        case CPU_DEAD:
4499        case CPU_DEAD_FROZEN:
4500                safe_put_page(percpu->spare_page);
4501                kfree(percpu->scribble);
4502                percpu->spare_page = NULL;
4503                percpu->scribble = NULL;
4504                break;
4505        default:
4506                break;
4507        }
4508        return NOTIFY_OK;
4509}
4510#endif
4511
4512static int raid5_alloc_percpu(raid5_conf_t *conf)
4513{
4514        unsigned long cpu;
4515        struct page *spare_page;
4516        struct raid5_percpu __percpu *allcpus;
4517        void *scribble;
4518        int err;
4519
4520        allcpus = alloc_percpu(struct raid5_percpu);
4521        if (!allcpus)
4522                return -ENOMEM;
4523        conf->percpu = allcpus;
4524
4525        get_online_cpus();
4526        err = 0;
4527        for_each_present_cpu(cpu) {
4528                if (conf->level == 6) {
4529                        spare_page = alloc_page(GFP_KERNEL);
4530                        if (!spare_page) {
4531                                err = -ENOMEM;
4532                                break;
4533                        }
4534                        per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
4535                }
4536                scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
4537                if (!scribble) {
4538                        err = -ENOMEM;
4539                        break;
4540                }
4541                per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
4542        }
4543#ifdef CONFIG_HOTPLUG_CPU
4544        conf->cpu_notify.notifier_call = raid456_cpu_notify;
4545        conf->cpu_notify.priority = 0;
4546        if (err == 0)
4547                err = register_cpu_notifier(&conf->cpu_notify);
4548#endif
4549        put_online_cpus();
4550
4551        return err;
4552}
4553
4554static raid5_conf_t *setup_conf(mddev_t *mddev)
4555{
4556        raid5_conf_t *conf;
4557        int raid_disk, memory, max_disks;
4558        mdk_rdev_t *rdev;
4559        struct disk_info *disk;
4560
4561        if (mddev->new_level != 5
4562            && mddev->new_level != 4
4563            && mddev->new_level != 6) {
4564                printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n",
4565                       mdname(mddev), mddev->new_level);
4566                return ERR_PTR(-EIO);
4567        }
4568        if ((mddev->new_level == 5
4569             && !algorithm_valid_raid5(mddev->new_layout)) ||
4570            (mddev->new_level == 6
4571             && !algorithm_valid_raid6(mddev->new_layout))) {
4572                printk(KERN_ERR "md/raid:%s: layout %d not supported\n",
4573                       mdname(mddev), mddev->new_layout);
4574                return ERR_PTR(-EIO);
4575        }
4576        if (mddev->new_level == 6 && mddev->raid_disks < 4) {
4577                printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n",
4578                       mdname(mddev), mddev->raid_disks);
4579                return ERR_PTR(-EINVAL);
4580        }
4581
4582        if (!mddev->new_chunk_sectors ||
4583            (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
4584            !is_power_of_2(mddev->new_chunk_sectors)) {
4585                printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n",
4586                       mdname(mddev), mddev->new_chunk_sectors << 9);
4587                return ERR_PTR(-EINVAL);
4588        }
4589
4590        conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
4591        if (conf == NULL)
4592                goto abort;
4593        spin_lock_init(&conf->device_lock);
4594        init_waitqueue_head(&conf->wait_for_stripe);
4595        init_waitqueue_head(&conf->wait_for_overlap);
4596        INIT_LIST_HEAD(&conf->handle_list);
4597        INIT_LIST_HEAD(&conf->hold_list);
4598        INIT_LIST_HEAD(&conf->delayed_list);
4599        INIT_LIST_HEAD(&conf->bitmap_list);
4600        INIT_LIST_HEAD(&conf->inactive_list);
4601        atomic_set(&conf->active_stripes, 0);
4602        atomic_set(&conf->preread_active_stripes, 0);
4603        atomic_set(&conf->active_aligned_reads, 0);
4604        conf->bypass_threshold = BYPASS_THRESHOLD;
4605
4606        conf->raid_disks = mddev->raid_disks;
4607        if (mddev->reshape_position == MaxSector)
4608                conf->previous_raid_disks = mddev->raid_disks;
4609        else
4610                conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
4611        max_disks = max(conf->raid_disks, conf->previous_raid_disks);
4612        conf->scribble_len = scribble_len(max_disks);
4613
4614        conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
4615                              GFP_KERNEL);
4616        if (!conf->disks)
4617                goto abort;
4618
4619        conf->mddev = mddev;
4620
4621        if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
4622                goto abort;
4623
4624        conf->level = mddev->new_level;
4625        if (raid5_alloc_percpu(conf) != 0)
4626                goto abort;
4627
4628        pr_debug("raid456: run(%s) called.\n", mdname(mddev));
4629
4630        list_for_each_entry(rdev, &mddev->disks, same_set) {
4631                raid_disk = rdev->raid_disk;
4632                if (raid_disk >= max_disks
4633                    || raid_disk < 0)
4634                        continue;
4635                disk = conf->disks + raid_disk;
4636
4637                disk->rdev = rdev;
4638
4639                if (test_bit(In_sync, &rdev->flags)) {
4640                        char b[BDEVNAME_SIZE];
4641                        printk(KERN_INFO "md/raid:%s: device %s operational as raid"
4642                               " disk %d\n",
4643                               mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
4644                } else if (rdev->saved_raid_disk != raid_disk)
4645                        /* Cannot rely on bitmap to complete recovery */
4646                        conf->fullsync = 1;
4647        }
4648
4649        conf->chunk_sectors = mddev->new_chunk_sectors;
4650        conf->level = mddev->new_level;
4651        if (conf->level == 6)
4652                conf->max_degraded = 2;
4653        else
4654                conf->max_degraded = 1;
4655        conf->algorithm = mddev->new_layout;
4656        conf->max_nr_stripes = NR_STRIPES;
4657        conf->reshape_progress = mddev->reshape_position;
4658        if (conf->reshape_progress != MaxSector) {
4659                conf->prev_chunk_sectors = mddev->chunk_sectors;
4660                conf->prev_algo = mddev->layout;
4661        }
4662
4663        memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
4664                 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
4665        if (grow_stripes(conf, conf->max_nr_stripes)) {
4666                printk(KERN_ERR
4667                       "md/raid:%s: couldn't allocate %dkB for buffers\n",
4668                       mdname(mddev), memory);
4669                goto abort;
4670        } else
4671                printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
4672                       mdname(mddev), memory);
4673
4674        conf->thread = md_register_thread(raid5d, mddev, NULL);
4675        if (!conf->thread) {
4676                printk(KERN_ERR
4677                       "md/raid:%s: couldn't allocate thread.\n",
4678                       mdname(mddev));
4679                goto abort;
4680        }
4681
4682        return conf;
4683
4684 abort:
4685        if (conf) {
4686                free_conf(conf);
4687                return ERR_PTR(-EIO);
4688        } else
4689                return ERR_PTR(-ENOMEM);
4690}
4691
4692
4693static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
4694{
4695        switch (algo) {
4696        case ALGORITHM_PARITY_0:
4697                if (raid_disk < max_degraded)
4698                        return 1;
4699                break;
4700        case ALGORITHM_PARITY_N:
4701                if (raid_disk >= raid_disks - max_degraded)
4702                        return 1;
4703                break;
4704        case ALGORITHM_PARITY_0_6:
4705                if (raid_disk == 0 || 
4706                    raid_disk == raid_disks - 1)
4707                        return 1;
4708                break;
4709        case ALGORITHM_LEFT_ASYMMETRIC_6:
4710        case ALGORITHM_RIGHT_ASYMMETRIC_6:
4711        case ALGORITHM_LEFT_SYMMETRIC_6:
4712        case ALGORITHM_RIGHT_SYMMETRIC_6:
4713                if (raid_disk == raid_disks - 1)
4714                        return 1;
4715        }
4716        return 0;
4717}
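    /* Illustrative reading (not from the original source): with
     * ALGORITHM_PARITY_0 the first max_degraded devices hold only parity,
     * so disk 0 (and disk 1 on RAID6) qualifies; with the *_6 layouts the
     * dedicated Q device is the last one, so only raid_disks - 1 qualifies
     * (plus disk 0 for ALGORITHM_PARITY_0_6).
     */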
4718
4719static int run(mddev_t *mddev)
4720{
4721        raid5_conf_t *conf;
4722        int working_disks = 0;
4723        int dirty_parity_disks = 0;
4724        mdk_rdev_t *rdev;
4725        sector_t reshape_offset = 0;
4726
4727        if (mddev->recovery_cp != MaxSector)
4728                printk(KERN_NOTICE "md/raid:%s: not clean"
4729                       " -- starting background reconstruction\n",
4730                       mdname(mddev));
4731        if (mddev->reshape_position != MaxSector) {
4732                /* Check that we can continue the reshape.
4733                 * The level cannot change; the number of disks may
4734                 * grow, shrink or stay the same, and we must be past
4735                 * the point where a stripe over-writes itself.
4736                 */
4737                sector_t here_new, here_old;
4738                int old_disks;
4739                int max_degraded = (mddev->level == 6 ? 2 : 1);
4740
4741                if (mddev->new_level != mddev->level) {
4742                        printk(KERN_ERR "md/raid:%s: unsupported reshape "
4743                               "required - aborting.\n",
4744                               mdname(mddev));
4745                        return -EINVAL;
4746                }
4747                old_disks = mddev->raid_disks - mddev->delta_disks;
4748                /* reshape_position must be on a new-stripe boundary, and one
4749                 * further up in new geometry must map after here in old
4750                 * geometry.
4751                 */
4752                here_new = mddev->reshape_position;
4753                if (sector_div(here_new, mddev->new_chunk_sectors *
4754                               (mddev->raid_disks - max_degraded))) {
4755                        printk(KERN_ERR "md/raid:%s: reshape_position not "
4756                               "on a stripe boundary\n", mdname(mddev));
4757                        return -EINVAL;
4758                }
4759                reshape_offset = here_new * mddev->new_chunk_sectors;
4760                /* here_new is the stripe we will write to */
4761                here_old = mddev->reshape_position;
4762                sector_div(here_old, mddev->chunk_sectors *
4763                           (old_disks-max_degraded));
4764                /* here_old is the first stripe that we might need to read
4765                 * from */
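                    /* Worked example (illustrative numbers, not from the
                     * original source): growing 4 -> 5 devices with
                     * 128-sector chunks and reshape_position = 5120 gives
                     * here_new = 5120 / (128 * 4) = 10 and
                     * here_old = 5120 / (128 * 3) = 13, i.e. writes resume
                     * at device offset 10 * 128 = 1280 while reads start at
                     * 13 * 128 = 1664; as 1280 < 1664 the checks below let
                     * the reshape continue.
                     */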
4766                if (mddev->delta_disks == 0) {
4767                        /* We cannot be sure it is safe to start an in-place
4768                         * reshape.  It is only safe if user-space is monitoring
4769                         * and taking constant backups.
4770                         * mdadm always starts a situation like this in
4771                         * readonly mode so it can take control before
4772                         * allowing any writes.  So just check for that.
4773                         */
4774                        if ((here_new * mddev->new_chunk_sectors != 
4775                             here_old * mddev->chunk_sectors) ||
4776                            mddev->ro == 0) {
4777                                printk(KERN_ERR "md/raid:%s: in-place reshape must be started"
4778                                       " in read-only mode - aborting\n",
4779                                       mdname(mddev));
4780                                return -EINVAL;
4781                        }
4782                } else if (mddev->delta_disks < 0
4783                    ? (here_new * mddev->new_chunk_sectors <=
4784                       here_old * mddev->chunk_sectors)
4785                    : (here_new * mddev->new_chunk_sectors >=
4786                       here_old * mddev->chunk_sectors)) {
4787                        /* Reading from the same stripe as writing to - bad */
4788                        printk(KERN_ERR "md/raid:%s: reshape_position too early for "
4789                               "auto-recovery - aborting.\n",
4790                               mdname(mddev));
4791                        return -EINVAL;
4792                }
4793                printk(KERN_INFO "md/raid:%s: reshape will continue\n",
4794                       mdname(mddev));
4795                /* OK, we should be able to continue; */
4796        } else {
4797                BUG_ON(mddev->level != mddev->new_level);
4798                BUG_ON(mddev->layout != mddev->new_layout);
4799                BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
4800                BUG_ON(mddev->delta_disks != 0);
4801        }
4802
4803        if (mddev->private == NULL)
4804                conf = setup_conf(mddev);
4805        else
4806                conf = mddev->private;
4807
4808        if (IS_ERR(conf))
4809                return PTR_ERR(conf);
4810
4811        mddev->thread = conf->thread;
4812        conf->thread = NULL;
4813        mddev->private = conf;
4814
4815        /*
4816         * 0 for a fully functional array, 1 or 2 for a degraded array.
4817         */
4818        list_for_each_entry(rdev, &mddev->disks, same_set) {
4819                if (rdev->raid_disk < 0)
4820                        continue;
4821                if (test_bit(In_sync, &rdev->flags)) {
4822                        working_disks++;
4823                        continue;
4824                }
4825                /* This disc is not fully in-sync.  However if it
4826                 * just stored parity (beyond the recovery_offset),
4827                 * then we don't need to be concerned about the
4828                 * array being dirty.
4829                 * When reshape goes 'backwards', we never have
4830                 * partially completed devices, so we only need
4831                 * to worry about reshape going forwards.
4832                 */
4833                /* Hack because v0.91 doesn't store recovery_offset properly. */
4834                if (mddev->major_version == 0 &&
4835                    mddev->minor_version > 90)
4836                        rdev->recovery_offset = reshape_offset;
4837
4838                if (rdev->recovery_offset < reshape_offset) {
4839                        /* We need to check old and new layout */
4840                        if (!only_parity(rdev->raid_disk,
4841                                         conf->algorithm,
4842                                         conf->raid_disks,
4843                                         conf->max_degraded))
4844                                continue;
4845                }
4846                if (!only_parity(rdev->raid_disk,
4847                                 conf->prev_algo,
4848                                 conf->previous_raid_disks,
4849                                 conf->max_degraded))
4850                        continue;
4851                dirty_parity_disks++;
4852        }
4853
4854        mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
4855                           - working_disks);
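            /* Illustrative example (not from the original source): a
             * 5-device RAID6 (max_degraded = 2) that comes up with 4
             * In_sync members has mddev->degraded = 1 and may start;
             * with only 2 In_sync members degraded would be 3, so
             * has_failed() below rejects it.
             */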
4856
4857        if (has_failed(conf)) {
4858                printk(KERN_ERR "md/raid:%s: not enough operational devices"
4859                        " (%d/%d failed)\n",
4860                        mdname(mddev), mddev->degraded, conf->raid_disks);
4861                goto abort;
4862        }
4863
4864        /* device size must be a multiple of chunk size */
4865        mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
4866        mddev->resync_max_sectors = mddev->dev_sectors;
4867
4868        if (mddev->degraded > dirty_parity_disks &&
4869            mddev->recovery_cp != MaxSector) {
4870                if (mddev->ok_start_degraded)
4871                        printk(KERN_WARNING
4872                               "md/raid:%s: starting dirty degraded array"
4873                               " - data corruption possible.\n",
4874                               mdname(mddev));
4875                else {
4876                        printk(KERN_ERR
4877                               "md/raid:%s: cannot start dirty degraded array.\n",
4878                               mdname(mddev));
4879                        goto abort;
4880                }
4881        }
4882
4883        if (mddev->degraded == 0)
4884                printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d"
4885                       " devices, algorithm %d\n", mdname(mddev), conf->level,
4886                       mddev->raid_disks-mddev->degraded, mddev->raid_disks,
4887                       mddev->new_layout);
4888        else
4889                printk(KERN_ALERT "md/raid:%s: raid level %d active with %d"
4890                       " out of %d devices, algorithm %d\n",
4891                       mdname(mddev), conf->level,
4892                       mddev->raid_disks - mddev->degraded,
4893                       mddev->raid_disks, mddev->new_layout);
4894
4895        print_raid5_conf(conf);
4896
4897        if (conf->reshape_progress != MaxSector) {
4898                conf->reshape_safe = conf->reshape_progress;
4899                atomic_set(&conf->reshape_stripes, 0);
4900                clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4901                clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4902                set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4903                set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4904                mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4905                                                        "reshape");
4906        }
4907
4908
4909        /* Ok, everything is just fine now */
4910        if (mddev->to_remove == &raid5_attrs_group)
4911                mddev->to_remove = NULL;
4912        else if (mddev->kobj.sd &&
4913            sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
4914                printk(KERN_WARNING
4915                       "raid5: failed to create sysfs attributes for %s\n",
4916                       mdname(mddev));
4917