linux/drivers/md/raid0.c
/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
             <zyngier@ufr-info-p7.ibp.fr> or
             <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat


   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

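/*
 * The array counts as congested if the meta-device itself is congested
 * or if any member device reports congestion for the queried bits.
 * Checking the devices of zone 0 is enough, since zone 0 contains
 * every member of the array.
 */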
static int raid0_congested(void *data, int bits)
{
        struct mddev *mddev = data;
        struct r0conf *conf = mddev->private;
        struct md_rdev **devlist = conf->devlist;
        int raid_disks = conf->strip_zone[0].nb_dev;
        int i, ret = 0;

        if (mddev_congested(mddev, bits))
                return 1;

        for (i = 0; i < raid_disks && !ret; i++) {
                struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

                ret |= bdi_congested(&q->backing_dev_info, bits);
        }
        return ret;
}

/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
        int j, k;
        sector_t zone_size = 0;
        sector_t zone_start = 0;
        char b[BDEVNAME_SIZE];
        struct r0conf *conf = mddev->private;
        int raid_disks = conf->strip_zone[0].nb_dev;

        printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
               mdname(mddev),
               conf->nr_strip_zones, conf->nr_strip_zones == 1 ? "" : "s");
        for (j = 0; j < conf->nr_strip_zones; j++) {
                printk(KERN_INFO "md: zone%d=[", j);
                for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
                        printk(KERN_CONT "%s%s", k ? "/" : "",
                               bdevname(conf->devlist[j * raid_disks
                                                      + k]->bdev, b));
                printk(KERN_CONT "]\n");

                zone_size  = conf->strip_zone[j].zone_end - zone_start;
                printk(KERN_INFO "      zone-offset=%10lluKB, "
                                 "device-offset=%10lluKB, size=%10lluKB\n",
                       (unsigned long long)zone_start >> 1,
                       (unsigned long long)conf->strip_zone[j].dev_start >> 1,
                       (unsigned long long)zone_size >> 1);
                zone_start = conf->strip_zone[j].zone_end;
        }
        printk(KERN_INFO "\n");
}

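/*
 * A raid0 array built from devices of different sizes is described as a
 * stack of "strip zones": zone 0 stripes across all devices up to the
 * capacity of the smallest one, and each following zone stripes across
 * the devices that still have room. For example, two 100-chunk disks
 * plus one 60-chunk disk yield zone 0 (3 devices x 60 chunks) followed
 * by zone 1 (2 devices x 40 chunks).
 */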
static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
        int i, c, err;
        sector_t curr_zone_end, sectors;
        struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
        struct strip_zone *zone;
        int cnt;
        char b[BDEVNAME_SIZE];
        char b2[BDEVNAME_SIZE];
        struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
        bool discard_supported = false;

        if (!conf)
                return -ENOMEM;
        rdev_for_each(rdev1, mddev) {
                pr_debug("md/raid0:%s: looking at %s\n",
                         mdname(mddev),
                         bdevname(rdev1->bdev, b));
                c = 0;

                /* round size to chunk_size */
                sectors = rdev1->sectors;
                sector_div(sectors, mddev->chunk_sectors);
                rdev1->sectors = sectors * mddev->chunk_sectors;

                rdev_for_each(rdev2, mddev) {
                        pr_debug("md/raid0:%s:   comparing %s(%llu)"
                                 " with %s(%llu)\n",
                                 mdname(mddev),
                                 bdevname(rdev1->bdev, b),
                                 (unsigned long long)rdev1->sectors,
                                 bdevname(rdev2->bdev, b2),
                                 (unsigned long long)rdev2->sectors);
                        if (rdev2 == rdev1) {
                                pr_debug("md/raid0:%s:   END\n",
                                         mdname(mddev));
                                break;
                        }
                        if (rdev2->sectors == rdev1->sectors) {
                                /*
                                 * Not unique, don't count it as a new
                                 * group
                                 */
                                pr_debug("md/raid0:%s:   EQUAL\n",
                                         mdname(mddev));
                                c = 1;
                                break;
                        }
                        pr_debug("md/raid0:%s:   NOT EQUAL\n",
                                 mdname(mddev));
                }
                if (!c) {
                        pr_debug("md/raid0:%s:   ==> UNIQUE\n",
                                 mdname(mddev));
                        conf->nr_strip_zones++;
                        pr_debug("md/raid0:%s: %d zones\n",
                                 mdname(mddev), conf->nr_strip_zones);
                }
        }
        pr_debug("md/raid0:%s: FINAL %d zones\n",
                 mdname(mddev), conf->nr_strip_zones);
        err = -ENOMEM;
        conf->strip_zone = kzalloc(sizeof(struct strip_zone) *
                                   conf->nr_strip_zones, GFP_KERNEL);
        if (!conf->strip_zone)
                goto abort;
        conf->devlist = kzalloc(sizeof(struct md_rdev *) *
                                conf->nr_strip_zones * mddev->raid_disks,
                                GFP_KERNEL);
        if (!conf->devlist)
                goto abort;

        /* The first zone must contain all devices, so here we check that
         * there is a proper alignment of slots to devices and find them all
         */
        zone = &conf->strip_zone[0];
        cnt = 0;
        smallest = NULL;
        dev = conf->devlist;
        err = -EINVAL;
        rdev_for_each(rdev1, mddev) {
                int j = rdev1->raid_disk;

                if (mddev->level == 10) {
                        /* taking over a raid10-n2 array */
                        j /= 2;
                        rdev1->new_raid_disk = j;
                }
                if (mddev->level == 1) {
                        /* taking over a raid1 array -
                         * we have only one active disk
                         */
                        j = 0;
                        rdev1->new_raid_disk = j;
                }

                if (j < 0) {
                        printk(KERN_ERR
                               "md/raid0:%s: remove inactive devices before converting to RAID0\n",
                               mdname(mddev));
                        goto abort;
                }
                if (j >= mddev->raid_disks) {
                        printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
                               "aborting!\n", mdname(mddev), j);
                        goto abort;
                }
                if (dev[j]) {
                        printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
                               "aborting!\n", mdname(mddev), j);
                        goto abort;
                }
                dev[j] = rdev1;

                disk_stack_limits(mddev->gendisk, rdev1->bdev,
                                  rdev1->data_offset << 9);

                if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
                        conf->has_merge_bvec = 1;

                if (!smallest || (rdev1->sectors < smallest->sectors))
                        smallest = rdev1;
                cnt++;

                if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
                        discard_supported = true;
        }
        if (cnt != mddev->raid_disks) {
                printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
                       "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
                goto abort;
        }
        zone->nb_dev = cnt;
        zone->zone_end = smallest->sectors * cnt;

        curr_zone_end = zone->zone_end;

        /* now do the other zones */
        for (i = 1; i < conf->nr_strip_zones; i++)
        {
                int j;

                zone = conf->strip_zone + i;
                dev = conf->devlist + i * mddev->raid_disks;

                pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
                zone->dev_start = smallest->sectors;
                smallest = NULL;
                c = 0;

                for (j = 0; j < cnt; j++) {
                        rdev = conf->devlist[j];
                        if (rdev->sectors <= zone->dev_start) {
                                pr_debug("md/raid0:%s: checking %s ... nope\n",
                                         mdname(mddev),
                                         bdevname(rdev->bdev, b));
                                continue;
                        }
                        pr_debug("md/raid0:%s: checking %s ..."
                                 " contained as device %d\n",
                                 mdname(mddev),
                                 bdevname(rdev->bdev, b), c);
                        dev[c] = rdev;
                        c++;
                        if (!smallest || rdev->sectors < smallest->sectors) {
                                smallest = rdev;
                                pr_debug("md/raid0:%s: (%llu) is smallest!\n",
                                         mdname(mddev),
                                         (unsigned long long)rdev->sectors);
                        }
                }

                zone->nb_dev = c;
                sectors = (smallest->sectors - zone->dev_start) * c;
                pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
                         mdname(mddev),
                         zone->nb_dev, (unsigned long long)sectors);

                curr_zone_end += sectors;
                zone->zone_end = curr_zone_end;

                pr_debug("md/raid0:%s: current zone start: %llu\n",
                         mdname(mddev),
                         (unsigned long long)smallest->sectors);
        }
        mddev->queue->backing_dev_info.congested_fn = raid0_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;

        /*
         * now since we have the hard sector sizes, we can make sure
         * chunk size is a multiple of that sector size
         */
        if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
                printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
                       mdname(mddev),
                       mddev->chunk_sectors << 9);
                goto abort;
        }

        blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
        blk_queue_io_opt(mddev->queue,
                         (mddev->chunk_sectors << 9) * mddev->raid_disks);

        if (!discard_supported)
                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
        else
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);

        pr_debug("md/raid0:%s: done.\n", mdname(mddev));
        *private_conf = conf;

        return 0;
abort:
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
        *private_conf = ERR_PTR(err);
        return err;
}

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
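/* e.g. with zone_end = {600, 900}, sector 700 falls in zone 1 and
 * *sectorp becomes 700 - 600 = 100
 */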
static struct strip_zone *find_zone(struct r0conf *conf,
                                    sector_t *sectorp)
{
        int i;
        struct strip_zone *z = conf->strip_zone;
        sector_t sector = *sectorp;

        for (i = 0; i < conf->nr_strip_zones; i++)
                if (sector < z[i].zone_end) {
                        if (i)
                                *sectorp = sector - z[i-1].zone_end;
                        return z + i;
                }
        BUG();
}

/*
 * remaps the bio to the target device. we separate the mapping into two
 * flows, a power-of-2 flow and a general flow, for the sake of performance
 */
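/*
 * Worked example for the power-of-2 path: with chunk_sects = 8 and a
 * zone of nb_dev = 2, array sector 20 (zone-relative offset 20) gives
 * sect_in_chunk = 20 & 7 = 4 and chunk number 20 >> 3 = 2, so the
 * target is device 2 % 2 = 0; the chunk on that device is 20 / 16 = 1,
 * so *sector_offset becomes 1 * 8 + 4 = 12.
 */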
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
                                  sector_t sector, sector_t *sector_offset)
{
        unsigned int sect_in_chunk;
        sector_t chunk;
        struct r0conf *conf = mddev->private;
        int raid_disks = conf->strip_zone[0].nb_dev;
        unsigned int chunk_sects = mddev->chunk_sectors;

        if (is_power_of_2(chunk_sects)) {
                int chunksect_bits = ffz(~chunk_sects);
                /* find the sector offset inside the chunk */
                sect_in_chunk = sector & (chunk_sects - 1);
                sector >>= chunksect_bits;
                /* chunk in zone */
                chunk = *sector_offset;
                /* quotient is the chunk in the real device */
                sector_div(chunk, zone->nb_dev << chunksect_bits);
        } else {
                sect_in_chunk = sector_div(sector, chunk_sects);
                chunk = *sector_offset;
                sector_div(chunk, chunk_sects * zone->nb_dev);
        }
        /*
         *  position the bio over the real device
         *  real sector = chunk in device + starting of zone
         *      + the position in the chunk
         */
        *sector_offset = (chunk * chunk_sects) + sect_in_chunk;
        return conf->devlist[(zone - conf->strip_zone) * raid_disks
                             + sector_div(sector, zone->nb_dev)];
}

/**
 *      raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *      @q: request queue
 *      @bvm: properties of new bio
 *      @biovec: the bio vector that could be merged into it
 *
 *      Return the number of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
                                struct bvec_merge_data *bvm,
                                struct bio_vec *biovec)
{
        struct mddev *mddev = q->queuedata;
        struct r0conf *conf = mddev->private;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        sector_t sector_offset = sector;
        int max;
        unsigned int chunk_sectors = mddev->chunk_sectors;
        unsigned int bio_sectors = bvm->bi_size >> 9;
        struct strip_zone *zone;
        struct md_rdev *rdev;
        struct request_queue *subq;

        if (is_power_of_2(chunk_sectors))
                max = (chunk_sectors - ((sector & (chunk_sectors-1))
                                        + bio_sectors)) << 9;
        else
                max = (chunk_sectors - (sector_div(sector, chunk_sectors)
                                        + bio_sectors)) << 9;
        if (max < 0)
                max = 0; /* bio_add cannot handle a negative return */
        if (max <= biovec->bv_len && bio_sectors == 0)
                return biovec->bv_len;
        if (max < biovec->bv_len)
                /* too small already, no need to check further */
                return max;
        if (!conf->has_merge_bvec)
                return max;

        /* May need to check subordinate device */
        sector = sector_offset;
        zone = find_zone(mddev->private, &sector_offset);
        rdev = map_sector(mddev, zone, sector, &sector_offset);
        subq = bdev_get_queue(rdev->bdev);
        if (subq->merge_bvec_fn) {
                bvm->bi_bdev = rdev->bdev;
                bvm->bi_sector = sector_offset + zone->dev_start +
                        rdev->data_offset;
                return min(max, subq->merge_bvec_fn(subq, bvm, biovec));
        } else
                return max;
}

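/*
 * The array size is the sum of each member's capacity masked down by
 * chunk_sectors - 1; the sizes were already rounded to whole chunks in
 * create_strip_zones(). E.g. with chunk_sectors = 128, a 1000-sector
 * device contributes 1000 & ~127 = 896 sectors.
 */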
static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
        sector_t array_sectors = 0;
        struct md_rdev *rdev;

        WARN_ONCE(sectors || raid_disks,
                  "%s does not support generic reshape\n", __func__);

        rdev_for_each(rdev, mddev)
                array_sectors += (rdev->sectors &
                                  ~(sector_t)(mddev->chunk_sectors-1));

        return array_sectors;
}

static int raid0_stop(struct mddev *mddev);

static int raid0_run(struct mddev *mddev)
{
        struct r0conf *conf;
        int ret;

        if (mddev->chunk_sectors == 0) {
                printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
                       mdname(mddev));
                return -EINVAL;
        }
        if (md_check_no_bitmap(mddev))
                return -EINVAL;
        blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
        blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
        blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);

        /* if private is not null, we are here after takeover */
        if (mddev->private == NULL) {
                ret = create_strip_zones(mddev, &conf);
                if (ret < 0)
                        return ret;
                mddev->private = conf;
        }
        conf = mddev->private;

        /* calculate array device size */
        md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

        printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
               mdname(mddev),
               (unsigned long long)mddev->array_sectors);
        /* calculate the max read-ahead size.
         * For read-ahead of large files to be effective, we need to
         * readahead at least twice a whole stripe, i.e. the number of
         * devices multiplied by the chunk size, times 2.
         * If an individual device has an ra_pages greater than the
         * chunk size, then we will not drive that device as hard as it
         * wants.  We consider this a configuration error: a larger
         * chunksize should be used in that case.
         */
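        /* e.g. 4 disks with 512KiB chunks and 4KiB pages: stripe = 512
         * pages, so ra_pages is raised to at least 1024 pages (4MiB).
         */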
        {
                int stripe = mddev->raid_disks *
                        (mddev->chunk_sectors << 9) / PAGE_SIZE;
                if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
        }

        blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
        dump_zones(mddev);

        ret = md_integrity_register(mddev);
        if (ret)
                raid0_stop(mddev);

        return ret;
}

static int raid0_stop(struct mddev *mddev)
{
        struct r0conf *conf = mddev->private;

        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
        mddev->private = NULL;
        return 0;
}

/*
 * Is the I/O contained within a single chunk, or is it spread over
 * more than one? Returns nonzero when it fits inside one chunk.
 */
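/* e.g. with chunk_sects = 128, a 16-sector bio starting at sector 120
 * ends at sector 136 and therefore crosses into the next chunk
 * (120 + 16 > 128).
 */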
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
                        unsigned int chunk_sects, struct bio *bio)
{
        if (likely(is_power_of_2(chunk_sects))) {
                return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
                                        + (bio->bi_size >> 9));
        } else {
                sector_t sector = bio->bi_sector;
                return chunk_sects >= (sector_div(sector, chunk_sects)
                                        + (bio->bi_size >> 9));
        }
}

static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
        unsigned int chunk_sects;
        sector_t sector_offset;
        struct strip_zone *zone;
        struct md_rdev *tmp_dev;

        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }

        chunk_sects = mddev->chunk_sectors;
        if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
                sector_t sector = bio->bi_sector;
                struct bio_pair *bp;
                /* Sanity check -- queue functions should prevent this happening */
                if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
                    bio->bi_idx != 0)
                        goto bad_map;
                /* This is a one page bio that upper layers
                 * refuse to split for us, so we need to split it.
                 */
                if (likely(is_power_of_2(chunk_sects)))
                        bp = bio_split(bio, chunk_sects - (sector &
                                                           (chunk_sects-1)));
                else
                        bp = bio_split(bio, chunk_sects -
                                       sector_div(sector, chunk_sects));
                raid0_make_request(mddev, &bp->bio1);
                raid0_make_request(mddev, &bp->bio2);
                bio_pair_release(bp);
                return;
        }

        sector_offset = bio->bi_sector;
        zone = find_zone(mddev->private, &sector_offset);
        tmp_dev = map_sector(mddev, zone, bio->bi_sector,
                             &sector_offset);
        bio->bi_bdev = tmp_dev->bdev;
        bio->bi_sector = sector_offset + zone->dev_start +
                tmp_dev->data_offset;

        if (unlikely((bio->bi_rw & REQ_DISCARD) &&
                     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
                /* Just ignore it */
                bio_endio(bio, 0);
                return;
        }

        generic_make_request(bio);
        return;

bad_map:
        printk("md/raid0:%s: make_request bug: can't convert block across chunks"
               " or bigger than %dk %llu %d\n",
               mdname(mddev), chunk_sects / 2,
               (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

        bio_io_error(bio);
        return;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
        seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
        return;
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
        struct md_rdev *rdev;
        struct r0conf *priv_conf;

        if (mddev->degraded != 1) {
                printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
                       mdname(mddev),
                       mddev->degraded);
                return ERR_PTR(-EINVAL);
        }

        rdev_for_each(rdev, mddev) {
                /* check slot number for a disk */
                if (rdev->raid_disk == mddev->raid_disks-1) {
                        printk(KERN_ERR "md/raid0:%s: raid5 must have a missing parity disk!\n",
                               mdname(mddev));
                        return ERR_PTR(-EINVAL);
                }
        }

        /* Set new parameters */
        mddev->new_level = 0;
        mddev->new_layout = 0;
        mddev->new_chunk_sectors = mddev->chunk_sectors;
        mddev->raid_disks--;
        mddev->delta_disks = -1;
        /* make sure it will not be marked as dirty */
        mddev->recovery_cp = MaxSector;

        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
        struct r0conf *priv_conf;

        /* Check layout:
         *  - far_copies must be 1
         *  - near_copies must be 2
         *  - disks number must be even
         *  - all mirrors must be already degraded
         */
        if (mddev->layout != ((1 << 8) + 2)) {
                printk(KERN_ERR "md/raid0:%s: Raid0 cannot take over layout: 0x%x\n",
                       mdname(mddev),
                       mddev->layout);
                return ERR_PTR(-EINVAL);
        }
        if (mddev->raid_disks & 1) {
                printk(KERN_ERR "md/raid0:%s: Raid0 cannot take over Raid10 with an odd number of disks.\n",
                       mdname(mddev));
                return ERR_PTR(-EINVAL);
        }
        if (mddev->degraded != (mddev->raid_disks>>1)) {
                printk(KERN_ERR "md/raid0:%s: All mirrors must be already degraded!\n",
                       mdname(mddev));
                return ERR_PTR(-EINVAL);
        }

        /* Set new parameters */
        mddev->new_level = 0;
        mddev->new_layout = 0;
        mddev->new_chunk_sectors = mddev->chunk_sectors;
        mddev->delta_disks = -mddev->raid_disks / 2;
        mddev->raid_disks += mddev->delta_disks;
        mddev->degraded = 0;
        /* make sure it will not be marked as dirty */
        mddev->recovery_cp = MaxSector;

        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
}

static void *raid0_takeover_raid1(struct mddev *mddev)
{
        struct r0conf *priv_conf;
        int chunksect;

        /* Check layout:
         *  - (N - 1) mirror drives must be already faulty
         */
        if ((mddev->raid_disks - 1) != mddev->degraded) {
                printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must be already faulty!\n",
                       mdname(mddev));
                return ERR_PTR(-EINVAL);
        }

        /*
         * a raid1 doesn't have the notion of chunk size, so
         * figure out the largest suitable size we can use.
         */
        chunksect = 64 * 2; /* 64K by default */

        /* The array must be an exact multiple of chunksize */
        while (chunksect && (mddev->array_sectors & (chunksect - 1)))
                chunksect >>= 1;
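        /* e.g. an array of 3000 sectors: chunksect shrinks 128 -> 64 ->
         * 32 -> 16 -> 8, since 3000 is a multiple of 8 sectors (4KiB)
         * but of no larger power of two.
         */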

        if ((chunksect << 9) < PAGE_SIZE)
                /* array size does not allow a suitable chunk size */
                return ERR_PTR(-EINVAL);

        /* Set new parameters */
        mddev->new_level = 0;
        mddev->new_layout = 0;
        mddev->new_chunk_sectors = chunksect;
        mddev->chunk_sectors = chunksect;
        mddev->delta_disks = 1 - mddev->raid_disks;
        mddev->raid_disks = 1;
        /* make sure it will not be marked as dirty */
        mddev->recovery_cp = MaxSector;

        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
        /* raid0 can take over:
         *  raid4 - if all data disks are active.
         *  raid5 - provided it is Raid4 layout and one disk is faulty
         *  raid10 - assuming we have all necessary active disks
         *  raid1 - with (N - 1) mirror drives faulty
         */
        if (mddev->level == 4)
                return raid0_takeover_raid45(mddev);

        if (mddev->level == 5) {
                if (mddev->layout == ALGORITHM_PARITY_N)
                        return raid0_takeover_raid45(mddev);

                printk(KERN_ERR "md/raid0:%s: Raid0 can only take over Raid5 with layout: %d\n",
                       mdname(mddev), ALGORITHM_PARITY_N);
        }

        if (mddev->level == 10)
                return raid0_takeover_raid10(mddev);

        if (mddev->level == 1)
                return raid0_takeover_raid1(mddev);

        printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
               mddev->level);

        return ERR_PTR(-EINVAL);
}

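/* raid0 has no redundancy, so there is no resync or recovery to pause;
 * quiesce is therefore a no-op.
 */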
static void raid0_quiesce(struct mddev *mddev, int state)
{
}

static struct md_personality raid0_personality =
{
        .name           = "raid0",
        .level          = 0,
        .owner          = THIS_MODULE,
        .make_request   = raid0_make_request,
        .run            = raid0_run,
        .stop           = raid0_stop,
        .status         = raid0_status,
        .size           = raid0_size,
        .takeover       = raid0_takeover,
        .quiesce        = raid0_quiesce,
};

static int __init raid0_init(void)
{
        return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
        unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");