linux/drivers/md/raid0.c
/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
             <zyngier@ufr-info-p7.ibp.fr> or
             <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat


   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/blkdev.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "md.h"
#include "raid0.h"
#include "raid5.h"

static int raid0_congested(void *data, int bits)
{
        struct mddev *mddev = data;
        struct r0conf *conf = mddev->private;
        struct md_rdev **devlist = conf->devlist;
        int raid_disks = conf->strip_zone[0].nb_dev;
        int i, ret = 0;

        if (mddev_congested(mddev, bits))
                return 1;

        for (i = 0; i < raid_disks && !ret ; i++) {
                struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

                ret |= bdi_congested(&q->backing_dev_info, bits);
        }
        return ret;
}

/*
 * inform the user of the raid configuration
 */
static void dump_zones(struct mddev *mddev)
{
        int j, k;
        sector_t zone_size = 0;
        sector_t zone_start = 0;
        char b[BDEVNAME_SIZE];
        struct r0conf *conf = mddev->private;
        int raid_disks = conf->strip_zone[0].nb_dev;
        printk(KERN_INFO "md: RAID0 configuration for %s - %d zone%s\n",
               mdname(mddev),
               conf->nr_strip_zones, conf->nr_strip_zones==1?"":"s");
        for (j = 0; j < conf->nr_strip_zones; j++) {
                printk(KERN_INFO "md: zone%d=[", j);
                for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
                        printk(KERN_CONT "%s%s", k?"/":"",
                        bdevname(conf->devlist[j*raid_disks
                                                + k]->bdev, b));
                printk(KERN_CONT "]\n");

                zone_size  = conf->strip_zone[j].zone_end - zone_start;
                printk(KERN_INFO "      zone-offset=%10lluKB, "
                                "device-offset=%10lluKB, size=%10lluKB\n",
                        (unsigned long long)zone_start>>1,
                        (unsigned long long)conf->strip_zone[j].dev_start>>1,
                        (unsigned long long)zone_size>>1);
                zone_start = conf->strip_zone[j].zone_end;
        }
        printk(KERN_INFO "\n");
}

static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
{
        int i, c, err;
        sector_t curr_zone_end, sectors;
        struct md_rdev *smallest, *rdev1, *rdev2, *rdev, **dev;
        struct strip_zone *zone;
        int cnt;
        char b[BDEVNAME_SIZE];
        char b2[BDEVNAME_SIZE];
        struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);

        if (!conf)
                return -ENOMEM;
        rdev_for_each(rdev1, mddev) {
                pr_debug("md/raid0:%s: looking at %s\n",
                         mdname(mddev),
                         bdevname(rdev1->bdev, b));
                c = 0;

                /* round size to chunk_size */
                sectors = rdev1->sectors;
                sector_div(sectors, mddev->chunk_sectors);
                rdev1->sectors = sectors * mddev->chunk_sectors;

                rdev_for_each(rdev2, mddev) {
                        pr_debug("md/raid0:%s:   comparing %s(%llu)"
                                 " with %s(%llu)\n",
                                 mdname(mddev),
                                 bdevname(rdev1->bdev,b),
                                 (unsigned long long)rdev1->sectors,
                                 bdevname(rdev2->bdev,b2),
                                 (unsigned long long)rdev2->sectors);
                        if (rdev2 == rdev1) {
                                pr_debug("md/raid0:%s:   END\n",
                                         mdname(mddev));
                                break;
                        }
                        if (rdev2->sectors == rdev1->sectors) {
                                /*
                                 * Not unique, don't count it as a new
                                 * group
                                 */
                                pr_debug("md/raid0:%s:   EQUAL\n",
                                         mdname(mddev));
                                c = 1;
                                break;
                        }
                        pr_debug("md/raid0:%s:   NOT EQUAL\n",
                                 mdname(mddev));
                }
                if (!c) {
                        pr_debug("md/raid0:%s:   ==> UNIQUE\n",
                                 mdname(mddev));
                        conf->nr_strip_zones++;
                        pr_debug("md/raid0:%s: %d zones\n",
                                 mdname(mddev), conf->nr_strip_zones);
                }
        }
        pr_debug("md/raid0:%s: FINAL %d zones\n",
                 mdname(mddev), conf->nr_strip_zones);
        err = -ENOMEM;
        conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
                                conf->nr_strip_zones, GFP_KERNEL);
        if (!conf->strip_zone)
                goto abort;
        conf->devlist = kzalloc(sizeof(struct md_rdev*)*
                                conf->nr_strip_zones*mddev->raid_disks,
                                GFP_KERNEL);
        if (!conf->devlist)
                goto abort;

        /* The first zone must contain all devices, so here we check that
         * there is a proper alignment of slots to devices and find them all
         */
        zone = &conf->strip_zone[0];
        cnt = 0;
        smallest = NULL;
        dev = conf->devlist;
        err = -EINVAL;
        rdev_for_each(rdev1, mddev) {
                int j = rdev1->raid_disk;

                if (mddev->level == 10) {
                        /* taking over a raid10-n2 array */
                        j /= 2;
                        rdev1->new_raid_disk = j;
                }

                if (mddev->level == 1) {
                        /* taking over a raid1 array -
                         * we have only one active disk
                         */
                        j = 0;
                        rdev1->new_raid_disk = j;
                }

                if (j < 0 || j >= mddev->raid_disks) {
                        printk(KERN_ERR "md/raid0:%s: bad disk number %d - "
                               "aborting!\n", mdname(mddev), j);
                        goto abort;
                }
                if (dev[j]) {
                        printk(KERN_ERR "md/raid0:%s: multiple devices for %d - "
                               "aborting!\n", mdname(mddev), j);
                        goto abort;
                }
                dev[j] = rdev1;

                disk_stack_limits(mddev->gendisk, rdev1->bdev,
                                  rdev1->data_offset << 9);

                if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
                        conf->has_merge_bvec = 1;

                if (!smallest || (rdev1->sectors < smallest->sectors))
                        smallest = rdev1;
                cnt++;
        }
        if (cnt != mddev->raid_disks) {
                printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
                       "aborting!\n", mdname(mddev), cnt, mddev->raid_disks);
                goto abort;
        }
        zone->nb_dev = cnt;
        zone->zone_end = smallest->sectors * cnt;

        curr_zone_end = zone->zone_end;

        /* now do the other zones */
        for (i = 1; i < conf->nr_strip_zones; i++)
        {
                int j;

                zone = conf->strip_zone + i;
                dev = conf->devlist + i * mddev->raid_disks;

                pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
                zone->dev_start = smallest->sectors;
                smallest = NULL;
                c = 0;

                for (j=0; j<cnt; j++) {
                        rdev = conf->devlist[j];
                        if (rdev->sectors <= zone->dev_start) {
                                pr_debug("md/raid0:%s: checking %s ... nope\n",
                                         mdname(mddev),
                                         bdevname(rdev->bdev, b));
                                continue;
                        }
                        pr_debug("md/raid0:%s: checking %s ..."
                                 " contained as device %d\n",
                                 mdname(mddev),
                                 bdevname(rdev->bdev, b), c);
                        dev[c] = rdev;
                        c++;
                        if (!smallest || rdev->sectors < smallest->sectors) {
                                smallest = rdev;
                                pr_debug("md/raid0:%s:  (%llu) is smallest!\n",
                                         mdname(mddev),
                                         (unsigned long long)rdev->sectors);
                        }
                }

                zone->nb_dev = c;
                sectors = (smallest->sectors - zone->dev_start) * c;
                pr_debug("md/raid0:%s: zone->nb_dev: %d, sectors: %llu\n",
                         mdname(mddev),
                         zone->nb_dev, (unsigned long long)sectors);

                curr_zone_end += sectors;
                zone->zone_end = curr_zone_end;

                pr_debug("md/raid0:%s: current zone start: %llu\n",
                         mdname(mddev),
                         (unsigned long long)smallest->sectors);
        }
        mddev->queue->backing_dev_info.congested_fn = raid0_congested;
        mddev->queue->backing_dev_info.congested_data = mddev;

        /*
         * now since we have the hard sector sizes, we can make sure
         * chunk size is a multiple of that sector size
         */
        if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
                printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
                       mdname(mddev),
                       mddev->chunk_sectors << 9);
                goto abort;
        }

        blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
        blk_queue_io_opt(mddev->queue,
                         (mddev->chunk_sectors << 9) * mddev->raid_disks);

        pr_debug("md/raid0:%s: done.\n", mdname(mddev));
        *private_conf = conf;

        return 0;
abort:
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
        *private_conf = NULL;
        return err;
}
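
/*
 * Worked example (illustrative only, not part of the original source):
 * with three chunk-aligned members of 100, 100 and 200 sectors,
 * create_strip_zones() builds two zones:
 *
 *   zone 0: nb_dev=3, dev_start=0,   zone_end=300  (3 * 100)
 *   zone 1: nb_dev=1, dev_start=100, zone_end=400  (300 + (200-100)*1)
 *
 * Array sectors 0-299 stripe across all three members; sectors 300-399
 * live on the tail of the largest member alone, so the full 400 sectors
 * (the sum of the member sizes) remain usable.
 */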

/* Find the zone which holds a particular offset
 * Update *sectorp to be an offset in that zone
 */
static struct strip_zone *find_zone(struct r0conf *conf,
                                    sector_t *sectorp)
{
        int i;
        struct strip_zone *z = conf->strip_zone;
        sector_t sector = *sectorp;

        for (i = 0; i < conf->nr_strip_zones; i++)
                if (sector < z[i].zone_end) {
                        if (i)
                                *sectorp = sector - z[i-1].zone_end;
                        return z + i;
                }
        BUG();
}
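
/*
 * Example (illustrative only): with the two zones sketched above
 * (zone_end 300 and 400), find_zone() maps array sector 350 to zone 1
 * and rewrites *sectorp to 350 - 300 = 50, the offset within that zone.
 */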

/*
 * Remap the bio to the target device. We separate two flows: a
 * power-of-2 flow and a general flow, for the sake of performance.
 */
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
                                sector_t sector, sector_t *sector_offset)
{
        unsigned int sect_in_chunk;
        sector_t chunk;
        struct r0conf *conf = mddev->private;
        int raid_disks = conf->strip_zone[0].nb_dev;
        unsigned int chunk_sects = mddev->chunk_sectors;

        if (is_power_of_2(chunk_sects)) {
                int chunksect_bits = ffz(~chunk_sects);
                /* find the sector offset inside the chunk */
                sect_in_chunk = sector & (chunk_sects - 1);
                sector >>= chunksect_bits;
                /* chunk in zone */
                chunk = *sector_offset;
                /* quotient is the chunk in real device */
                sector_div(chunk, zone->nb_dev << chunksect_bits);
        } else {
                sect_in_chunk = sector_div(sector, chunk_sects);
                chunk = *sector_offset;
                sector_div(chunk, chunk_sects * zone->nb_dev);
        }
        /*
         * position the bio over the real device
         * real sector = chunk in device + starting of zone
         *      + the position in the chunk
         */
        *sector_offset = (chunk * chunk_sects) + sect_in_chunk;
        return conf->devlist[(zone - conf->strip_zone)*raid_disks
                             + sector_div(sector, zone->nb_dev)];
}
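
/*
 * Worked example (illustrative only): zone 0 has nb_dev = 2 and
 * chunk_sects = 8 (4KiB chunks of 512-byte sectors).  For array
 * sector 21 (zone offset also 21, since dev_start = 0):
 *
 *   sect_in_chunk = 21 & 7        = 5   (offset inside the chunk)
 *   chunk         = 21 / (2 << 3) = 1   (chunk index on the member)
 *   device index  = (21 >> 3) % 2 = 0   (chunk 2 round-robins to dev 0)
 *
 * so *sector_offset becomes 1*8 + 5 = 13 and the bio goes to member 0
 * at sector 13 (before adding zone->dev_start and data_offset).
 */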

/**
 *      raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *      @q: request queue
 *      @bvm: properties of new bio
 *      @biovec: the request that could be merged to it.
 *
 *      Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
                                struct bvec_merge_data *bvm,
                                struct bio_vec *biovec)
{
        struct mddev *mddev = q->queuedata;
        struct r0conf *conf = mddev->private;
        sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
        sector_t sector_offset = sector;
        int max;
        unsigned int chunk_sectors = mddev->chunk_sectors;
        unsigned int bio_sectors = bvm->bi_size >> 9;
        struct strip_zone *zone;
        struct md_rdev *rdev;
        struct request_queue *subq;

        if (is_power_of_2(chunk_sectors))
                max =  (chunk_sectors - ((sector & (chunk_sectors-1))
                                                + bio_sectors)) << 9;
        else
                max =  (chunk_sectors - (sector_div(sector, chunk_sectors)
                                                + bio_sectors)) << 9;
        if (max < 0)
                max = 0; /* bio_add cannot handle a negative return */
        if (max <= biovec->bv_len && bio_sectors == 0)
                return biovec->bv_len;
        if (max < biovec->bv_len)
                /* too small already, no need to check further */
                return max;
        if (!conf->has_merge_bvec)
                return max;

        /* May need to check subordinate device */
        sector = sector_offset;
        zone = find_zone(mddev->private, &sector_offset);
        rdev = map_sector(mddev, zone, sector, &sector_offset);
        subq = bdev_get_queue(rdev->bdev);
        if (subq->merge_bvec_fn) {
                bvm->bi_bdev = rdev->bdev;
                bvm->bi_sector = sector_offset + zone->dev_start +
                        rdev->data_offset;
                return min(max, subq->merge_bvec_fn(subq, bvm, biovec));
        } else
                return max;
}
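
/*
 * Example (illustrative only): with chunk_sectors = 8, a bio already
 * holding 2 sectors at array sector 5 gets
 * max = (8 - ((5 & 7) + 2)) << 9 = 512 bytes, i.e. only one more
 * sector may be merged before the request would spill into the next
 * chunk.
 */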

static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
        sector_t array_sectors = 0;
        struct md_rdev *rdev;

        WARN_ONCE(sectors || raid_disks,
                  "%s does not support generic reshape\n", __func__);

        rdev_for_each(rdev, mddev)
                array_sectors += rdev->sectors;

        return array_sectors;
}

static int raid0_stop(struct mddev *mddev);

static int raid0_run(struct mddev *mddev)
{
        struct r0conf *conf;
        int ret;

        if (mddev->chunk_sectors == 0) {
                printk(KERN_ERR "md/raid0:%s: chunk size must be set.\n",
                       mdname(mddev));
                return -EINVAL;
        }
        if (md_check_no_bitmap(mddev))
                return -EINVAL;
        blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);

        /* if private is not null, we are here after takeover */
        if (mddev->private == NULL) {
                ret = create_strip_zones(mddev, &conf);
                if (ret < 0)
                        return ret;
                mddev->private = conf;
        }
        conf = mddev->private;

        /* calculate array device size */
        md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));

        printk(KERN_INFO "md/raid0:%s: md_size is %llu sectors.\n",
               mdname(mddev),
               (unsigned long long)mddev->array_sectors);
        /* calculate the max read-ahead size.
         * For read-ahead of large files to be effective, we need to
         * readahead at least twice a whole stripe. i.e. number of devices
         * multiplied by chunk size times 2.
         * If an individual device has an ra_pages greater than the
         * chunk size, then we will not drive that device as hard as it
         * wants.  We consider this a configuration error: a larger
         * chunksize should be used in that case.
         */
        {
                int stripe = mddev->raid_disks *
                        (mddev->chunk_sectors << 9) / PAGE_SIZE;
                if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                        mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
        }
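        /*
         * Worked example (illustrative only): 4 members with 64KiB
         * chunks (chunk_sectors = 128) and PAGE_SIZE = 4096 give
         * stripe = 4 * 65536 / 4096 = 64 pages, so ra_pages is raised
         * to at least 128 pages (512KiB), i.e. two full stripes.
         */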

        blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
        dump_zones(mddev);

        ret = md_integrity_register(mddev);
        if (ret)
                raid0_stop(mddev);

        return ret;
}

static int raid0_stop(struct mddev *mddev)
{
        struct r0conf *conf = mddev->private;

        blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
        kfree(conf->strip_zone);
        kfree(conf->devlist);
        kfree(conf);
        mddev->private = NULL;
        return 0;
}

/*
 * Is the IO distributed over one or more chunks?
 */
static inline int is_io_in_chunk_boundary(struct mddev *mddev,
                        unsigned int chunk_sects, struct bio *bio)
{
        if (likely(is_power_of_2(chunk_sects))) {
                return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
                                        + (bio->bi_size >> 9));
        } else {
                sector_t sector = bio->bi_sector;
                return chunk_sects >= (sector_div(sector, chunk_sects)
                                                + (bio->bi_size >> 9));
        }
}
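
/*
 * Example (illustrative only): with chunk_sects = 8, a 2-sector bio at
 * sector 5 gives (5 & 7) + 2 = 7 <= 8, so it fits inside one chunk; a
 * 4-sector bio at the same sector gives 5 + 4 = 9 > 8, so it crosses a
 * chunk boundary and raid0_make_request() must split it.
 */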

static void raid0_make_request(struct mddev *mddev, struct bio *bio)
{
        unsigned int chunk_sects;
        sector_t sector_offset;
        struct strip_zone *zone;
        struct md_rdev *tmp_dev;

        if (unlikely(bio->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bio);
                return;
        }

        chunk_sects = mddev->chunk_sectors;
        if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
                sector_t sector = bio->bi_sector;
                struct bio_pair *bp;
                /* Sanity check -- queue functions should prevent this happening */
                if (bio->bi_vcnt != 1 ||
                    bio->bi_idx != 0)
                        goto bad_map;
                /* This is a one page bio that upper layers
                 * refuse to split for us, so we need to split it.
                 */
                if (likely(is_power_of_2(chunk_sects)))
                        bp = bio_split(bio, chunk_sects - (sector &
                                                           (chunk_sects-1)));
                else
                        bp = bio_split(bio, chunk_sects -
                                       sector_div(sector, chunk_sects));
                raid0_make_request(mddev, &bp->bio1);
                raid0_make_request(mddev, &bp->bio2);
                bio_pair_release(bp);
                return;
        }

        sector_offset = bio->bi_sector;
        zone = find_zone(mddev->private, &sector_offset);
        tmp_dev = map_sector(mddev, zone, bio->bi_sector,
                             &sector_offset);
        bio->bi_bdev = tmp_dev->bdev;
        bio->bi_sector = sector_offset + zone->dev_start +
                tmp_dev->data_offset;

        generic_make_request(bio);
        return;

bad_map:
        printk("md/raid0:%s: make_request bug: can't convert block across chunks"
               " or bigger than %dk %llu %d\n",
               mdname(mddev), chunk_sects / 2,
               (unsigned long long)bio->bi_sector, bio->bi_size >> 10);

        bio_io_error(bio);
        return;
}

static void raid0_status(struct seq_file *seq, struct mddev *mddev)
{
        seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
        return;
}

static void *raid0_takeover_raid45(struct mddev *mddev)
{
        struct md_rdev *rdev;
        struct r0conf *priv_conf;

        if (mddev->degraded != 1) {
                printk(KERN_ERR "md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
                       mdname(mddev),
                       mddev->degraded);
                return ERR_PTR(-EINVAL);
        }

        rdev_for_each(rdev, mddev) {
                /* check slot number for a disk */
                if (rdev->raid_disk == mddev->raid_disks-1) {
                        printk(KERN_ERR "md/raid0:%s: raid5 must have missing parity disk!\n",
                               mdname(mddev));
                        return ERR_PTR(-EINVAL);
                }
        }

        /* Set new parameters */
        mddev->new_level = 0;
        mddev->new_layout = 0;
        mddev->new_chunk_sectors = mddev->chunk_sectors;
        mddev->raid_disks--;
        mddev->delta_disks = -1;
        /* make sure it will not be marked as dirty */
        mddev->recovery_cp = MaxSector;

        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
}

static void *raid0_takeover_raid10(struct mddev *mddev)
{
        struct r0conf *priv_conf;

        /* Check layout:
         *  - far_copies must be 1
         *  - near_copies must be 2
         *  - number of disks must be even
         *  - all mirrors must already be degraded
         */
        if (mddev->layout != ((1 << 8) + 2)) {
                printk(KERN_ERR "md/raid0:%s: Raid0 cannot take over layout: 0x%x\n",
                       mdname(mddev),
                       mddev->layout);
                return ERR_PTR(-EINVAL);
        }
        if (mddev->raid_disks & 1) {
                printk(KERN_ERR "md/raid0:%s: Raid0 cannot take over Raid10 with an odd number of disks.\n",
                       mdname(mddev));
                return ERR_PTR(-EINVAL);
        }
        if (mddev->degraded != (mddev->raid_disks>>1)) {
                printk(KERN_ERR "md/raid0:%s: All mirrors must already be degraded!\n",
                       mdname(mddev));
                return ERR_PTR(-EINVAL);
        }

        /* Set new parameters */
        mddev->new_level = 0;
        mddev->new_layout = 0;
        mddev->new_chunk_sectors = mddev->chunk_sectors;
        mddev->delta_disks = - mddev->raid_disks / 2;
        mddev->raid_disks += mddev->delta_disks;
        mddev->degraded = 0;
        /* make sure it will not be marked as dirty */
        mddev->recovery_cp = MaxSector;

        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
}
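
/*
 * Note (illustrative only, not part of the original source): the raid10
 * layout word packs near_copies in its low byte and far_copies in the
 * next byte, so the required value (1 << 8) + 2 == 0x102 encodes
 * far_copies = 1, near_copies = 2 - the classic "n2" layout in which
 * each pair of disks holds one mirror.
 */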

static void *raid0_takeover_raid1(struct mddev *mddev)
{
        struct r0conf *priv_conf;
        int chunksect;

        /* Check layout:
         *  - (N - 1) mirror drives must already be faulty
         */
        if ((mddev->raid_disks - 1) != mddev->degraded) {
                printk(KERN_ERR "md/raid0:%s: (N - 1) mirror drives must already be faulty!\n",
                       mdname(mddev));
                return ERR_PTR(-EINVAL);
        }

        /*
         * a raid1 doesn't have the notion of chunk size, so
         * figure out the largest suitable size we can use.
         */
        chunksect = 64 * 2; /* 64K by default */

        /* The array must be an exact multiple of chunksize */
        while (chunksect && (mddev->array_sectors & (chunksect - 1)))
                chunksect >>= 1;
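        /*
         * Worked example (illustrative only): for a 1,000,000-sector
         * raid1, chunksect = 128 fails (1000000 & 127 == 64), so the
         * loop halves it to 64; 1000000 is a multiple of 64, so the
         * takeover uses 32KiB chunks, and (64 << 9) >= PAGE_SIZE passes
         * the check below.
         */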

        if ((chunksect << 9) < PAGE_SIZE)
                /* array size does not allow a suitable chunk size */
                return ERR_PTR(-EINVAL);

        /* Set new parameters */
        mddev->new_level = 0;
        mddev->new_layout = 0;
        mddev->new_chunk_sectors = chunksect;
        mddev->chunk_sectors = chunksect;
        mddev->delta_disks = 1 - mddev->raid_disks;
        mddev->raid_disks = 1;
        /* make sure it will not be marked as dirty */
        mddev->recovery_cp = MaxSector;

        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
}

static void *raid0_takeover(struct mddev *mddev)
{
        /* raid0 can take over:
         *  raid4  - if all data disks are active.
         *  raid5  - provided it has the raid4 layout and one disk is faulty
         *  raid10 - assuming we have all necessary active disks
         *  raid1  - with (N - 1) mirror drives faulty
         */
        if (mddev->level == 4)
                return raid0_takeover_raid45(mddev);

        if (mddev->level == 5) {
                if (mddev->layout == ALGORITHM_PARITY_N)
                        return raid0_takeover_raid45(mddev);

                printk(KERN_ERR "md/raid0:%s: Raid0 can only take over raid5 with layout: %d\n",
                       mdname(mddev), ALGORITHM_PARITY_N);
        }

        if (mddev->level == 10)
                return raid0_takeover_raid10(mddev);

        if (mddev->level == 1)
                return raid0_takeover_raid1(mddev);

        printk(KERN_ERR "Takeover from raid%i to raid0 not supported\n",
                mddev->level);

        return ERR_PTR(-EINVAL);
}

static void raid0_quiesce(struct mddev *mddev, int state)
{
}

static struct md_personality raid0_personality =
{
        .name           = "raid0",
        .level          = 0,
        .owner          = THIS_MODULE,
        .make_request   = raid0_make_request,
        .run            = raid0_run,
        .stop           = raid0_stop,
        .status         = raid0_status,
        .size           = raid0_size,
        .takeover       = raid0_takeover,
        .quiesce        = raid0_quiesce,
};

static int __init raid0_init(void)
{
        return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
        unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID0 (striping) personality for MD");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");