/*
 * MTD device concatenation layer
 *
 * (C) 2002 Robert Kaiser <rkaiser@sysgo.de>
 *
 * This code is GPL
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>

/*
 * Our storage structure:
 * subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure.
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * How to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x) ((struct mtd_concat *)(x))

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass it through to the subdevice.
 */
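
/*
 * Worked example of the translation done below (sizes are purely
 * illustrative, not from any real board): with two 8MiB subdevices,
 * a 2MiB read at from = 12MiB first skips subdev 0 (from -= 8MiB,
 * leaving 4MiB) and is then issued to subdev 1 at offset 4MiB.
 * A request crossing a boundary is split: the first chunk runs to
 * the end of one subdevice, the remainder restarts at offset 0 of
 * the next.
 */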
static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t *retlen, u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = subdev->read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (err == -EBADMSG) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (err == -EUCLEAN) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t *retlen, const u_char *buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->write(subdev, to, size, &retsize, buf);

		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}
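
/*
 * Writev support: the vector is copied so that the entry which
 * straddles a subdevice boundary can be split in two.  For example
 * (illustrative numbers only): a single 4k iovec starting 1k before
 * a boundary is written as a 1k tail to one subdevice, then iov_base
 * is advanced by 1k and the remaining 3k go to the next subdevice.
 */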

static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Do not allow write past end of device */
	if ((to + total_len) > mtd->size)
		return -EINVAL;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmalloc(sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;
	memcpy(vecs_copy, vecs, sizeof(struct kvec) * count);

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min(total_len, (size_t)(subdev->size - to));
		wsize = size;	/* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = subdev->writev(subdev, &vecs_copy[entry_low],
					     entry_high - entry_low + 1, to,
					     &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = subdev->read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (err == -EBADMSG) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (err == -EUCLEAN) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			/* advance by what this subdevice actually read,
			 * not by the cumulative count */
			devops.oobbuf += devops.oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = subdev->write_oob(subdev, to, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static void concat_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *) instr->priv);
}

static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * Erase one subdevice synchronously: start the erase and sleep
	 * until the callback reports completion or failure.
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd->erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}
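
/*
 * The erase request is first validated against the (possibly
 * non-uniform) erase-region map of the concatenated device, then
 * carried out as a sequence of synchronous per-subdevice erases;
 * on failure, the subdevice-relative fail_addr is mapped back into
 * the address space of the combined device.
 */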

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	u_int32_t length, offset = 0;
	struct erase_info *erase;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (instr->addr > concat->mtd.size)
		return -EINVAL;

	if (instr->len + instr->addr > concat->mtd.size)
		return -EINVAL;

	/*
	 * Check for proper erase block alignment of the to-be-erased
	 * area. This is done against the super device's erase region
	 * map rather than against each subdevice in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the area
		 * begins; check that the start is aligned to this
		 * region's erase size:
		 */
		if (instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * then find the erase region where the area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;

		/* check that the end is aligned to this region's
		 * erase size as well */
		if ((instr->addr + instr->len) & (erase_regions[i].erasesize - 1))
			return -EINVAL;
	}

	instr->fail_addr = 0xffffffff;

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * erase->addr to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop over all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		if (!(subdev->flags & MTD_WRITEABLE)) {
			err = -EROFS;
			break;
		}
		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != 0xffffffff)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}

		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = subdev->lock(subdev, ofs, size);

		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = 0;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = subdev->unlock(subdev, ofs, size);

		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->sync(subdev);
	}
}

static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = subdev->suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		subdev->resume(subdev);
	}
}
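
/*
 * Bad-block support: only subdev[0] is probed for the block_isbad /
 * block_markbad methods; the code below assumes the subdevices are
 * uniform in whether they implement bad-block handling at all.
 */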

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!concat->subdev[0]->block_isbad)
		return res;

	if (ofs > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = subdev->block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if (!concat->subdev[0]->block_markbad)
		return 0;

	if (ofs > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = subdev->block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned in case of success and NULL otherwise.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],
				   int num_devs,
				   char *name)
{
	int i;
	size_t size;
	struct mtd_concat *concat;
	u_int32_t max_erasesize, curr_erasesize;
	int num_erase_region;

	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk
		    ("memory allocation error while creating concatenated device \"%s\"\n",
		     name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check
	 * for incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;
	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
	if (subdev[0]->writev)
		concat->mtd.writev = concat_writev;
	if (subdev[0]->read_oob)
		concat->mtd.read_oob = concat_read_oob;
	if (subdev[0]->write_oob)
		concat->mtd.write_oob = concat_write_oob;
	if (subdev[0]->block_isbad)
		concat->mtd.block_isbad = concat_block_isbad;
	if (subdev[0]->block_markbad)
		concat->mtd.block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->
			     flags) & ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}
		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
		    subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd.read_oob != !subdev[i]->read_oob ||
		    !concat->mtd.write_oob != !subdev[i]->write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];

	}

	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd.erase = concat_erase;
	concat->mtd.read = concat_read;
	concat->mtd.write = concat_write;
	concat->mtd.sync = concat_sync;
	concat->mtd.lock = concat_lock;
	concat->mtd.unlock = concat_unlock;
	concat->mtd.suspend = concat_suspend;
	concat->mtd.resume = concat_resume;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's
				   erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {
				/* walk the list of erase regions, count
				   any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase
		 * regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		u_int32_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk
			    ("memory allocation error while creating erase region list"
			     " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info
					 * structure for the area we have
					 * walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					erase_region_p->numblocks =
					    (position - begin) / curr_erasesize;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						erase_region_p->numblocks =
						    (position -
						     begin) / curr_erasesize;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		erase_region_p->numblocks = (position - begin) / curr_erasesize;
	}

	return &concat->mtd;
}

/*
 * This function destroys an MTD object created by mtd_concat_create().
 */
void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}
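
/*
 * Usage sketch (not part of this driver; the names flash0/flash1 and
 * "combined-flash" are hypothetical, and the subdevices are assumed
 * to have been obtained elsewhere):
 *
 *	struct mtd_info *parts[2] = { flash0, flash1 };
 *	struct mtd_info *combined;
 *
 *	combined = mtd_concat_create(parts, 2, "combined-flash");
 *	if (!combined)
 *		return -ENXIO;
 *	add_mtd_device(combined);
 *
 * and on teardown:
 *
 *	del_mtd_device(combined);
 *	mtd_concat_destroy(combined);
 */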

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating of MTD devices");