 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"
/*
 * The following flags are used by dm-raid.c to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10		/* rdev flag */
	/*
	 * Two DM devices, one to hold metadata and one to hold the
	 * actual data/parity. The reason for this is to not confuse
	 * ti->len and give more flexibility in altering size and
	 * characteristics.
	 *
	 * While it is possible for this device to be associated
	 * with a different physical device than the data_dev, it
	 * is intended for it to be the same.
	 *    |--------- Physical Device ---------|
	 *    |- meta_dev -|------ data_dev ------|
	 */
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
/*
 * Flags for rs->print_flags field.
 */
#define DMPF_SYNC		0x1
#define DMPF_NOSYNC		0x2
#define DMPF_REBUILD		0x4
#define DMPF_DAEMON_SLEEP	0x8
#define DMPF_MIN_RECOVERY_RATE	0x10
#define DMPF_MAX_RECOVERY_RATE	0x20
#define DMPF_MAX_WRITE_BEHIND	0x40
#define DMPF_STRIPE_CACHE	0x80
#define DMPF_REGION_SIZE	0x100
#define DMPF_RAID10_COPIES	0x200
#define DMPF_RAID10_FORMAT	0x400
	uint32_t bitmap_loaded;

	struct raid_type *raid_type;
	struct dm_target_callbacks callbacks;

	struct raid_dev dev[0];
/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned parity_devs;	/* # of parity devices. */
	const unsigned minimal_devs;	/* minimal # of devices in set. */
	const unsigned level;		/* RAID level. */
	const unsigned algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid1",    "RAID1 (mirroring)",             0, 2, 1,  0 /* NONE */},
	{"raid10",   "RAID10 (striped mirrors)",      0, 2, 10, UINT_MAX /* Varies */},
	{"raid4",    "RAID4 (dedicated parity disk)", 1, 2, 5,  ALGORITHM_PARITY_0},
	{"raid5_la", "RAID5 (left asymmetric)",       1, 2, 5,  ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra", "RAID5 (right asymmetric)",      1, 2, 5,  ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid5_ls", "RAID5 (left symmetric)",        1, 2, 5,  ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs", "RAID5 (right symmetric)",       1, 2, 5,  ALGORITHM_RIGHT_SYMMETRIC},
	{"raid6_zr", "RAID6 (zero restart)",          2, 4, 6,  ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr", "RAID6 (N restart)",             2, 4, 6,  ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc", "RAID6 (N continue)",            2, 4, 6,  ALGORITHM_ROTATING_N_CONTINUE}
};
static char *raid10_md_layout_to_format(int layout)
{
	/*
	 * Bit 16 and 17 stand for "offset" and "use_far_sets"
	 * Refer to MD's raid10.c for details
	 */
	if ((layout & 0x10000) && (layout & 0x20000))
		return "offset";

	if ((layout & 0xFF) > 1)
		return "near";

	return "far";
}

static unsigned raid10_md_layout_to_copies(int layout)
{
	if ((layout & 0xFF) > 1)
		return layout & 0xFF;
	return (layout >> 8) & 0xFF;
}

static int raid10_format_to_md_layout(char *format, unsigned copies)
{
	unsigned n = 1, f = 1;

	if (!strcmp("near", format))
		n = copies;
	else
		f = copies;

	if (!strcmp("offset", format))
		return 0x30000 | (f << 8) | n;

	if (!strcmp("far", format))
		return 0x20000 | (f << 8) | n;

	return (f << 8) | n;
}
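
/*
 * Worked example (illustrative, not part of the original source): with
 * "raid10_copies 2", "near" encodes as n = 2, f = 1 -> layout 0x102,
 * "far" as n = 1, f = 2 -> 0x20201, and "offset" as 0x30201 (bits 16
 * and 17 set). Decoding 0x20201: the low byte is 1, so the copy count
 * comes from (0x20201 >> 8) & 0xFF = 2, and the format decodes as "far".
 */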
static struct raid_type *get_raid_type(char *name)
	int i;

	for (i = 0; i < ARRAY_SIZE(raid_types); i++)
		if (!strcmp(raid_types[i].name, name))
			return &raid_types[i];

	return NULL;

static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
	unsigned i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	rs->raid_type = raid_type;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = 0;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);
	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 */
static void context_free(struct raid_set *rs)
	for (i = 0; i < rs->md.raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);

/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words. If there is a failure,
 * the caller must use context_free to unwind the operations.
 */
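/*
 * Illustrative pairings (hypothetical device names, following the
 * rules above):
 *   "- -"                  no devices for this slot
 *   "- /dev/sdb1"          data device only; array runs without metadata
 *   "/dev/sdb1 /dev/sdb2"  explicit metadata and data devices
 * whereas "/dev/sdb1 -" (metadata without data) is rejected.
 */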
static int dev_parms(struct raid_set *rs, char **argv)
	int rebuild = 0;
	int metadata_available = 0;
	int ret = 0;

	for (i = 0; i < rs->md.raid_disks; i++, argv += 2) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets, since there is a separate device
		 * for data and metadata.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		if (strcmp(argv[0], "-")) {
			ret = dm_get_device(rs->ti, argv[0],
					    dm_table_get_mode(rs->ti->table),
					    &rs->dev[i].meta_dev);
			rs->ti->error = "RAID metadata device lookup failure";
			if (ret)
				return ret;

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page)
				return -ENOMEM;
		}
		if (!strcmp(argv[1], "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			rs->ti->error = "No data device supplied with metadata device";
			if (rs->dev[i].meta_dev)
				return -EINVAL;

			continue;
		}

		ret = dm_get_device(rs->ti, argv[1],
				    dm_table_get_mode(rs->ti->table),
				    &rs->dev[i].data_dev);
		if (ret) {
			rs->ti->error = "RAID device lookup failure";
			return ret;
		}

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}
	if (metadata_available) {
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not. Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		DMERR("Unable to rebuild drive while array is not in-sync");
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
/*
 * validate_region_size
 *
 * @region_size: region size in sectors. If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (!region_size) {
		/*
		 * Choose a reasonable default. All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			/* If not a power of 2, make it the next power of 2 */
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = (region_size << 9);
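
	/*
	 * Worked example (illustrative): for a 1 TiB target, ti->len is
	 * 2^31 sectors, so min_region_size = 2^31 / 2^21 = 1024 sectors.
	 * That is below the 2^13 (8192-sector) threshold, so the 4 MiB
	 * default applies and bitmap_info.chunksize becomes
	 * 8192 << 9 bytes = 4 MiB.
	 */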
/*
 * validate_raid_redundancy
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_raid_redundancy(struct raid_set *rs)
	unsigned i, rebuild_cnt = 0;
	unsigned rebuilds_per_group = 0, copies, d;
	unsigned group_size, last_group_start;

	for (i = 0; i < rs->md.raid_disks; i++)
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
		    !rs->dev[i].rdev.sb_page)
			rebuild_cnt++;

	switch (rs->raid_type->level) {
	case 1:
		if (rebuild_cnt >= rs->md.raid_disks)
	case 4:
	case 5:
	case 6:
		if (rebuild_cnt > rs->raid_type->parity_devs)
	case 10:
		copies = raid10_md_layout_to_copies(rs->md.layout);
		if (rebuild_cnt < copies)

		/*
		 * It is possible to have a higher rebuild count for RAID10,
		 * as long as the failed devices occur in different mirror
		 * groups (i.e. different stripes).
		 *
		 * When checking "near" format, make sure no adjacent devices
		 * have failed beyond what can be handled. In addition to the
		 * simple case where the number of devices is a multiple of the
		 * number of copies, we must also handle cases where the number
		 * of devices is not a multiple of the number of copies.
		 * E.g. dev1 dev2 dev3 dev4 dev5
		 *          A    A    B    B    C
		 *          C    D    D    E    E
		 */
		if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
			for (i = 0; i < rs->md.raid_disks * copies; i++) {
				if (!(i % copies))
					rebuilds_per_group = 0;
				d = i % rs->md.raid_disks;
				if ((!rs->dev[d].rdev.sb_page ||
				     !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
				    (++rebuilds_per_group >= copies))
		/*
		 * When checking "far" and "offset" formats, we need to ensure
		 * that the device that holds its copy is not also dead or
		 * being rebuilt. (Note that "far" and "offset" formats only
		 * support two copies right now. These formats also only ever
		 * use the 'use_far_sets' variant.)
		 *
		 * This check is somewhat complicated by the need to account
		 * for arrays that are not a multiple of (far) copies. This
		 * results in the need to treat the last (potentially larger)
		 * set differently.
		 */
		group_size = (rs->md.raid_disks / copies);
		last_group_start = (rs->md.raid_disks / group_size) - 1;
		last_group_start *= group_size;
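
		/*
		 * Worked example (illustrative): with 5 devices and 2 copies,
		 * group_size = 5 / 2 = 2 and
		 * last_group_start = (5 / 2 - 1) * 2 = 2, so the devices
		 * split into the sets {0, 1} and {2, 3, 4}; the final,
		 * larger set is checked without resetting the counter
		 * mid-set.
		 */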
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (!(i % copies) && !(i > last_group_start))
				rebuilds_per_group = 0;
			if ((!rs->dev[i].rdev.sb_page ||
			     !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
			    (++rebuilds_per_group >= copies))
/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					entire array
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '-write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]	Number of copies. (Default: 2)
 *    [raid10_format <near|far|offset>]	Layout algorithm. (Default: near)
 */
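/*
 * Example mapping (illustrative; in the style of the examples in
 * Documentation/device-mapper/dm-raid.txt):
 *
 *   0 1960893648 raid \
 *       raid4 1 2048 \
 *       5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
 *
 * i.e. a raid4 set with a 2048-sector chunk size across five devices,
 * none of which has a metadata device.
 */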
static int parse_raid_params(struct raid_set *rs, char **argv,
			     unsigned num_raid_params)
	char *raid10_format = "near";
	unsigned raid10_copies = 2;
	unsigned i;
	unsigned long value, region_size = 0;
	sector_t sectors_per_dev = rs->ti->len;
	sector_t max_io_len;
	char *key;

	/*
	 * First, parse the in-order required arguments
	 * "chunk_size" is the only argument of this type.
	 */
	if ((kstrtoul(argv[0], 10, &value) < 0)) {
		rs->ti->error = "Bad chunk size";
		return -EINVAL;
	} else if (rs->raid_type->level == 1) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value)) {
		rs->ti->error = "Chunk size must be a power of 2";
		return -EINVAL;
	} else if (value < 8) {
		rs->ti->error = "Chunk size value is too small";
		return -EINVAL;
	}

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
	/*
	 * We set each individual device as In_sync with a completed
	 * 'recovery_offset'. If there has been a device failure or
	 * replacement then one of the following cases applies:
	 *
	 *   1) User specifies 'rebuild'.
	 *      - Device is reset when param is read.
	 *   2) A new device is supplied.
	 *      - No matching superblock found, resets device.
	 *   3) Device failure was transient and returns on reload.
	 *      - Failure noticed, resets device for bitmap replay.
	 *   4) Device hadn't completed recovery after previous failure.
	 *      - Superblock is read and overrides recovery_offset.
	 *
	 * What is found in the superblocks of the devices is always
	 * authoritative, unless 'rebuild' or '[no]sync' was specified.
	 */
	for (i = 0; i < rs->md.raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}
	/*
	 * Second, parse the unordered optional arguments
	 */
	for (i = 0; i < num_raid_params; i++) {
		if (!strcasecmp(argv[i], "nosync")) {
			rs->md.recovery_cp = MaxSector;
			rs->print_flags |= DMPF_NOSYNC;
			continue;
		}
		if (!strcasecmp(argv[i], "sync")) {
			rs->md.recovery_cp = 0;
			rs->print_flags |= DMPF_SYNC;
			continue;
		}

		/* The rest of the optional arguments come in key/value pairs */
		if ((i + 1) >= num_raid_params) {
			rs->ti->error = "Wrong number of raid parameters given";
		key = argv[i++];

		/* Parameters that take a string value are checked here. */
		if (!strcasecmp(key, "raid10_format")) {
			if (rs->raid_type->level != 10) {
				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			if (strcmp("near", argv[i]) &&
			    strcmp("far", argv[i]) &&
			    strcmp("offset", argv[i])) {
				rs->ti->error = "Invalid 'raid10_format' value given";
				return -EINVAL;
			}
			raid10_format = argv[i];
			rs->print_flags |= DMPF_RAID10_FORMAT;
			continue;
		}

		if (kstrtoul(argv[i], 10, &value) < 0) {
			rs->ti->error = "Bad numerical argument given in raid params";
			return -EINVAL;
		}
		/* Parameters that take a numeric value are checked here */
		if (!strcasecmp(key, "rebuild")) {
			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}
			clear_bit(In_sync, &rs->dev[value].rdev.flags);
			rs->dev[value].rdev.recovery_offset = 0;
			rs->print_flags |= DMPF_REBUILD;
		} else if (!strcasecmp(key, "write_mostly")) {
			if (rs->raid_type->level != 1) {
				rs->ti->error = "write_mostly option is only valid for RAID1";
				return -EINVAL;
			}
			if (value >= rs->md.raid_disks) {
				rs->ti->error = "Invalid write_mostly drive index given";
				return -EINVAL;
			}
			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
		} else if (!strcasecmp(key, "max_write_behind")) {
			if (rs->raid_type->level != 1) {
				rs->ti->error = "max_write_behind option is only valid for RAID1";
				return -EINVAL;
			}
			rs->print_flags |= DMPF_MAX_WRITE_BEHIND;

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;
			if (value > COUNTER_MAX) {
				rs->ti->error = "Max write-behind limit out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.max_write_behind = value;
		} else if (!strcasecmp(key, "daemon_sleep")) {
			rs->print_flags |= DMPF_DAEMON_SLEEP;
			if (!value || (value > MAX_SCHEDULE_TIMEOUT)) {
				rs->ti->error = "daemon sleep period out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, "stripe_cache")) {
			rs->print_flags |= DMPF_STRIPE_CACHE;

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			value /= 2;

			if ((rs->raid_type->level != 5) &&
			    (rs->raid_type->level != 6)) {
				rs->ti->error = "Inappropriate argument: stripe_cache";
				return -EINVAL;
			}
			if (raid5_set_cache_size(&rs->md, (int)value)) {
				rs->ti->error = "Bad stripe_cache size";
				return -EINVAL;
			}
		} else if (!strcasecmp(key, "min_recovery_rate")) {
			rs->print_flags |= DMPF_MIN_RECOVERY_RATE;
			if (value > INT_MAX) {
				rs->ti->error = "min_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_min = (int)value;
		} else if (!strcasecmp(key, "max_recovery_rate")) {
			rs->print_flags |= DMPF_MAX_RECOVERY_RATE;
			if (value > INT_MAX) {
				rs->ti->error = "max_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_max = (int)value;
		} else if (!strcasecmp(key, "region_size")) {
			rs->print_flags |= DMPF_REGION_SIZE;
			region_size = value;
		} else if (!strcasecmp(key, "raid10_copies") &&
			   (rs->raid_type->level == 10)) {
			if ((value < 2) || (value > 0xFF)) {
				rs->ti->error = "Bad value for 'raid10_copies'";
				return -EINVAL;
			}
			rs->print_flags |= DMPF_RAID10_COPIES;
			raid10_copies = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			rs->ti->error = "Unable to parse RAID parameters";
			return -EINVAL;
		}
	}
	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	if (rs->raid_type->level == 10) {
		if (raid10_copies > rs->md.raid_disks) {
			rs->ti->error = "Not enough devices to satisfy specification";
			return -EINVAL;
		}

		/*
		 * If the format is not "near", we only support
		 * two copies at the moment.
		 */
		if (strcmp("near", raid10_format) && (raid10_copies > 2)) {
			rs->ti->error = "Too many copies for given RAID10 format.";
			return -EINVAL;
		}

		/* (Len * #mirrors) / #devices */
		sectors_per_dev = rs->ti->len * raid10_copies;
		sector_div(sectors_per_dev, rs->md.raid_disks);

		rs->md.layout = raid10_format_to_md_layout(raid10_format,
							   raid10_copies);
		rs->md.new_layout = rs->md.layout;
	} else if ((rs->raid_type->level > 1) &&
		   sector_div(sectors_per_dev,
			      (rs->md.raid_disks - rs->raid_type->parity_devs))) {
		rs->ti->error = "Target length not divisible by number of data devices";
		return -EINVAL;
	}
	rs->md.dev_sectors = sectors_per_dev;

	/* Assume there are no metadata devices until the drives are parsed */
	rs->md.persistent = 0;
static void do_table_event(struct work_struct *ws)
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	dm_table_event(rs->ti->table);

static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

	if (rs->raid_type->level == 1)
		return md_raid1_congested(&rs->md, bits);

	if (rs->raid_type->level == 10)
		return md_raid10_congested(&rs->md, bits);

	return md_raid5_congested(&rs->md, bits);
/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
	__le32 magic;		/* "DmRd" */
	__le32 features;	/* Used to indicate possible future changes */

	__le32 num_devices;	/* Number of devices in this array. (Max 64) */
	__le32 array_position;	/* The position of this drive in the array */

	__le64 events;		/* Incremented by md when superblock updated */
	__le64 failed_devices;	/* Bit field of devices to indicate failures */

	/*
	 * This offset tracks the progress of the repair or replacement of
	 * an individual drive.
	 */
	__le64 disk_recovery_offset;

	/*
	 * This offset tracks the progress of the initial array
	 * synchronisation/parity calculation.
	 */
	__le64 array_resync_offset;

	/*
	 * RAID characteristics
	 */
	__le32 level;
	__le32 layout;
	__le32 stripe_sectors;

	/* Remainder of a logical block is zero-filled when writing (see super_sync()). */
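
/*
 * Layout note (inferred from the surrounding code, not normative): the
 * superblock occupies the first logical block of the metadata device
 * (read_disk_sb() below reads from sector 0 and super_sync() zero-fills
 * the remainder of that block), while the write-intent bitmap follows
 * at a fixed 4096-byte offset (super_validate() sets bitmap_info.offset
 * to 4096 >> 9 sectors).
 */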
static int read_disk_sb(struct md_rdev *rdev, int size)
	BUG_ON(!rdev->sb_page);

	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, 1)) {
		DMERR("Failed to read superblock of device at position %d",
		      rdev->raid_disk);
		md_error(rdev->mddev, rdev);
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	struct raid_set *rs = container_of(mddev, struct raid_set, md);

	sb = page_address(rdev->sb_page);
	failed_devices = le64_to_cpu(sb->failed_devices);

	for (i = 0; i < mddev->raid_disks; i++)
		if (!rs->dev[i].data_dev ||
		    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
			failed_devices |= (1ULL << i);

	memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));

	sb->magic = cpu_to_le32(DM_RAID_MAGIC);
	sb->features = cpu_to_le32(0);	/* No features yet */

	sb->num_devices = cpu_to_le32(mddev->raid_disks);
	sb->array_position = cpu_to_le32(rdev->raid_disk);

	sb->events = cpu_to_le64(mddev->events);
	sb->failed_devices = cpu_to_le64(failed_devices);

	sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
	sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
/*
 * This function creates a superblock if one is not found on the device
 * and will decide which superblock to use if there's a choice.
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
	struct dm_raid_superblock *sb;
	struct dm_raid_superblock *refsb;
	uint64_t events_sb, events_refsb;

	rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
	if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
		DMERR("superblock size of a logical block is no longer valid");

	ret = read_disk_sb(rdev, rdev->sb_size);

	sb = page_address(rdev->sb_page);

	/*
	 * Two cases that we want to write new superblocks and rebuild:
	 * 1) New device (no matching magic number)
	 * 2) Device specified for rebuild (!In_sync w/ offset == 0)
	 */
	if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
	    (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
		super_sync(rdev->mddev, rdev);

		set_bit(FirstUse, &rdev->flags);

		/* Force writing of superblocks to disk */
		set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);

		/* Any superblock is better than none, choose that if given */
		return refdev ? 0 : 1;

	events_sb = le64_to_cpu(sb->events);

	refsb = page_address(refdev->sb_page);
	events_refsb = le64_to_cpu(refsb->events);

	return (events_sb > events_refsb) ? 1 : 0;
static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
	struct raid_set *rs = container_of(mddev, struct raid_set, md);
	uint64_t events_sb;
	uint64_t failed_devices;
	struct dm_raid_superblock *sb;
	uint32_t new_devs = 0;
	uint32_t rebuilds = 0;
	struct dm_raid_superblock *sb2;

	sb = page_address(rdev->sb_page);
	events_sb = le64_to_cpu(sb->events);
	failed_devices = le64_to_cpu(sb->failed_devices);

	/*
	 * Initialise to 1 if this is a new superblock.
	 */
	mddev->events = events_sb ? : 1;
	/*
	 * Reshaping is not currently allowed
	 */
	if (le32_to_cpu(sb->level) != mddev->level) {
		DMERR("Reshaping arrays not yet supported. (RAID level change)");
	if (le32_to_cpu(sb->layout) != mddev->layout) {
		DMERR("Reshaping arrays not yet supported. (RAID layout change)");
		DMERR("	 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
		DMERR("	 Old layout: %s w/ %d copies",
		      raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
		      raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
		DMERR("	 New layout: %s w/ %d copies",
		      raid10_md_layout_to_format(mddev->layout),
		      raid10_md_layout_to_copies(mddev->layout));
	if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
		DMERR("Reshaping arrays not yet supported. (stripe sectors change)");

	/* We can only change the number of devices in RAID1 right now */
	if ((rs->raid_type->level != 1) &&
	    (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
		DMERR("Reshaping arrays not yet supported. (device count change)");
	if (!(rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC)))
		mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);

	/*
	 * During load, we set FirstUse if a new superblock was written.
	 * There are two reasons we might not have a superblock:
	 * 1) The array is brand new - in which case, all of the
	 *    devices must have their In_sync bit set. Also,
	 *    recovery_cp must be 0, unless forced.
	 * 2) This is a new device being added to an old array
	 *    and the new device needs to be rebuilt - in which
	 *    case the In_sync bit will /not/ be set and
	 *    recovery_cp must be MaxSector.
	 */
	rdev_for_each(r, mddev) {
		if (!test_bit(In_sync, &r->flags)) {
			DMINFO("Device %d specified for rebuild: "
			       "Clearing superblock", r->raid_disk);
			rebuilds++;
		} else if (test_bit(FirstUse, &r->flags))
			new_devs++;
	}

	if (!rebuilds) {
		if (new_devs == mddev->raid_disks) {
			DMINFO("Superblocks created for new array");
			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
		} else if (new_devs) {
			DMERR("New device injected "
			      "into existing array without 'rebuild' "
			      "parameter specified");
			return -EINVAL;
		}
	} else if (new_devs) {
		DMERR("'rebuild' devices cannot be "
		      "injected into an array with other first-time devices");
		return -EINVAL;
	} else if (mddev->recovery_cp != MaxSector) {
		DMERR("'rebuild' specified while array is not in-sync");
		return -EINVAL;
	}
	/*
	 * Now we set the Faulty bit for those devices that are
	 * recorded in the superblock as failed.
	 */
	rdev_for_each(r, mddev) {
		if (!r->sb_page)
			continue;
		sb2 = page_address(r->sb_page);
		sb2->failed_devices = 0;

		/*
		 * Check for any device re-ordering.
		 */
		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
			role = le32_to_cpu(sb2->array_position);
			if (role != r->raid_disk) {
				if (rs->raid_type->level != 1) {
					rs->ti->error = "Cannot change device "
							"positions in RAID array";
					return -EINVAL;
				}
				DMINFO("RAID1 device #%d now at position #%d",
				       role, r->raid_disk);
			}

			/*
			 * Partial recovery is performed on
			 * returning failed devices.
			 */
			if (failed_devices & (1ULL << role))
				set_bit(Faulty, &r->flags);
static int super_validate(struct mddev *mddev, struct md_rdev *rdev)
	struct dm_raid_superblock *sb = page_address(rdev->sb_page);

	/*
	 * If mddev->events is not set, we know we have not yet initialized
	 * the array.
	 */
	if (!mddev->events && super_init_validation(mddev, rdev))
		return -EINVAL;

	mddev->bitmap_info.offset = 4096 >> 9; /* Enable bitmap creation */
	rdev->mddev->bitmap_info.default_offset = 4096 >> 9;
	if (!test_bit(FirstUse, &rdev->flags)) {
		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
		if (rdev->recovery_offset != MaxSector)
			clear_bit(In_sync, &rdev->flags);
	}

	/*
	 * If a device comes back, set it as not In_sync and no longer faulty.
	 */
	if (test_bit(Faulty, &rdev->flags)) {
		clear_bit(Faulty, &rdev->flags);
		clear_bit(In_sync, &rdev->flags);
		rdev->saved_raid_disk = rdev->raid_disk;
		rdev->recovery_offset = 0;
	}

	clear_bit(FirstUse, &rdev->flags);
/*
 * Analyse superblocks and select the freshest.
 */
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
	struct raid_dev *dev;
	struct md_rdev *rdev, *tmp, *freshest;
	struct mddev *mddev = &rs->md;
	rdev_for_each_safe(rdev, tmp, mddev) {
		/*
		 * Skipping super_load due to DMPF_SYNC will cause
		 * the array to undergo initialization again as
		 * though it were new. This is the intended effect
		 * of the "sync" directive.
		 *
		 * When reshaping capability is added, we must ensure
		 * that the "sync" directive is disallowed during the
		 * reshape.
		 */
		if (rs->print_flags & DMPF_SYNC)
			continue;

		if (!rdev->meta_bdev)
			continue;

		ret = super_load(rdev, freshest);

		switch (ret) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			dev = container_of(rdev, struct raid_dev, rdev);
			if (dev->meta_dev)
				dm_put_device(ti, dev->meta_dev);

			dev->meta_dev = NULL;
			rdev->meta_bdev = NULL;

			if (rdev->sb_page)
				put_page(rdev->sb_page);

			rdev->sb_page = NULL;

			rdev->sb_loaded = 0;

			/*
			 * We might be able to salvage the data device
			 * even though the meta device has failed. For
			 * now, we behave as though '- -' had been
			 * set for this device in the table.
			 */
			if (dev->data_dev)
				dm_put_device(ti, dev->data_dev);

			dev->data_dev = NULL;
			rdev->bdev = NULL;

			list_del(&rdev->same_set);
		}
	}
	if (validate_raid_redundancy(rs)) {
		rs->ti->error = "Insufficient redundancy to activate array";
		return -EINVAL;
	}

	/*
	 * Validation of the freshest device provides the source of
	 * validation for the remaining devices.
	 */
	ti->error = "Unable to assemble array: Invalid superblocks";
	if (super_validate(mddev, freshest))
		return -EINVAL;

	rdev_for_each(rdev, mddev)
		if ((rdev != freshest) && super_validate(mddev, rdev))
			return -EINVAL;
/*
 * Construct a RAID4/5/6 mapping:
 *	<raid_type> <#raid_params> <raid_params> \
 *	<#raid_devs> { <meta_dev1> <dev1> .. <meta_devN> <devN> }
 *
 * <raid_params> varies by <raid_type>. See 'parse_raid_params' for
 * details on possible <raid_params>.
 */
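/*
 * Example mapping with metadata devices (illustrative; in the style of
 * Documentation/device-mapper/dm-raid.txt), pairing each data device
 * with its own metadata device:
 *
 *   0 1960893648 raid \
 *       raid4 1 2048 \
 *       5 8:17 8:18 8:33 8:34 8:49 8:50 8:65 8:66 8:81 8:82
 */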
static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
	int ret;
	struct raid_type *rt;
	unsigned long num_raid_params, num_raid_devs;
	struct raid_set *rs = NULL;

	/* Must have at least <raid_type> <#raid_params> */
	if (argc < 2) {
		ti->error = "Too few arguments";
		return -EINVAL;
	}

	/* raid type */
	rt = get_raid_type(argv[0]);
	if (!rt) {
		ti->error = "Unrecognised raid_type";
		return -EINVAL;
	}
	argc--;
	argv++;

	/* number of RAID parameters */
	if (kstrtoul(argv[0], 10, &num_raid_params) < 0) {
		ti->error = "Cannot understand number of RAID parameters";
		return -EINVAL;
	}
	argc--;
	argv++;

	/* Skip over RAID params for now and find out # of devices */
	if (num_raid_params + 1 > argc) {
		ti->error = "Arguments do not agree with counts given";
		return -EINVAL;
	}

	if ((kstrtoul(argv[num_raid_params], 10, &num_raid_devs) < 0) ||
	    (num_raid_devs >= INT_MAX)) {
		ti->error = "Cannot understand number of raid devices";
		return -EINVAL;
	}

	rs = context_alloc(ti, rt, (unsigned)num_raid_devs);
	if (IS_ERR(rs))
		return PTR_ERR(rs);

	ret = parse_raid_params(rs, argv, (unsigned)num_raid_params);
	if (ret)
		goto bad;

	ret = -EINVAL;

	argc -= num_raid_params + 1; /* +1: we already have num_raid_devs */
	argv += num_raid_params + 1;

	if (argc != (num_raid_devs * 2)) {
		ti->error = "Supplied RAID devices do not match the count given";
		goto bad;
	}

	ret = dev_parms(rs, argv);
	if (ret)
		goto bad;

	rs->md.sync_super = super_sync;
	ret = analyse_superblocks(ti, rs);
	INIT_WORK(&rs->md.event_work, do_table_event);
	ti->private = rs;
	ti->num_flush_bios = 1;

	mutex_lock(&rs->md.reconfig_mutex);
	ret = md_run(&rs->md);
	rs->md.in_sync = 0; /* Assume already marked dirty */
	mutex_unlock(&rs->md.reconfig_mutex);

	if (ret) {
		ti->error = "Failed to run raid array";
		goto bad;
	}

	if (ti->len != rs->md.array_sectors) {
		ti->error = "Array size does not match requested target length";

	rs->callbacks.congested_fn = raid_is_congested;
	dm_table_add_target_callbacks(ti->table, &rs->callbacks);

	mddev_suspend(&rs->md);

	return 0;

bad:
	context_free(rs);

	return ret;
static void raid_dtr(struct dm_target *ti)
	struct raid_set *rs = ti->private;

	list_del_init(&rs->callbacks.list);

static int raid_map(struct dm_target *ti, struct bio *bio)
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	mddev->pers->make_request(mddev, bio);

	return DM_MAPIO_SUBMITTED;
static const char *decipher_sync_action(struct mddev *mddev)
	if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		return "frozen";

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
	    (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
			return "reshape";

		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				return "resync";
			else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
				return "check";
			return "repair";
		}

		if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
			return "recover";
	}

	return "idle";
static void raid_status(struct dm_target *ti, status_type_t type,
			unsigned status_flags, char *result, unsigned maxlen)
	struct raid_set *rs = ti->private;
	unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
	sector_t sync;
	int i, array_in_sync = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);

		if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
			sync = rs->md.curr_resync_completed;
		else
			sync = rs->md.recovery_cp;

		if (sync >= rs->md.resync_max_sectors) {
			array_in_sync = 1;
			sync = rs->md.resync_max_sectors;
		} else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
			/*
			 * If "check" or "repair" is occurring, the array has
			 * undergone an initial sync and the health characters
			 * should not be 'a' anymore.
			 */
			array_in_sync = 1;
		} else {
			/*
			 * The array may be doing an initial sync, or it may
			 * be rebuilding individual components. If all the
			 * devices are In_sync, then it is the array that is
			 * being initialized.
			 */
			for (i = 0; i < rs->md.raid_disks; i++)
				if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
					array_in_sync = 1;
		}

		/*
		 * Status characters:
		 *  'D' = Dead/Failed device
		 *  'a' = Alive but not in-sync
		 *  'A' = Alive and in-sync
		 */
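		/*
		 * Example (illustrative) of the resulting INFO line for a
		 * healthy two-device raid1 after its initial sync has
		 * completed:
		 *
		 *   raid1 2 AA 1024000/1024000 idle 0
		 */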
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (test_bit(Faulty, &rs->dev[i].rdev.flags))
				DMEMIT("D");
			else if (!array_in_sync ||
				 !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT("a");
			else
				DMEMIT("A");
		}

		/*
		 * The in-sync ratio shows the progress of:
		 *  - Initializing the array
		 *  - Rebuilding a subset of devices of the array
		 * The user can distinguish between the two by referring
		 * to the status characters.
		 */
		DMEMIT(" %llu/%llu",
		       (unsigned long long) sync,
		       (unsigned long long) rs->md.resync_max_sectors);

		/*
		 * See Documentation/device-mapper/dm-raid.txt for
		 * information on each of these states.
		 */
		DMEMIT(" %s", decipher_sync_action(&rs->md));

		/*
		 * resync_mismatches/mismatch_cnt
		 * This field shows the number of discrepancies found when
		 * performing a "check" of the array.
		 */
		DMEMIT(" %llu",
		       (strcmp(rs->md.last_sync_action, "check")) ? 0 :
		       (unsigned long long)
		       atomic64_read(&rs->md.resync_mismatches));
		break;
	case STATUSTYPE_TABLE:
		/* The string you would use to construct this array */
		for (i = 0; i < rs->md.raid_disks; i++) {
			if ((rs->print_flags & DMPF_REBUILD) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2; /* for rebuilds */
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				raid_param_cnt += 2;
		}

		raid_param_cnt += (hweight32(rs->print_flags & ~DMPF_REBUILD) * 2);
		if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
			raid_param_cnt--;

		DMEMIT("%s %u %u", rs->raid_type->name,
		       raid_param_cnt, rs->md.chunk_sectors);
		if ((rs->print_flags & DMPF_SYNC) &&
		    (rs->md.recovery_cp == MaxSector))
			DMEMIT(" sync");
		if (rs->print_flags & DMPF_NOSYNC)
			DMEMIT(" nosync");

		for (i = 0; i < rs->md.raid_disks; i++)
			if ((rs->print_flags & DMPF_REBUILD) &&
			    rs->dev[i].data_dev &&
			    !test_bit(In_sync, &rs->dev[i].rdev.flags))
				DMEMIT(" rebuild %u", i);

		if (rs->print_flags & DMPF_DAEMON_SLEEP)
			DMEMIT(" daemon_sleep %lu",
			       rs->md.bitmap_info.daemon_sleep);

		if (rs->print_flags & DMPF_MIN_RECOVERY_RATE)
			DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);

		if (rs->print_flags & DMPF_MAX_RECOVERY_RATE)
			DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);

		for (i = 0; i < rs->md.raid_disks; i++)
			if (rs->dev[i].data_dev &&
			    test_bit(WriteMostly, &rs->dev[i].rdev.flags))
				DMEMIT(" write_mostly %u", i);

		if (rs->print_flags & DMPF_MAX_WRITE_BEHIND)
			DMEMIT(" max_write_behind %lu",
			       rs->md.bitmap_info.max_write_behind);
		if (rs->print_flags & DMPF_STRIPE_CACHE) {
			struct r5conf *conf = rs->md.private;

			/* convert from kiB to sectors */
			DMEMIT(" stripe_cache %d",
			       conf ? conf->max_nr_stripes * 2 : 0);
		}

		if (rs->print_flags & DMPF_REGION_SIZE)
			DMEMIT(" region_size %lu",
			       rs->md.bitmap_info.chunksize >> 9);

		if (rs->print_flags & DMPF_RAID10_COPIES)
			DMEMIT(" raid10_copies %u",
			       raid10_md_layout_to_copies(rs->md.layout));

		if (rs->print_flags & DMPF_RAID10_FORMAT)
			DMEMIT(" raid10_format %s",
			       raid10_md_layout_to_format(rs->md.layout));

		DMEMIT(" %d", rs->md.raid_disks);
		for (i = 0; i < rs->md.raid_disks; i++) {
			if (rs->dev[i].meta_dev)
				DMEMIT(" %s", rs->dev[i].meta_dev->name);
			else
				DMEMIT(" -");

			if (rs->dev[i].data_dev)
				DMEMIT(" %s", rs->dev[i].data_dev->name);
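
/*
 * Illustrative invocation (standard dmsetup message syntax) of the
 * handler below, e.g. to start a scrub or quiesce recovery:
 *
 *   dmsetup message <mapped_device> 0 check
 *   dmsetup message <mapped_device> 0 idle
 */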
static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (!strcasecmp(argv[0], "reshape")) {
		DMERR("Reshape not supported.");
		return -EINVAL;
	}

	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (!strcasecmp(argv[0], "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (!strcasecmp(argv[0], "resync"))
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	else if (!strcasecmp(argv[0], "recover")) {
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	} else {
		if (!strcasecmp(argv[0], "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (strcasecmp(argv[0], "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		if (!mddev->suspended)
			md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	if (!mddev->suspended)
		md_wakeup_thread(mddev->thread);

	return 0;
static int raid_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
	struct raid_set *rs = ti->private;
	unsigned i;
	int ret = 0;

	for (i = 0; !ret && i < rs->md.raid_disks; i++)
		if (rs->dev[i].data_dev)
			ret = fn(ti,
				 rs->dev[i].data_dev,
				 0, /* No offset on data devs */
				 rs->ti->len,
				 data);

	return ret;
static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
	struct raid_set *rs = ti->private;
	unsigned chunk_size = rs->md.chunk_sectors << 9;
	struct r5conf *conf = rs->md.private;

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
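
/*
 * Worked example (illustrative): with 128-sector (64 KiB) chunks on a
 * four-device raid5, conf->max_degraded is 1, so io_min is set to
 * 64 KiB and io_opt to 64 KiB * (4 - 1) = 192 KiB - one full stripe
 * of data.
 */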
static void raid_presuspend(struct dm_target *ti)
	struct raid_set *rs = ti->private;

	md_stop_writes(&rs->md);

static void raid_postsuspend(struct dm_target *ti)
	struct raid_set *rs = ti->private;

	mddev_suspend(&rs->md);
static void attempt_restore_of_faulty_devices(struct raid_set *rs)
	uint64_t failed_devices, cleared_failed_devices = 0;
	unsigned long flags;
	struct dm_raid_superblock *sb;

	for (i = 0; i < rs->md.raid_disks; i++) {
		r = &rs->dev[i].rdev;
		if (test_bit(Faulty, &r->flags) && r->sb_page &&
		    sync_page_io(r, 0, r->sb_size, r->sb_page, READ, 1)) {
			DMINFO("Faulty %s device #%d has readable super block."
			       "  Attempting to revive it.",
			       rs->raid_type->name, i);

			/*
			 * Faulty bit may be set, but sometimes the array can
			 * be suspended before the personalities can respond
			 * by removing the device from the array (i.e. calling
			 * 'hot_remove_disk'). If they haven't yet removed
			 * the failed device, its 'raid_disk' number will be
			 * '>= 0' - meaning we must call this function
			 * ourselves.
			 */
			if ((r->raid_disk >= 0) &&
			    (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
				/* Failed to revive this device, try next */
				continue;

			r->saved_raid_disk = i;
			clear_bit(Faulty, &r->flags);
			clear_bit(WriteErrorSeen, &r->flags);
			clear_bit(In_sync, &r->flags);
			if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
				r->saved_raid_disk = -1;

			r->recovery_offset = 0;
			cleared_failed_devices |= 1ULL << i;
	if (cleared_failed_devices) {
		rdev_for_each(r, &rs->md) {
			sb = page_address(r->sb_page);
			failed_devices = le64_to_cpu(sb->failed_devices);
			failed_devices &= ~cleared_failed_devices;
			sb->failed_devices = cpu_to_le64(failed_devices);
static void raid_resume(struct dm_target *ti)
	struct raid_set *rs = ti->private;

	set_bit(MD_CHANGE_DEVS, &rs->md.flags);
	if (!rs->bitmap_loaded) {
		bitmap_load(&rs->md);
		rs->bitmap_loaded = 1;
	} else {
		/*
		 * A secondary resume while the device is active.
		 * Take this opportunity to check whether any failed
		 * devices are reachable again.
		 */
		attempt_restore_of_faulty_devices(rs);
	}

	clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
	mddev_resume(&rs->md);
static struct target_type raid_target = {
	.name = "raid",
	.version = {1, 5, 2},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
	.map = raid_map,
	.status = raid_status,
	.message = raid_message,
	.iterate_devices = raid_iterate_devices,
	.io_hints = raid_io_hints,
	.presuspend = raid_presuspend,
	.postsuspend = raid_postsuspend,
	.resume = raid_resume,
};
static int __init dm_raid_init(void)
	DMINFO("Loading target version %u.%u.%u",
	       raid_target.version[0],
	       raid_target.version[1],
	       raid_target.version[2]);
	return dm_register_target(&raid_target);

static void __exit dm_raid_exit(void)
	dm_unregister_target(&raid_target);

module_init(dm_raid_init);
module_exit(dm_raid_exit);
MODULE_DESCRIPTION(DM_NAME " raid1/4/5/6/10 target");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");