1#![allow(dead_code)]
8
9#[cfg(feature = "alloc")]
10extern crate alloc;
11
12#[cfg(feature = "alloc")]
13use alloc::{collections::BTreeMap, string::String, vec, vec::Vec};
14
/// Magic number written at offset 0 of a serialized superblock (little-endian).
const RAID_SUPER_MAGIC: u32 = 0xA92B_4EFC;

/// Fixed size, in bytes, of a serialized superblock; `serialize` pads to this.
const SUPERBLOCK_SIZE: usize = 256;

/// Default stripe chunk size in bytes (512 KiB). `RaidArray::new` stores it
/// divided by 512, i.e. as a count of 512-byte blocks.
const DEFAULT_CHUNK_SIZE: u64 = 512 * 1024;
27
/// Supported RAID personalities.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RaidLevel {
    /// Striping across all disks; no redundancy.
    Raid0,
    /// Mirroring; every member holds a full copy.
    Raid1,
    /// Striping with one rotating parity chunk per stripe row.
    Raid5,
}
42
43impl RaidLevel {
44 pub fn min_disks(&self) -> usize {
46 match self {
47 Self::Raid0 => 2,
48 Self::Raid1 => 2,
49 Self::Raid5 => 3,
50 }
51 }
52
53 pub fn data_disk_count(&self, total: usize) -> usize {
55 match self {
56 Self::Raid0 => total,
57 Self::Raid1 => 1,
58 Self::Raid5 => total.saturating_sub(1),
59 }
60 }
61}
62
/// Lifecycle state of a single member or spare disk.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DiskState {
    /// Healthy and serving I/O.
    Active,
    /// Reduced redundancy. NOTE(review): never assigned anywhere in this
    /// file — confirm whether callers elsewhere set it or it is vestigial.
    Degraded,
    /// Taken out of service after an error (see `RaidDisk::mark_failed`).
    Failed,
    /// Being reconstructed from the surviving members.
    Rebuilding,
    /// Standby disk held in an array's spare pool.
    Spare,
}
81
#[cfg(feature = "alloc")]
/// A single member (or spare) disk of a RAID array.
#[derive(Debug, Clone, PartialEq)]
pub struct RaidDisk {
    // Slot/device id; `RaidArray::replace_disk` makes a spare inherit the
    // failed slot's id.
    pub id: u32,
    // Device path, e.g. "/dev/vda".
    pub path: String,
    // Current lifecycle state; starts as `Active` (see `new`).
    pub state: DiskState,
    // Capacity in blocks.
    pub size_blocks: u64,
    // Tick recorded by `mark_failed` at the most recent failure; 0 if never failed.
    pub last_error_tick: u64,
}
101
#[cfg(feature = "alloc")]
impl RaidDisk {
    /// Creates a disk record in the `Active` state with a clean error history.
    pub fn new(id: u32, path: &str, size_blocks: u64) -> Self {
        RaidDisk {
            id,
            path: String::from(path),
            state: DiskState::Active,
            size_blocks,
            last_error_tick: 0,
        }
    }

    /// Transitions the disk to `Failed`, recording when the failure happened.
    pub fn mark_failed(&mut self, tick: u64) {
        self.last_error_tick = tick;
        self.state = DiskState::Failed;
    }

    /// Transitions the disk to `Rebuilding`.
    pub fn mark_rebuilding(&mut self) {
        self.state = DiskState::Rebuilding;
    }

    /// Transitions the disk back to `Active`.
    pub fn mark_active(&mut self) {
        self.state = DiskState::Active;
    }
}
131
#[cfg(feature = "alloc")]
/// On-disk array metadata; see `serialize`/`deserialize` for the byte layout.
#[derive(Debug, Clone, PartialEq)]
pub struct Superblock {
    // Must equal `RAID_SUPER_MAGIC` for `deserialize` to accept the block.
    pub magic: u32,
    // Format major version; `new` writes 1.
    pub major_version: u32,
    // Format minor version; `new` writes 2.
    pub minor_version: u32,
    // Array UUID; zeroed by `new`.
    pub set_uuid: [u8; 16],
    // Array name; stored on disk truncated/zero-padded to 32 bytes.
    pub set_name: String,
    // Creation time; zeroed by `new`. NOTE(review): units/epoch not visible here.
    pub ctime: u64,
    // RAID personality of the array.
    pub level: RaidLevel,
    // Layout variant; always 0 in this file.
    pub layout: u32,
    // Array size. NOTE(review): presumably in blocks (callers pass disk
    // `size_blocks`-derived values) — confirm.
    pub size: u64,
    // Number of member disks.
    pub raid_disks: u32,
    // This device's slot number; zeroed by `new`.
    pub dev_number: u32,
    // Update/event counter; zeroed by `new`.
    pub events: u64,
    // Offset of the data area; `new` sets 2048 (units not visible here).
    pub data_offset: u64,
    // Size of the data area; `new` sets it equal to `size`.
    pub data_size: u64,
}
169
#[cfg(feature = "alloc")]
impl Superblock {
    /// Builds a fresh v1.2 superblock for a new array.
    ///
    /// `size` is recorded both as the array size and as the per-device data
    /// size; the UUID, `ctime`, `events` and `dev_number` start at zero and
    /// are expected to be filled in by the caller.
    pub fn new(name: &str, level: RaidLevel, raid_disks: u32, size: u64) -> Self {
        Self {
            magic: RAID_SUPER_MAGIC,
            major_version: 1,
            minor_version: 2,
            set_uuid: [0u8; 16],
            set_name: String::from(name),
            ctime: 0,
            level,
            layout: 0,
            size,
            raid_disks,
            dev_number: 0,
            events: 0,
            // NOTE(review): 2048 reserves space in front of the data area;
            // the unit (bytes vs. 512-byte sectors) is not visible here — confirm.
            data_offset: 2048,
            data_size: size,
        }
    }

    /// Serializes the superblock into a `SUPERBLOCK_SIZE`-byte little-endian
    /// buffer. Byte layout (mirrored exactly by `deserialize`):
    ///   0..4 magic, 4..8 major, 8..12 minor, 12..16 reserved (zero),
    ///   16..32 uuid, 32..64 name (truncated/zero-padded to 32 bytes),
    ///   64..72 ctime, 72..76 level, 76..80 layout, 80..88 size,
    ///   88..92 raid_disks, 92..96 dev_number, 96..104 events,
    ///   104..112 data_offset, 112..120 data_size, remainder zero padding.
    pub fn serialize(&self) -> Vec<u8> {
        let mut buf = Vec::with_capacity(SUPERBLOCK_SIZE);

        buf.extend_from_slice(&self.magic.to_le_bytes());
        buf.extend_from_slice(&self.major_version.to_le_bytes());
        buf.extend_from_slice(&self.minor_version.to_le_bytes());
        // Reserved word so the UUID starts at offset 16.
        buf.extend_from_slice(&0u32.to_le_bytes());
        buf.extend_from_slice(&self.set_uuid);

        // Name field is fixed at 32 bytes: truncate long names, zero-pad short
        // ones. Truncation is byte-wise, so a multi-byte UTF-8 name may be cut
        // mid-character (recovered lossily on the read side).
        let name_bytes = self.set_name.as_bytes();
        let name_len = core::cmp::min(name_bytes.len(), 32);
        buf.extend_from_slice(&name_bytes[..name_len]);
        buf.resize(buf.len() + (32 - name_len), 0);

        buf.extend_from_slice(&self.ctime.to_le_bytes());

        // Level is encoded as its conventional numeric id.
        let level_val: u32 = match self.level {
            RaidLevel::Raid0 => 0,
            RaidLevel::Raid1 => 1,
            RaidLevel::Raid5 => 5,
        };
        buf.extend_from_slice(&level_val.to_le_bytes());
        buf.extend_from_slice(&self.layout.to_le_bytes());
        buf.extend_from_slice(&self.size.to_le_bytes());
        buf.extend_from_slice(&self.raid_disks.to_le_bytes());
        buf.extend_from_slice(&self.dev_number.to_le_bytes());
        buf.extend_from_slice(&self.events.to_le_bytes());
        buf.extend_from_slice(&self.data_offset.to_le_bytes());
        buf.extend_from_slice(&self.data_size.to_le_bytes());

        // Zero-pad to the fixed on-disk size.
        buf.resize(SUPERBLOCK_SIZE, 0);
        buf
    }

    /// Parses a superblock from `data`.
    ///
    /// Returns `None` when the buffer is shorter than `SUPERBLOCK_SIZE`, the
    /// magic does not match, or the level field holds an unknown value.
    /// Field offsets mirror `serialize` exactly.
    pub fn deserialize(data: &[u8]) -> Option<Self> {
        if data.len() < SUPERBLOCK_SIZE {
            return None;
        }

        // Little-endian field readers. All offsets used below stay well under
        // SUPERBLOCK_SIZE, so the length check above makes the slicing safe.
        fn rd_u32(d: &[u8], off: usize) -> u32 {
            let mut b = [0u8; 4];
            b.copy_from_slice(&d[off..off + 4]);
            u32::from_le_bytes(b)
        }
        fn rd_u64(d: &[u8], off: usize) -> u64 {
            let mut b = [0u8; 8];
            b.copy_from_slice(&d[off..off + 8]);
            u64::from_le_bytes(b)
        }

        let magic = rd_u32(data, 0);
        if magic != RAID_SUPER_MAGIC {
            return None;
        }

        let major_version = rd_u32(data, 4);
        let minor_version = rd_u32(data, 8);
        // Bytes 12..16 are the reserved word; skipped.

        let mut set_uuid = [0u8; 16];
        set_uuid.copy_from_slice(&data[16..32]);

        // Name is zero-padded; take bytes up to the first NUL (or all 32).
        let name_end = data[32..64].iter().position(|&b| b == 0).unwrap_or(32);
        let set_name = String::from_utf8_lossy(&data[32..32 + name_end]).into_owned();

        let ctime = rd_u64(data, 64);

        let level = match rd_u32(data, 72) {
            0 => RaidLevel::Raid0,
            1 => RaidLevel::Raid1,
            5 => RaidLevel::Raid5,
            _ => return None,
        };

        Some(Self {
            magic,
            major_version,
            minor_version,
            set_uuid,
            set_name,
            ctime,
            level,
            layout: rd_u32(data, 76),
            size: rd_u64(data, 80),
            raid_disks: rd_u32(data, 88),
            dev_number: rd_u32(data, 92),
            events: rd_u64(data, 96),
            data_offset: rd_u64(data, 104),
            data_size: rd_u64(data, 112),
        })
    }
}
297
/// Aggregate health of a whole array (derived in `RaidArray::update_state`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ArrayState {
    /// Too many failures; the array cannot serve I/O.
    Inactive,
    /// NOTE(review): never assigned anywhere in this file — confirm whether
    /// external callers use it or it is vestigial.
    Clean,
    /// Fully operational with all redundancy intact.
    Active,
    /// Running with reduced redundancy (a member has failed).
    Degraded,
    /// At least one member is being rebuilt.
    Rebuilding,
}
316
/// Errors returned by array and manager operations.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RaidError {
    /// Fewer disks than the level's minimum (see `RaidLevel::min_disks`).
    NotEnoughDisks,
    /// Array is running without full redundancy.
    /// NOTE(review): not produced anywhere in this file — confirm external use.
    ArrayDegraded,
    /// Too many failures; the requested operation cannot proceed.
    ArrayFailed,
    /// Disk index out of range, or no usable spare available.
    DiskNotFound,
    /// Logical address outside the array.
    /// NOTE(review): not produced anywhere in this file — confirm external use.
    InvalidAddress,
    /// An array with this name is already registered.
    AlreadyExists,
    /// No array registered under this name.
    NotFound,
    /// Underlying I/O failure.
    /// NOTE(review): not produced anywhere in this file — confirm external use.
    IoError,
    /// Operation invalid for the current configuration or disk state.
    InvalidConfig,
}
343
/// Result of mapping a logical block onto a physical disk location.
#[derive(Debug, Clone, Copy)]
pub struct StripeMap {
    // Index into `RaidArray::disks` of the disk holding the data.
    pub disk_index: usize,
    // Block offset on that disk.
    pub disk_offset: u64,
    // For RAID-5 data mappings, the disk holding this stripe row's parity;
    // `None` for RAID-0/RAID-1 mappings and for parity-write entries.
    pub parity_disk: Option<usize>,
}
354
#[cfg(feature = "alloc")]
/// An assembled RAID array: member disks plus mapping and health logic.
#[derive(Debug, PartialEq)]
pub struct RaidArray {
    // Array name (also recorded in the superblock).
    pub name: String,
    // Array UUID; zeroed at creation.
    pub uuid: [u8; 16],
    // RAID personality.
    pub level: RaidLevel,
    // Stripe chunk size. NOTE(review): initialized as DEFAULT_CHUNK_SIZE / 512,
    // i.e. in 512-byte blocks — confirm the unit callers expect.
    pub chunk_size: u64,
    // Member disks, in stripe order.
    pub disks: Vec<RaidDisk>,
    // Hot-spare pool consumed by `replace_disk`.
    pub spares: Vec<RaidDisk>,
    // Current aggregate health (kept fresh by `update_state`).
    pub state: ArrayState,
    // On-disk metadata for this array.
    pub superblock: Superblock,
    // Rebuild completion percentage (0-100).
    pub rebuild_progress: u8,
}
378
#[cfg(feature = "alloc")]
impl RaidArray {
    /// Assembles an array from `disks`.
    ///
    /// The usable per-disk size is clamped to the smallest member.
    ///
    /// # Errors
    /// `NotEnoughDisks` when fewer than `level.min_disks()` members are given.
    pub fn new(name: &str, level: RaidLevel, disks: Vec<RaidDisk>) -> Result<Self, RaidError> {
        if disks.len() < level.min_disks() {
            return Err(RaidError::NotEnoughDisks);
        }

        // Mixed-size members are clamped to the smallest disk's capacity.
        let min_size = disks.iter().map(|d| d.size_blocks).min().unwrap_or(0);

        let superblock = Superblock::new(name, level, disks.len() as u32, min_size);

        Ok(Self {
            name: String::from(name),
            uuid: [0u8; 16],
            level,
            // DEFAULT_CHUNK_SIZE is in bytes; chunk_size is kept in 512-byte blocks.
            chunk_size: DEFAULT_CHUNK_SIZE / 512,
            disks,
            spares: Vec::new(),
            state: ArrayState::Active,
            superblock,
            rebuild_progress: 0,
        })
    }

    /// RAID-0 mapping: chunks rotate round-robin across all members.
    ///
    /// # Errors
    /// `NotEnoughDisks` when the array has no members.
    pub fn stripe_map(&self, logical_block: u64) -> Result<StripeMap, RaidError> {
        if self.disks.is_empty() {
            return Err(RaidError::NotEnoughDisks);
        }

        let num_disks = self.disks.len() as u64;
        let stripe = logical_block / self.chunk_size;
        let offset_in_chunk = logical_block % self.chunk_size;

        // Chunk `stripe` lands on disk (stripe mod N) at row (stripe div N).
        let disk_index = (stripe % num_disks) as usize;
        let disk_offset = (stripe / num_disks) * self.chunk_size + offset_in_chunk;

        Ok(StripeMap {
            disk_index,
            disk_offset,
            parity_disk: None,
        })
    }

    /// RAID-5 mapping with rotating parity.
    ///
    /// Each stripe row reserves one chunk for parity; the parity disk starts
    /// at the last member and rotates down one disk per row, and data chunks
    /// skip over it.
    ///
    /// # Errors
    /// `NotEnoughDisks` when fewer members remain than RAID-5's minimum.
    pub fn raid5_map(&self, logical_block: u64) -> Result<StripeMap, RaidError> {
        if self.disks.len() < RaidLevel::Raid5.min_disks() {
            return Err(RaidError::NotEnoughDisks);
        }

        let num_disks = self.disks.len() as u64;
        let data_disks = num_disks - 1;
        let stripe = logical_block / self.chunk_size;
        let offset_in_chunk = logical_block % self.chunk_size;

        let stripe_row = stripe / data_disks;
        let data_index = stripe % data_disks;

        // Parity starts on the last disk and rotates downward as rows advance.
        let parity_disk = (num_disks - 1 - (stripe_row % num_disks)) as usize;

        // Data chunks occupy the non-parity disks in order, skipping parity.
        let mut physical_disk = data_index as usize;
        if physical_disk >= parity_disk {
            physical_disk += 1;
        }

        let disk_offset = stripe_row * self.chunk_size + offset_in_chunk;

        Ok(StripeMap {
            disk_index: physical_disk,
            disk_offset,
            parity_disk: Some(parity_disk),
        })
    }

    /// Maps a logical block for reading, dispatching on the array level.
    pub fn read_stripe(&self, logical_block: u64) -> Result<StripeMap, RaidError> {
        match self.level {
            RaidLevel::Raid0 => self.stripe_map(logical_block),
            RaidLevel::Raid1 => self.mirror_read(logical_block),
            RaidLevel::Raid5 => self.raid5_map(logical_block),
        }
    }

    /// Maps a logical block for writing; may produce several physical writes
    /// (every mirror for RAID-1, data plus parity for RAID-5).
    pub fn write_stripe(&self, logical_block: u64) -> Result<Vec<StripeMap>, RaidError> {
        match self.level {
            RaidLevel::Raid0 => Ok(vec![self.stripe_map(logical_block)?]),
            RaidLevel::Raid1 => self.mirror_write(logical_block),
            RaidLevel::Raid5 => {
                let map = self.raid5_map(logical_block)?;
                let mut writes = vec![map];
                // The parity chunk sits at the same row offset on the parity disk.
                if let Some(parity_idx) = map.parity_disk {
                    writes.push(StripeMap {
                        disk_index: parity_idx,
                        disk_offset: map.disk_offset,
                        parity_disk: None,
                    });
                }
                Ok(writes)
            }
        }
    }

    /// RAID-1 read: picks the first active mirror; blocks map 1:1.
    ///
    /// # Errors
    /// `ArrayFailed` when no mirror is in the `Active` state.
    fn mirror_read(&self, logical_block: u64) -> Result<StripeMap, RaidError> {
        for (i, disk) in self.disks.iter().enumerate() {
            if disk.state == DiskState::Active {
                return Ok(StripeMap {
                    disk_index: i,
                    disk_offset: logical_block,
                    parity_disk: None,
                });
            }
        }
        Err(RaidError::ArrayFailed)
    }

    /// RAID-1 write: fans out to every active or rebuilding mirror.
    ///
    /// # Errors
    /// `ArrayFailed` when no mirror can accept the write.
    fn mirror_write(&self, logical_block: u64) -> Result<Vec<StripeMap>, RaidError> {
        let mut writes = Vec::new();
        for (i, disk) in self.disks.iter().enumerate() {
            // Rebuilding mirrors still receive writes so they stay consistent.
            if disk.state == DiskState::Active || disk.state == DiskState::Rebuilding {
                writes.push(StripeMap {
                    disk_index: i,
                    disk_offset: logical_block,
                    parity_disk: None,
                });
            }
        }
        if writes.is_empty() {
            return Err(RaidError::ArrayFailed);
        }
        Ok(writes)
    }

    /// XOR parity over `blocks`, sized to the first block; longer blocks are
    /// truncated to that size, shorter ones contribute only their own bytes.
    pub fn compute_parity(blocks: &[&[u8]]) -> Vec<u8> {
        if blocks.is_empty() {
            return Vec::new();
        }
        let len = blocks[0].len();
        let mut parity = vec![0u8; len];
        for block in blocks {
            xor_blocks(&mut parity, &block[..core::cmp::min(block.len(), len)]);
        }
        parity
    }

    /// Starts (and, in this model, instantly completes) a rebuild of the disk
    /// at `failed_disk_idx`.
    ///
    /// # Errors
    /// - `DiskNotFound` for an out-of-range index.
    /// - `ArrayFailed` when redundancy is insufficient: always for RAID-0, for
    ///   RAID-1 with no active mirror left, for RAID-5 with more than one
    ///   failed member.
    pub fn rebuild(&mut self, failed_disk_idx: usize) -> Result<(), RaidError> {
        if failed_disk_idx >= self.disks.len() {
            return Err(RaidError::DiskNotFound);
        }

        match self.level {
            // RAID-0 has no redundancy to rebuild from.
            RaidLevel::Raid0 => Err(RaidError::ArrayFailed),
            RaidLevel::Raid1 => {
                // Need at least one surviving mirror as the copy source.
                if !self.disks.iter().any(|d| d.state == DiskState::Active) {
                    return Err(RaidError::ArrayFailed);
                }
                self.complete_rebuild(failed_disk_idx);
                Ok(())
            }
            RaidLevel::Raid5 => {
                // Single parity tolerates exactly one failure.
                if self.failed_disk_count() > 1 {
                    return Err(RaidError::ArrayFailed);
                }
                self.complete_rebuild(failed_disk_idx);
                Ok(())
            }
        }
    }

    /// Shared rebuild bookkeeping: mark the disk rebuilding, then — because
    /// this model rebuilds instantaneously — active again, and refresh the
    /// aggregate array state.
    fn complete_rebuild(&mut self, disk_idx: usize) {
        self.disks[disk_idx].mark_rebuilding();
        self.state = ArrayState::Rebuilding;
        self.rebuild_progress = 100;
        self.disks[disk_idx].mark_active();
        self.update_state();
    }

    /// Recomputes and returns the array's aggregate state.
    pub fn check_health(&mut self) -> ArrayState {
        self.update_state();
        self.state
    }

    /// Swaps a failed disk for a hot spare and rebuilds onto it.
    ///
    /// # Errors
    /// - `DiskNotFound`: index out of range, or no spare available.
    /// - `InvalidConfig`: the target disk is not in the `Failed` state.
    pub fn replace_disk(&mut self, failed_disk_idx: usize) -> Result<(), RaidError> {
        if failed_disk_idx >= self.disks.len() {
            return Err(RaidError::DiskNotFound);
        }
        if self.disks[failed_disk_idx].state != DiskState::Failed {
            return Err(RaidError::InvalidConfig);
        }

        let spare_idx = self.spares.iter().position(|s| s.state == DiskState::Spare);

        if let Some(idx) = spare_idx {
            let mut spare = self.spares.remove(idx);
            spare.state = DiskState::Rebuilding;
            // The spare takes over the failed slot's device id.
            spare.id = self.disks[failed_disk_idx].id;
            self.disks[failed_disk_idx] = spare;
            self.rebuild(failed_disk_idx)
        } else {
            Err(RaidError::DiskNotFound)
        }
    }

    /// Adds `disk` to the spare pool, forcing its state to `Spare`.
    pub fn add_spare(&mut self, disk: RaidDisk) {
        let mut spare = disk;
        spare.state = DiskState::Spare;
        self.spares.push(spare);
    }

    /// Number of members currently in the `Active` state.
    pub fn active_disk_count(&self) -> usize {
        self.disks
            .iter()
            .filter(|d| d.state == DiskState::Active)
            .count()
    }

    /// Number of members currently in the `Failed` state.
    pub fn failed_disk_count(&self) -> usize {
        self.disks
            .iter()
            .filter(|d| d.state == DiskState::Failed)
            .count()
    }

    /// Usable capacity in blocks: smallest member size times the level's
    /// data-disk count (all disks for RAID-0, one for RAID-1, n-1 for RAID-5).
    pub fn capacity_blocks(&self) -> u64 {
        let min_size = self.disks.iter().map(|d| d.size_blocks).min().unwrap_or(0);
        // `data_disk_count` saturates, so an (invalid) empty RAID-5 disk list
        // yields 0 instead of underflowing.
        min_size * self.level.data_disk_count(self.disks.len()) as u64
    }

    /// Derives the aggregate array state from the per-disk states.
    fn update_state(&mut self) {
        let failed = self.failed_disk_count();
        let rebuilding = self
            .disks
            .iter()
            .filter(|d| d.state == DiskState::Rebuilding)
            .count();

        self.state = match self.level {
            // Any failure kills a plain stripe set outright.
            RaidLevel::Raid0 => {
                if failed > 0 {
                    ArrayState::Inactive
                } else {
                    ArrayState::Active
                }
            }
            // Mirrors survive until every member has failed.
            RaidLevel::Raid1 => {
                if failed >= self.disks.len() {
                    ArrayState::Inactive
                } else if rebuilding > 0 {
                    ArrayState::Rebuilding
                } else if failed > 0 {
                    ArrayState::Degraded
                } else {
                    ArrayState::Active
                }
            }
            // Single parity tolerates one failure.
            RaidLevel::Raid5 => {
                if failed > 1 {
                    ArrayState::Inactive
                } else if rebuilding > 0 {
                    ArrayState::Rebuilding
                } else if failed == 1 {
                    ArrayState::Degraded
                } else {
                    ArrayState::Active
                }
            }
        };
    }
}
693
/// XORs `src` into `dest` in place, byte by byte.
///
/// Only the overlapping prefix is combined: iteration stops at the shorter of
/// the two slices, so mismatched lengths are safe and any extra bytes in
/// `dest` are left untouched.
pub fn xor_blocks(dest: &mut [u8], src: &[u8]) {
    for (d, &s) in dest.iter_mut().zip(src.iter()) {
        *d ^= s;
    }
}
701
#[cfg(feature = "alloc")]
/// Registry of RAID arrays, keyed by array name.
pub struct RaidManager {
    // Name -> array; a BTreeMap keeps `get_status` output sorted by name.
    arrays: BTreeMap<String, RaidArray>,
}
712
#[cfg(feature = "alloc")]
impl Default for RaidManager {
    /// Equivalent to [`RaidManager::new`]: a manager with no arrays.
    fn default() -> Self {
        Self::new()
    }
}
719
#[cfg(feature = "alloc")]
impl RaidManager {
    /// Creates a manager with no registered arrays.
    pub fn new() -> Self {
        let arrays = BTreeMap::new();
        Self { arrays }
    }

    /// Assembles and registers a new array under `name`.
    ///
    /// # Errors
    /// `AlreadyExists` when the name is taken; otherwise propagates any error
    /// from `RaidArray::new` (e.g. `NotEnoughDisks`).
    pub fn create_array(
        &mut self,
        name: &str,
        level: RaidLevel,
        disks: Vec<RaidDisk>,
    ) -> Result<(), RaidError> {
        if self.arrays.contains_key(name) {
            Err(RaidError::AlreadyExists)
        } else {
            let array = RaidArray::new(name, level, disks)?;
            self.arrays.insert(String::from(name), array);
            Ok(())
        }
    }

    /// Unregisters the array named `name`.
    ///
    /// # Errors
    /// `NotFound` when no array with that name is registered.
    pub fn destroy_array(&mut self, name: &str) -> Result<(), RaidError> {
        match self.arrays.remove(name) {
            Some(_) => Ok(()),
            None => Err(RaidError::NotFound),
        }
    }

    /// Attaches `disk` to `array_name`'s spare pool.
    ///
    /// # Errors
    /// `NotFound` when no array with that name is registered.
    pub fn add_spare(&mut self, array_name: &str, disk: RaidDisk) -> Result<(), RaidError> {
        match self.arrays.get_mut(array_name) {
            Some(array) => {
                array.add_spare(disk);
                Ok(())
            }
            None => Err(RaidError::NotFound),
        }
    }

    /// Snapshot of every array as (name, state, active disks, total disks),
    /// ordered by array name.
    pub fn get_status(&self) -> Vec<(&str, ArrayState, usize, usize)> {
        let mut status = Vec::with_capacity(self.arrays.len());
        for (name, array) in &self.arrays {
            status.push((
                name.as_str(),
                array.state,
                array.active_disk_count(),
                array.disks.len(),
            ));
        }
        status
    }

    /// Borrows the array named `name`, if registered.
    pub fn get_array(&self, name: &str) -> Option<&RaidArray> {
        self.arrays.get(name)
    }

    /// Mutably borrows the array named `name`, if registered.
    pub fn get_array_mut(&mut self, name: &str) -> Option<&mut RaidArray> {
        self.arrays.get_mut(name)
    }

    /// Number of registered arrays.
    pub fn array_count(&self) -> usize {
        self.arrays.len()
    }
}
789
#[cfg(test)]
mod tests {
    use super::*;

    // Builds `count` equally-sized active disks named /dev/vda, /dev/vdb, ...
    fn make_disks(count: usize, size: u64) -> Vec<RaidDisk> {
        (0..count)
            .map(|i| {
                RaidDisk::new(
                    i as u32,
                    &alloc::format!("/dev/vd{}", (b'a' + i as u8) as char),
                    size,
                )
            })
            .collect()
    }

    #[test]
    fn test_raid_level_min_disks() {
        assert_eq!(RaidLevel::Raid0.min_disks(), 2);
        assert_eq!(RaidLevel::Raid1.min_disks(), 2);
        assert_eq!(RaidLevel::Raid5.min_disks(), 3);
    }

    #[test]
    fn test_raid_level_data_disks() {
        assert_eq!(RaidLevel::Raid0.data_disk_count(4), 4);
        assert_eq!(RaidLevel::Raid1.data_disk_count(2), 1);
        assert_eq!(RaidLevel::Raid5.data_disk_count(4), 3);
    }

    // A single disk is below every level's minimum.
    #[test]
    fn test_raid0_not_enough_disks() {
        let disks = make_disks(1, 1000);
        assert_eq!(
            RaidArray::new("md0", RaidLevel::Raid0, disks),
            Err(RaidError::NotEnoughDisks)
        );
    }

    // First chunk maps to disk 0 offset 0; the next chunk rotates to disk 1.
    #[test]
    fn test_raid0_stripe_map() {
        let disks = make_disks(3, 10000);
        let array = RaidArray::new("md0", RaidLevel::Raid0, disks).unwrap();

        let map = array.stripe_map(0).unwrap();
        assert_eq!(map.disk_index, 0);
        assert_eq!(map.disk_offset, 0);

        let map2 = array.stripe_map(array.chunk_size).unwrap();
        assert_eq!(map2.disk_index, 1);
    }

    // RAID-1 reads map 1:1 onto a mirror.
    #[test]
    fn test_raid1_mirror_read() {
        let disks = make_disks(2, 10000);
        let array = RaidArray::new("md1", RaidLevel::Raid1, disks).unwrap();
        let map = array.read_stripe(500).unwrap();
        assert_eq!(map.disk_offset, 500);
    }

    // RAID-1 writes fan out to both mirrors.
    #[test]
    fn test_raid1_mirror_write() {
        let disks = make_disks(2, 10000);
        let array = RaidArray::new("md1", RaidLevel::Raid1, disks).unwrap();
        let writes = array.write_stripe(100).unwrap();
        assert_eq!(writes.len(), 2);
    }

    // RAID-5 mappings carry a parity disk distinct from the data disk.
    #[test]
    fn test_raid5_map() {
        let disks = make_disks(4, 10000);
        let array = RaidArray::new("md5", RaidLevel::Raid5, disks).unwrap();
        let map = array.raid5_map(0).unwrap();
        assert!(map.parity_disk.is_some());
        assert_ne!(map.disk_index, map.parity_disk.unwrap());
    }

    #[test]
    fn test_xor_blocks() {
        let mut a = [0xAA, 0xBB, 0xCC, 0xDD];
        let b = [0x55, 0x44, 0x33, 0x22];
        xor_blocks(&mut a, &b);
        assert_eq!(a, [0xFF, 0xFF, 0xFF, 0xFF]);
    }

    // XOR parity recovers a lost block: parity ^ block1 == block2.
    #[test]
    fn test_compute_parity() {
        let block1: &[u8] = &[0xFF, 0x00, 0xAA];
        let block2: &[u8] = &[0x00, 0xFF, 0x55];
        let parity = RaidArray::compute_parity(&[block1, block2]);
        assert_eq!(parity, &[0xFF, 0xFF, 0xFF]);

        let mut recovered = parity.clone();
        xor_blocks(&mut recovered, block1);
        assert_eq!(recovered, block2);
    }

    // serialize/deserialize round-trips the fields checked here.
    #[test]
    fn test_superblock_serialize_deserialize() {
        let sb = Superblock::new("md0", RaidLevel::Raid5, 4, 100000);
        let bytes = sb.serialize();
        assert_eq!(bytes.len(), SUPERBLOCK_SIZE);

        let parsed = Superblock::deserialize(&bytes).unwrap();
        assert_eq!(parsed.magic, RAID_SUPER_MAGIC);
        assert_eq!(parsed.level, RaidLevel::Raid5);
        assert_eq!(parsed.raid_disks, 4);
        assert_eq!(parsed.set_name, "md0");
    }

    // A wrong magic number is rejected outright.
    #[test]
    fn test_superblock_bad_magic() {
        let mut bytes = vec![0u8; SUPERBLOCK_SIZE];
        bytes[0..4].copy_from_slice(&0xDEAD_BEEFu32.to_le_bytes());
        assert!(Superblock::deserialize(&bytes).is_none());
    }

    // Capacity: RAID-0 sums all disks, RAID-1 one disk, RAID-5 n-1 disks.
    #[test]
    fn test_capacity_blocks() {
        let disks = make_disks(4, 10000);
        let r0 = RaidArray::new("r0", RaidLevel::Raid0, disks.clone()).unwrap();
        assert_eq!(r0.capacity_blocks(), 40000);

        let r1 = RaidArray::new("r1", RaidLevel::Raid1, make_disks(2, 10000)).unwrap();
        assert_eq!(r1.capacity_blocks(), 10000);

        let r5 = RaidArray::new("r5", RaidLevel::Raid5, disks).unwrap();
        assert_eq!(r5.capacity_blocks(), 30000);
    }

    #[test]
    fn test_raid_manager_create_destroy() {
        let mut mgr = RaidManager::new();
        let disks = make_disks(3, 10000);
        mgr.create_array("md0", RaidLevel::Raid5, disks).unwrap();
        assert_eq!(mgr.array_count(), 1);

        // Duplicate names are rejected.
        let disks2 = make_disks(2, 10000);
        assert_eq!(
            mgr.create_array("md0", RaidLevel::Raid1, disks2),
            Err(RaidError::AlreadyExists)
        );

        mgr.destroy_array("md0").unwrap();
        assert_eq!(mgr.array_count(), 0);
    }

    #[test]
    fn test_raid_manager_status() {
        let mut mgr = RaidManager::new();
        let disks = make_disks(2, 5000);
        mgr.create_array("md0", RaidLevel::Raid1, disks).unwrap();

        let status = mgr.get_status();
        assert_eq!(status.len(), 1);
        assert_eq!(status[0].0, "md0");
        assert_eq!(status[0].1, ArrayState::Active);
        assert_eq!(status[0].2, 2); assert_eq!(status[0].3, 2); }
}