⚠️ VeridianOS Kernel Documentation - This is low-level kernel code. All functions are unsafe unless explicitly marked otherwise. no_std

veridian_kernel/drivers/raid/
manager.rs

1//! Software RAID Implementation
2//!
3//! Supports RAID levels 0 (striping), 1 (mirroring), and 5 (striping with
4//! distributed parity). Includes stripe mapping, XOR parity computation,
5//! array health monitoring, hot-spare replacement, and rebuild.
6
7#![allow(dead_code)]
8
9#[cfg(feature = "alloc")]
10extern crate alloc;
11
12#[cfg(feature = "alloc")]
13use alloc::{collections::BTreeMap, string::String, vec, vec::Vec};
14
15// ---------------------------------------------------------------------------
16// RAID Superblock
17// ---------------------------------------------------------------------------
18
19/// mdadm v1.2 compatible superblock magic.
20const RAID_SUPER_MAGIC: u32 = 0xA92B_4EFC;
21
22/// Superblock size in bytes.
23const SUPERBLOCK_SIZE: usize = 256;
24
25/// Default chunk size (512 KB).
26const DEFAULT_CHUNK_SIZE: u64 = 512 * 1024;
27
28// ---------------------------------------------------------------------------
29// RAID Level
30// ---------------------------------------------------------------------------
31
/// Supported RAID levels.
///
/// Only levels 0, 1, and 5 are implemented; the level determines the
/// striping geometry, redundancy, and rebuild strategy used by
/// `RaidArray`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RaidLevel {
    /// Striping (no redundancy).
    Raid0,
    /// Mirroring (full redundancy).
    Raid1,
    /// Striping with distributed parity.
    Raid5,
}
42
43impl RaidLevel {
44    /// Minimum number of disks for this level.
45    pub fn min_disks(&self) -> usize {
46        match self {
47            Self::Raid0 => 2,
48            Self::Raid1 => 2,
49            Self::Raid5 => 3,
50        }
51    }
52
53    /// Number of data disks given total disk count.
54    pub fn data_disk_count(&self, total: usize) -> usize {
55        match self {
56            Self::Raid0 => total,
57            Self::Raid1 => 1,
58            Self::Raid5 => total.saturating_sub(1),
59        }
60    }
61}
62
63// ---------------------------------------------------------------------------
64// Disk State
65// ---------------------------------------------------------------------------
66
/// State of an individual disk in a RAID array.
///
/// Transitions are driven by `RaidDisk::mark_*`, `RaidArray::rebuild`,
/// and `RaidArray::replace_disk`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DiskState {
    /// Disk is active and healthy.
    Active,
    /// Disk has some errors but is still functional.
    Degraded,
    /// Disk has failed and is offline.
    Failed,
    /// Disk is being rebuilt.
    Rebuilding,
    /// Disk is a hot spare.
    Spare,
}
81
82// ---------------------------------------------------------------------------
83// RAID Disk
84// ---------------------------------------------------------------------------
85
/// A physical disk member of a RAID array.
#[cfg(feature = "alloc")]
#[derive(Debug, Clone, PartialEq)]
pub struct RaidDisk {
    /// Disk identifier (slot number within the array).
    pub id: u32,
    /// Device path (e.g., "/dev/vda").
    pub path: String,
    /// Current state.
    pub state: DiskState,
    /// Size in blocks.
    pub size_blocks: u64,
    /// Tick count of last error (0 = no error recorded).
    pub last_error_tick: u64,
}
101
#[cfg(feature = "alloc")]
impl RaidDisk {
    /// Construct a disk that starts out `Active` with no recorded
    /// errors.
    pub fn new(id: u32, path: &str, size_blocks: u64) -> Self {
        RaidDisk {
            id,
            path: path.into(),
            state: DiskState::Active,
            size_blocks,
            last_error_tick: 0,
        }
    }

    /// Take the disk offline, recording the tick at which the failure
    /// was observed.
    pub fn mark_failed(&mut self, tick: u64) {
        self.last_error_tick = tick;
        self.state = DiskState::Failed;
    }

    /// Flag the disk as the target of an in-progress rebuild.
    pub fn mark_rebuilding(&mut self) {
        self.state = DiskState::Rebuilding;
    }

    /// Return the disk to normal service.
    pub fn mark_active(&mut self) {
        self.state = DiskState::Active;
    }
}
131
132// ---------------------------------------------------------------------------
133// RAID Superblock
134// ---------------------------------------------------------------------------
135
/// mdadm v1.2-compatible superblock.
///
/// Serialized into a fixed 256-byte little-endian record; see
/// `Superblock::serialize` for the on-disk field offsets.
#[cfg(feature = "alloc")]
#[derive(Debug, Clone, PartialEq)]
pub struct Superblock {
    /// Magic number (0xA92B4EFC).
    pub magic: u32,
    /// Major version (1).
    pub major_version: u32,
    /// Minor version (2).
    pub minor_version: u32,
    /// Array UUID.
    pub set_uuid: [u8; 16],
    /// Array name (truncated to 32 bytes on serialization).
    pub set_name: String,
    /// Creation time (tick count).
    pub ctime: u64,
    /// RAID level.
    pub level: RaidLevel,
    /// Layout (left-symmetric = 0 for RAID5).
    pub layout: u32,
    /// Usable size per device in blocks.
    pub size: u64,
    /// Number of RAID disks (not counting spares).
    pub raid_disks: u32,
    /// Device number within array.
    pub dev_number: u32,
    /// Event count (incremented on every state change).
    pub events: u64,
    /// Data offset in blocks from start of device.
    pub data_offset: u64,
    /// Data size in blocks.
    pub data_size: u64,
}
169
#[cfg(feature = "alloc")]
impl Superblock {
    /// Create a new superblock for an array.
    ///
    /// The UUID and ctime are zeroed here; callers are expected to
    /// fill them in before the superblock is written out.
    pub fn new(name: &str, level: RaidLevel, raid_disks: u32, size: u64) -> Self {
        Self {
            magic: RAID_SUPER_MAGIC,
            major_version: 1,
            minor_version: 2,
            set_uuid: [0u8; 16],
            set_name: String::from(name),
            ctime: 0,
            level,
            layout: 0,
            size,
            raid_disks,
            dev_number: 0,
            events: 0,
            data_offset: 2048, // Standard mdadm v1.2 offset
            data_size: size,
        }
    }

    /// Serialize superblock to bytes.
    ///
    /// On-disk layout (little-endian, byte offsets; must stay in sync
    /// with `deserialize`):
    ///
    /// ```text
    ///   0..4     magic            4..8     major_version
    ///   8..12    minor_version    12..16   (reserved padding)
    ///   16..32   set_uuid         32..64   set_name (NUL-padded)
    ///   64..72   ctime            72..76   level
    ///   76..80   layout           80..88   size
    ///   88..92   raid_disks       92..96   dev_number
    ///   96..104  events           104..112 data_offset
    ///   112..120 data_size        120..256 zero padding
    /// ```
    pub fn serialize(&self) -> Vec<u8> {
        let mut buf = Vec::with_capacity(SUPERBLOCK_SIZE);

        buf.extend_from_slice(&self.magic.to_le_bytes());
        buf.extend_from_slice(&self.major_version.to_le_bytes());
        buf.extend_from_slice(&self.minor_version.to_le_bytes());
        // Pad to 12 bytes
        buf.extend_from_slice(&0u32.to_le_bytes());
        buf.extend_from_slice(&self.set_uuid);

        // Set name (32 bytes, null-padded). Names longer than 32 bytes
        // are truncated; NOTE(review): a multi-byte UTF-8 character
        // split at the 32-byte boundary round-trips as U+FFFD via
        // `from_utf8_lossy` in `deserialize`.
        let name_bytes = self.set_name.as_bytes();
        let name_len = core::cmp::min(name_bytes.len(), 32);
        buf.extend_from_slice(&name_bytes[..name_len]);
        buf.resize(buf.len() + (32 - name_len), 0);

        buf.extend_from_slice(&self.ctime.to_le_bytes());

        // Encode the level using the conventional md level numbers.
        let level_val: u32 = match self.level {
            RaidLevel::Raid0 => 0,
            RaidLevel::Raid1 => 1,
            RaidLevel::Raid5 => 5,
        };
        buf.extend_from_slice(&level_val.to_le_bytes());
        buf.extend_from_slice(&self.layout.to_le_bytes());
        buf.extend_from_slice(&self.size.to_le_bytes());
        buf.extend_from_slice(&self.raid_disks.to_le_bytes());
        buf.extend_from_slice(&self.dev_number.to_le_bytes());
        buf.extend_from_slice(&self.events.to_le_bytes());
        buf.extend_from_slice(&self.data_offset.to_le_bytes());
        buf.extend_from_slice(&self.data_size.to_le_bytes());

        // Pad to SUPERBLOCK_SIZE
        buf.resize(SUPERBLOCK_SIZE, 0);
        buf
    }

    /// Deserialize superblock from bytes.
    ///
    /// Returns `None` if the buffer is shorter than `SUPERBLOCK_SIZE`,
    /// the magic does not match, or the level value is unknown. Field
    /// offsets mirror the table documented on `serialize`.
    /// NOTE(review): version fields are read but not validated —
    /// confirm whether non-1.2 superblocks should be rejected.
    pub fn deserialize(data: &[u8]) -> Option<Self> {
        if data.len() < SUPERBLOCK_SIZE {
            return None;
        }

        let magic = u32::from_le_bytes([data[0], data[1], data[2], data[3]]);
        if magic != RAID_SUPER_MAGIC {
            return None;
        }

        let major_version = u32::from_le_bytes([data[4], data[5], data[6], data[7]]);
        let minor_version = u32::from_le_bytes([data[8], data[9], data[10], data[11]]);

        let mut set_uuid = [0u8; 16];
        set_uuid.copy_from_slice(&data[16..32]);

        // Set name (32 bytes at offset 32); `unwrap_or(32)` handles a
        // name that fills the field with no NUL terminator.
        let name_end = data[32..64].iter().position(|&b| b == 0).unwrap_or(32);
        let set_name = String::from_utf8_lossy(&data[32..32 + name_end]).into_owned();

        let ctime = u64::from_le_bytes([
            data[64], data[65], data[66], data[67], data[68], data[69], data[70], data[71],
        ]);

        let level_val = u32::from_le_bytes([data[72], data[73], data[74], data[75]]);
        let level = match level_val {
            0 => RaidLevel::Raid0,
            1 => RaidLevel::Raid1,
            5 => RaidLevel::Raid5,
            _ => return None,
        };

        let layout = u32::from_le_bytes([data[76], data[77], data[78], data[79]]);
        let size = u64::from_le_bytes([
            data[80], data[81], data[82], data[83], data[84], data[85], data[86], data[87],
        ]);
        let raid_disks = u32::from_le_bytes([data[88], data[89], data[90], data[91]]);
        let dev_number = u32::from_le_bytes([data[92], data[93], data[94], data[95]]);
        let events = u64::from_le_bytes([
            data[96], data[97], data[98], data[99], data[100], data[101], data[102], data[103],
        ]);
        let data_offset = u64::from_le_bytes([
            data[104], data[105], data[106], data[107], data[108], data[109], data[110], data[111],
        ]);
        let data_size = u64::from_le_bytes([
            data[112], data[113], data[114], data[115], data[116], data[117], data[118], data[119],
        ]);

        Some(Self {
            magic,
            major_version,
            minor_version,
            set_uuid,
            set_name,
            ctime,
            level,
            layout,
            size,
            raid_disks,
            dev_number,
            events,
            data_offset,
            data_size,
        })
    }
}
297
298// ---------------------------------------------------------------------------
299// Array State
300// ---------------------------------------------------------------------------
301
/// RAID array operational state.
///
/// Derived from member disk states by `RaidArray::update_state`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ArrayState {
    /// Array is not started (or has lost too many disks to run).
    Inactive,
    /// All disks are healthy, no writes pending.
    Clean,
    /// Array is operating normally.
    Active,
    /// One or more disks have failed but array is still operational.
    Degraded,
    /// A failed disk is being rebuilt.
    Rebuilding,
}
316
317// ---------------------------------------------------------------------------
318// RAID Array
319// ---------------------------------------------------------------------------
320
/// RAID error type.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RaidError {
    /// Not enough disks for the RAID level.
    NotEnoughDisks,
    /// Array is degraded and cannot tolerate more failures.
    ArrayDegraded,
    /// Array has failed (too many disk failures).
    ArrayFailed,
    /// Disk not found.
    DiskNotFound,
    /// Invalid block address.
    InvalidAddress,
    /// Array already exists.
    AlreadyExists,
    /// Array not found.
    NotFound,
    /// I/O error on disk.
    IoError,
    /// Invalid configuration.
    InvalidConfig,
}
343
/// Stripe mapping result: which disk and offset to access.
///
/// Produced by `RaidArray::stripe_map`, `raid5_map`, and the
/// read/write dispatch helpers.
#[derive(Debug, Clone, Copy)]
pub struct StripeMap {
    /// Index of the data disk within the array's disk list.
    pub disk_index: usize,
    /// Block offset on that disk.
    pub disk_offset: u64,
    /// For RAID5: index of the parity disk (None for RAID0/RAID1).
    pub parity_disk: Option<usize>,
}
354
/// A RAID array composed of multiple disks.
#[cfg(feature = "alloc")]
#[derive(Debug, PartialEq)]
pub struct RaidArray {
    /// Array name.
    pub name: String,
    /// Array UUID.
    pub uuid: [u8; 16],
    /// RAID level.
    pub level: RaidLevel,
    /// Chunk size in blocks (512-byte blocks assumed).
    pub chunk_size: u64,
    /// Member disks.
    pub disks: Vec<RaidDisk>,
    /// Hot spare disks.
    pub spares: Vec<RaidDisk>,
    /// Array state.
    pub state: ArrayState,
    /// Superblock.
    pub superblock: Superblock,
    /// Rebuild progress (0-100).
    pub rebuild_progress: u8,
}
378
#[cfg(feature = "alloc")]
impl RaidArray {
    /// Create a new RAID array from `disks`.
    ///
    /// Returns `NotEnoughDisks` when fewer than `level.min_disks()`
    /// members are supplied. The usable per-device size recorded in
    /// the superblock is taken from the smallest member so striping
    /// is uniform across disks.
    pub fn new(name: &str, level: RaidLevel, disks: Vec<RaidDisk>) -> Result<Self, RaidError> {
        if disks.len() < level.min_disks() {
            return Err(RaidError::NotEnoughDisks);
        }

        // Use the smallest disk's size for uniform striping.
        let min_size = disks.iter().map(|d| d.size_blocks).min().unwrap_or(0);

        let superblock = Superblock::new(name, level, disks.len() as u32, min_size);

        Ok(Self {
            name: String::from(name),
            uuid: [0u8; 16],
            level,
            // DEFAULT_CHUNK_SIZE is in bytes; store the chunk size in
            // 512-byte blocks.
            chunk_size: DEFAULT_CHUNK_SIZE / 512,
            disks,
            spares: Vec::new(),
            state: ArrayState::Active,
            superblock,
            rebuild_progress: 0,
        })
    }

    /// Map a logical block to a physical stripe location (RAID0).
    ///
    /// Chunks rotate round-robin over all member disks; there is no
    /// redundancy, so `parity_disk` is always `None`.
    pub fn stripe_map(&self, logical_block: u64) -> Result<StripeMap, RaidError> {
        if self.disks.is_empty() {
            return Err(RaidError::NotEnoughDisks);
        }

        let num_disks = self.disks.len() as u64;
        let stripe = logical_block / self.chunk_size;
        let offset_in_chunk = logical_block % self.chunk_size;

        let disk_index = (stripe % num_disks) as usize;
        let disk_offset = (stripe / num_disks) * self.chunk_size + offset_in_chunk;

        Ok(StripeMap {
            disk_index,
            disk_offset,
            parity_disk: None,
        })
    }

    /// Map a logical block for RAID5 (left-symmetric layout).
    ///
    /// Parity rotates backward from the last disk, one position per
    /// stripe row; the first data chunk of a row is placed on the disk
    /// immediately *after* the parity disk, wrapping around the array.
    /// This matches Linux MD's `ALGORITHM_LEFT_SYMMETRIC`, which is
    /// what `Superblock::layout == 0` advertises.
    pub fn raid5_map(&self, logical_block: u64) -> Result<StripeMap, RaidError> {
        if self.disks.len() < 3 {
            return Err(RaidError::NotEnoughDisks);
        }

        let num_disks = self.disks.len() as u64;
        let data_disks = num_disks - 1;
        let stripe = logical_block / self.chunk_size;
        let offset_in_chunk = logical_block % self.chunk_size;

        // Which full stripe (row of chunks across all data disks).
        let stripe_row = stripe / data_disks;
        // Position within the stripe row.
        let data_index = stripe % data_disks;

        // Parity rotates backward, starting at the last disk.
        let parity_disk = (num_disks - 1 - (stripe_row % num_disks)) as usize;

        // Left-symmetric data placement: start just after the parity
        // disk and wrap around. (The previous skip-over mapping was
        // the left-ASYMMETRIC layout, contradicting the declared
        // layout value in the superblock.)
        let physical_disk = ((parity_disk as u64 + 1 + data_index) % num_disks) as usize;

        let disk_offset = stripe_row * self.chunk_size + offset_in_chunk;

        Ok(StripeMap {
            disk_index: physical_disk,
            disk_offset,
            parity_disk: Some(parity_disk),
        })
    }

    /// Read a stripe (dispatch by RAID level).
    pub fn read_stripe(&self, logical_block: u64) -> Result<StripeMap, RaidError> {
        match self.level {
            RaidLevel::Raid0 => self.stripe_map(logical_block),
            RaidLevel::Raid1 => self.mirror_read(logical_block),
            RaidLevel::Raid5 => self.raid5_map(logical_block),
        }
    }

    /// Write a stripe (dispatch by RAID level).
    ///
    /// Returns every disk location that must be written: one target
    /// for RAID0, all writable mirrors for RAID1, and data + parity
    /// chunks for RAID5 (a full implementation would also perform the
    /// read-modify-write needed to recompute the parity contents).
    pub fn write_stripe(&self, logical_block: u64) -> Result<Vec<StripeMap>, RaidError> {
        match self.level {
            RaidLevel::Raid0 => {
                let map = self.stripe_map(logical_block)?;
                Ok(vec![map])
            }
            RaidLevel::Raid1 => self.mirror_write(logical_block),
            RaidLevel::Raid5 => {
                let map = self.raid5_map(logical_block)?;
                // For RAID5 write: need to update data disk + parity disk.
                let mut writes = vec![map];
                if let Some(parity_idx) = map.parity_disk {
                    writes.push(StripeMap {
                        disk_index: parity_idx,
                        disk_offset: map.disk_offset,
                        parity_disk: None,
                    });
                }
                Ok(writes)
            }
        }
    }

    /// RAID1: read from the first active mirror.
    fn mirror_read(&self, logical_block: u64) -> Result<StripeMap, RaidError> {
        for (i, disk) in self.disks.iter().enumerate() {
            if disk.state == DiskState::Active {
                return Ok(StripeMap {
                    disk_index: i,
                    disk_offset: logical_block,
                    parity_disk: None,
                });
            }
        }
        // No readable mirror left.
        Err(RaidError::ArrayFailed)
    }

    /// RAID1: write to all active mirrors.
    ///
    /// Rebuilding mirrors also receive new writes, so data written
    /// during a rebuild is not lost on the rebuild target.
    fn mirror_write(&self, logical_block: u64) -> Result<Vec<StripeMap>, RaidError> {
        let mut writes = Vec::new();
        for (i, disk) in self.disks.iter().enumerate() {
            if disk.state == DiskState::Active || disk.state == DiskState::Rebuilding {
                writes.push(StripeMap {
                    disk_index: i,
                    disk_offset: logical_block,
                    parity_disk: None,
                });
            }
        }
        if writes.is_empty() {
            return Err(RaidError::ArrayFailed);
        }
        Ok(writes)
    }

    /// Compute XOR parity across data blocks.
    ///
    /// The parity length equals the first block's length; shorter
    /// blocks contribute only their prefix. Returns an empty vector
    /// for an empty input slice.
    pub fn compute_parity(blocks: &[&[u8]]) -> Vec<u8> {
        if blocks.is_empty() {
            return Vec::new();
        }
        let len = blocks[0].len();
        let mut parity = vec![0u8; len];
        for block in blocks {
            xor_blocks(&mut parity, &block[..core::cmp::min(block.len(), len)]);
        }
        parity
    }

    /// Rebuild a disk from the remaining members.
    ///
    /// RAID0 has no redundancy and always fails. RAID1 requires at
    /// least one *other* active mirror as the copy source. RAID5
    /// requires that no disk other than the rebuild target has failed.
    /// The data copy itself is stubbed out; state transitions and
    /// progress reporting are modeled.
    pub fn rebuild(&mut self, failed_disk_idx: usize) -> Result<(), RaidError> {
        if failed_disk_idx >= self.disks.len() {
            return Err(RaidError::DiskNotFound);
        }

        match self.level {
            RaidLevel::Raid0 => {
                // RAID0 cannot rebuild: lost data is unrecoverable.
                Err(RaidError::ArrayFailed)
            }
            RaidLevel::Raid1 => {
                // The copy source must be a healthy mirror other than
                // the disk being rebuilt (previously the target itself
                // could satisfy this check).
                let has_source = self
                    .disks
                    .iter()
                    .enumerate()
                    .any(|(i, d)| i != failed_disk_idx && d.state == DiskState::Active);

                if !has_source {
                    return Err(RaidError::ArrayFailed);
                }

                self.disks[failed_disk_idx].mark_rebuilding();
                self.state = ArrayState::Rebuilding;
                // In production: copy all data from active mirror to rebuilding disk
                self.rebuild_progress = 100;
                self.disks[failed_disk_idx].mark_active();
                self.update_state();
                Ok(())
            }
            RaidLevel::Raid5 => {
                // Reconstruction XORs every surviving member, so no
                // disk other than the rebuild target may be failed
                // (previously a failed non-target disk was tolerated
                // whenever the target itself was not in Failed state).
                let other_failed = self
                    .disks
                    .iter()
                    .enumerate()
                    .filter(|(i, d)| *i != failed_disk_idx && d.state == DiskState::Failed)
                    .count();

                if other_failed > 0 {
                    return Err(RaidError::ArrayFailed);
                }

                self.disks[failed_disk_idx].mark_rebuilding();
                self.state = ArrayState::Rebuilding;
                // In production: XOR all other disks to reconstruct failed disk
                self.rebuild_progress = 100;
                self.disks[failed_disk_idx].mark_active();
                self.update_state();
                Ok(())
            }
        }
    }

    /// Check array health and update state.
    pub fn check_health(&mut self) -> ArrayState {
        self.update_state();
        self.state
    }

    /// Replace a failed disk with a hot spare and start a rebuild.
    ///
    /// Returns `DiskNotFound` when the index is out of range or no
    /// suitable spare exists, and `InvalidConfig` when the target
    /// disk has not actually failed.
    pub fn replace_disk(&mut self, failed_disk_idx: usize) -> Result<(), RaidError> {
        if failed_disk_idx >= self.disks.len() {
            return Err(RaidError::DiskNotFound);
        }
        if self.disks[failed_disk_idx].state != DiskState::Failed {
            return Err(RaidError::InvalidConfig);
        }

        // Find a spare large enough to hold the array's per-device
        // data area (an undersized spare cannot take over the slot).
        let spare_idx = self
            .spares
            .iter()
            .position(|s| s.state == DiskState::Spare && s.size_blocks >= self.superblock.size);

        if let Some(idx) = spare_idx {
            let mut spare = self.spares.remove(idx);
            spare.state = DiskState::Rebuilding;
            // The replacement inherits the failed member's slot id.
            spare.id = self.disks[failed_disk_idx].id;
            self.disks[failed_disk_idx] = spare;
            self.rebuild(failed_disk_idx)
        } else {
            Err(RaidError::DiskNotFound)
        }
    }

    /// Add a hot spare disk (its state is forced to `Spare`).
    pub fn add_spare(&mut self, disk: RaidDisk) {
        let mut spare = disk;
        spare.state = DiskState::Spare;
        self.spares.push(spare);
    }

    /// Get the number of active disks.
    pub fn active_disk_count(&self) -> usize {
        self.disks
            .iter()
            .filter(|d| d.state == DiskState::Active)
            .count()
    }

    /// Get the number of failed disks.
    pub fn failed_disk_count(&self) -> usize {
        self.disks
            .iter()
            .filter(|d| d.state == DiskState::Failed)
            .count()
    }

    /// Get usable capacity in blocks.
    pub fn capacity_blocks(&self) -> u64 {
        let min_size = self.disks.iter().map(|d| d.size_blocks).min().unwrap_or(0);

        match self.level {
            RaidLevel::Raid0 => min_size * self.disks.len() as u64,
            RaidLevel::Raid1 => min_size,
            // saturating_sub avoids a debug-build underflow panic on
            // an (invalid) empty disk list; min_size is 0 in that
            // case, so the product is still 0.
            RaidLevel::Raid5 => min_size * (self.disks.len() as u64).saturating_sub(1),
        }
    }

    /// Update array state based on disk states.
    fn update_state(&mut self) {
        let failed = self.failed_disk_count();
        let rebuilding = self
            .disks
            .iter()
            .filter(|d| d.state == DiskState::Rebuilding)
            .count();

        self.state = match self.level {
            RaidLevel::Raid0 => {
                // Any failure loses data: the array cannot run.
                if failed > 0 {
                    ArrayState::Inactive
                } else {
                    ArrayState::Active
                }
            }
            RaidLevel::Raid1 => {
                // Operational as long as one mirror survives.
                if failed >= self.disks.len() {
                    ArrayState::Inactive
                } else if rebuilding > 0 {
                    ArrayState::Rebuilding
                } else if failed > 0 {
                    ArrayState::Degraded
                } else {
                    ArrayState::Active
                }
            }
            RaidLevel::Raid5 => {
                // RAID5 tolerates exactly one failed disk.
                if failed > 1 {
                    ArrayState::Inactive
                } else if rebuilding > 0 {
                    ArrayState::Rebuilding
                } else if failed == 1 {
                    ArrayState::Degraded
                } else {
                    ArrayState::Active
                }
            }
        };
    }
}
693
/// XOR a source block into a destination block, byte by byte.
///
/// Only the overlapping prefix (the shorter of the two slices) is
/// combined; any trailing bytes of the longer slice are left as-is.
pub fn xor_blocks(dest: &mut [u8], src: &[u8]) {
    // `zip` stops at the shorter slice, matching the explicit
    // min-length bound of the index-based formulation.
    for (d, s) in dest.iter_mut().zip(src.iter()) {
        *d ^= *s;
    }
}
701
702// ---------------------------------------------------------------------------
703// RAID Manager
704// ---------------------------------------------------------------------------
705
/// RAID manager: manages multiple RAID arrays.
#[cfg(feature = "alloc")]
pub struct RaidManager {
    /// Arrays indexed by name (sorted by `BTreeMap` key order).
    arrays: BTreeMap<String, RaidArray>,
}
712
// `Default` simply delegates to `new` so the manager can be used in
// `..Default::default()`-style construction.
#[cfg(feature = "alloc")]
impl Default for RaidManager {
    fn default() -> Self {
        Self::new()
    }
}
719
#[cfg(feature = "alloc")]
impl RaidManager {
    /// Create an empty manager with no registered arrays.
    pub fn new() -> Self {
        RaidManager {
            arrays: BTreeMap::new(),
        }
    }

    /// Create and register a new RAID array under `name`.
    ///
    /// Fails with `AlreadyExists` when the name is taken, or
    /// propagates any error from `RaidArray::new` (for example
    /// `NotEnoughDisks`).
    pub fn create_array(
        &mut self,
        name: &str,
        level: RaidLevel,
        disks: Vec<RaidDisk>,
    ) -> Result<(), RaidError> {
        if self.arrays.contains_key(name) {
            return Err(RaidError::AlreadyExists);
        }
        self.arrays
            .insert(String::from(name), RaidArray::new(name, level, disks)?);
        Ok(())
    }

    /// Destroy an array, returning `NotFound` if it is not registered.
    pub fn destroy_array(&mut self, name: &str) -> Result<(), RaidError> {
        match self.arrays.remove(name) {
            Some(_) => Ok(()),
            None => Err(RaidError::NotFound),
        }
    }

    /// Add a hot spare to the named array.
    pub fn add_spare(&mut self, array_name: &str, disk: RaidDisk) -> Result<(), RaidError> {
        match self.arrays.get_mut(array_name) {
            Some(array) => {
                array.add_spare(disk);
                Ok(())
            }
            None => Err(RaidError::NotFound),
        }
    }

    /// Status summary of all arrays as
    /// `(name, state, active disks, total disks)` tuples.
    pub fn get_status(&self) -> Vec<(&str, ArrayState, usize, usize)> {
        let mut summary = Vec::with_capacity(self.arrays.len());
        for (name, array) in &self.arrays {
            summary.push((
                name.as_str(),
                array.state,
                array.active_disk_count(),
                array.disks.len(),
            ));
        }
        summary
    }

    /// Look up an array by name.
    pub fn get_array(&self, name: &str) -> Option<&RaidArray> {
        self.arrays.get(name)
    }

    /// Look up an array by name, mutably.
    pub fn get_array_mut(&mut self, name: &str) -> Option<&mut RaidArray> {
        self.arrays.get_mut(name)
    }

    /// Number of managed arrays.
    pub fn array_count(&self) -> usize {
        self.arrays.len()
    }
}
789
790// ---------------------------------------------------------------------------
791// Tests
792// ---------------------------------------------------------------------------
793
794#[cfg(test)]
795mod tests {
796    use super::*;
797
    /// Build `count` healthy disks of `size` blocks each, named
    /// /dev/vda, /dev/vdb, ... in slot order.
    fn make_disks(count: usize, size: u64) -> Vec<RaidDisk> {
        (0..count)
            .map(|i| {
                RaidDisk::new(
                    i as u32,
                    &alloc::format!("/dev/vd{}", (b'a' + i as u8) as char),
                    size,
                )
            })
            .collect()
    }
809
    // Minimum member counts per level: 2 for striping/mirroring,
    // 3 for parity RAID.
    #[test]
    fn test_raid_level_min_disks() {
        assert_eq!(RaidLevel::Raid0.min_disks(), 2);
        assert_eq!(RaidLevel::Raid1.min_disks(), 2);
        assert_eq!(RaidLevel::Raid5.min_disks(), 3);
    }
816
    // Data-disk counts: RAID0 uses all, RAID1 stores one copy,
    // RAID5 loses one disk's worth to parity.
    #[test]
    fn test_raid_level_data_disks() {
        assert_eq!(RaidLevel::Raid0.data_disk_count(4), 4);
        assert_eq!(RaidLevel::Raid1.data_disk_count(2), 1);
        assert_eq!(RaidLevel::Raid5.data_disk_count(4), 3);
    }
823
    // A single disk is below RAID0's two-disk minimum.
    #[test]
    fn test_raid0_not_enough_disks() {
        let disks = make_disks(1, 1000);
        assert_eq!(
            RaidArray::new("md0", RaidLevel::Raid0, disks),
            Err(RaidError::NotEnoughDisks)
        );
    }
832
    // RAID0 chunks rotate round-robin: chunk 0 on disk 0 at offset 0,
    // chunk 1 on disk 1.
    #[test]
    fn test_raid0_stripe_map() {
        let disks = make_disks(3, 10000);
        let array = RaidArray::new("md0", RaidLevel::Raid0, disks).unwrap();

        let map = array.stripe_map(0).unwrap();
        assert_eq!(map.disk_index, 0);
        assert_eq!(map.disk_offset, 0);

        // Second chunk goes to disk 1
        let map2 = array.stripe_map(array.chunk_size).unwrap();
        assert_eq!(map2.disk_index, 1);
    }
846
    // RAID1 reads map the logical block 1:1 onto a mirror's offset.
    #[test]
    fn test_raid1_mirror_read() {
        let disks = make_disks(2, 10000);
        let array = RaidArray::new("md1", RaidLevel::Raid1, disks).unwrap();
        let map = array.read_stripe(500).unwrap();
        assert_eq!(map.disk_offset, 500);
    }
854
    // RAID1 writes fan out to every active mirror.
    #[test]
    fn test_raid1_mirror_write() {
        let disks = make_disks(2, 10000);
        let array = RaidArray::new("md1", RaidLevel::Raid1, disks).unwrap();
        let writes = array.write_stripe(100).unwrap();
        // Should write to both mirrors
        assert_eq!(writes.len(), 2);
    }
863
    // RAID5 mapping always yields a parity disk distinct from the
    // data disk.
    #[test]
    fn test_raid5_map() {
        let disks = make_disks(4, 10000);
        let array = RaidArray::new("md5", RaidLevel::Raid5, disks).unwrap();
        let map = array.raid5_map(0).unwrap();
        assert!(map.parity_disk.is_some());
        // Data disk should not be the parity disk
        assert_ne!(map.disk_index, map.parity_disk.unwrap());
    }
873
    // Byte-wise XOR of two equal-length blocks.
    #[test]
    fn test_xor_blocks() {
        let mut a = [0xAA, 0xBB, 0xCC, 0xDD];
        let b = [0x55, 0x44, 0x33, 0x22];
        xor_blocks(&mut a, &b);
        assert_eq!(a, [0xFF, 0xFF, 0xFF, 0xFF]);
    }
881
    // XOR parity is its own inverse: parity ^ block1 == block2.
    #[test]
    fn test_compute_parity() {
        let block1: &[u8] = &[0xFF, 0x00, 0xAA];
        let block2: &[u8] = &[0x00, 0xFF, 0x55];
        let parity = RaidArray::compute_parity(&[block1, block2]);
        assert_eq!(parity, &[0xFF, 0xFF, 0xFF]);

        // XOR parity with one block should give the other
        let mut recovered = parity.clone();
        xor_blocks(&mut recovered, block1);
        assert_eq!(recovered, block2);
    }
894
    // Round-trip: serialize produces a fixed-size record that
    // deserializes back to the same key fields.
    #[test]
    fn test_superblock_serialize_deserialize() {
        let sb = Superblock::new("md0", RaidLevel::Raid5, 4, 100000);
        let bytes = sb.serialize();
        assert_eq!(bytes.len(), SUPERBLOCK_SIZE);

        let parsed = Superblock::deserialize(&bytes).unwrap();
        assert_eq!(parsed.magic, RAID_SUPER_MAGIC);
        assert_eq!(parsed.level, RaidLevel::Raid5);
        assert_eq!(parsed.raid_disks, 4);
        assert_eq!(parsed.set_name, "md0");
    }
907
    // A wrong magic number must be rejected.
    #[test]
    fn test_superblock_bad_magic() {
        let mut bytes = vec![0u8; SUPERBLOCK_SIZE];
        bytes[0..4].copy_from_slice(&0xDEAD_BEEFu32.to_le_bytes());
        assert!(Superblock::deserialize(&bytes).is_none());
    }
914
    // Capacity per level: RAID0 = n*size, RAID1 = size,
    // RAID5 = (n-1)*size.
    #[test]
    fn test_capacity_blocks() {
        let disks = make_disks(4, 10000);
        let r0 = RaidArray::new("r0", RaidLevel::Raid0, disks.clone()).unwrap();
        assert_eq!(r0.capacity_blocks(), 40000);

        let r1 = RaidArray::new("r1", RaidLevel::Raid1, make_disks(2, 10000)).unwrap();
        assert_eq!(r1.capacity_blocks(), 10000);

        let r5 = RaidArray::new("r5", RaidLevel::Raid5, disks).unwrap();
        assert_eq!(r5.capacity_blocks(), 30000);
    }
927
    // Create/destroy lifecycle, including the duplicate-name error.
    #[test]
    fn test_raid_manager_create_destroy() {
        let mut mgr = RaidManager::new();
        let disks = make_disks(3, 10000);
        mgr.create_array("md0", RaidLevel::Raid5, disks).unwrap();
        assert_eq!(mgr.array_count(), 1);

        // Duplicate name should fail
        let disks2 = make_disks(2, 10000);
        assert_eq!(
            mgr.create_array("md0", RaidLevel::Raid1, disks2),
            Err(RaidError::AlreadyExists)
        );

        mgr.destroy_array("md0").unwrap();
        assert_eq!(mgr.array_count(), 0);
    }
945
    // Status summary reports (name, state, active disks, total disks).
    #[test]
    fn test_raid_manager_status() {
        let mut mgr = RaidManager::new();
        let disks = make_disks(2, 5000);
        mgr.create_array("md0", RaidLevel::Raid1, disks).unwrap();

        let status = mgr.get_status();
        assert_eq!(status.len(), 1);
        assert_eq!(status[0].0, "md0");
        assert_eq!(status[0].1, ArrayState::Active);
        assert_eq!(status[0].2, 2); // active disks
        assert_eq!(status[0].3, 2); // total disks
    }
959}