⚠️ VeridianOS Kernel Documentation - This is low-level kernel code. All functions are unsafe unless explicitly marked otherwise. no_std

veridian_kernel/drivers/
iommu.rs

1//! IOMMU (I/O Memory Management Unit) Foundation
2//!
3//! Provides DMA address translation and device isolation via Intel VT-d
4//! (or equivalent on other architectures). This module handles:
5//! - DMAR table detection from ACPI
6//! - IOMMU unit discovery and capability reading
7//! - Identity mapping for known-safe devices
8//! - DMA coherency flags and scatter-gather list support
9//!
10//! Full IOMMU page table management is deferred to a later phase; this
11//! module provides the foundation for safe DMA with identity mapping.
12
13use alloc::vec::Vec;
14use core::sync::atomic::{AtomicBool, Ordering};
15
16use spin::Mutex;
17
18use crate::error::{KernelError, KernelResult};
19
20// ---------------------------------------------------------------------------
21// DMAR Table Structures (Intel VT-d DMA Remapping)
22// ---------------------------------------------------------------------------
23
/// DMAR Remapping Structure types (per Intel VT-d specification).
///
/// Only the variants this module cares about are listed; other type codes
/// (e.g. ATSR=2, SATC=4) exist and are skipped by the parser.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u16)]
pub enum DmarStructureType {
    /// DMA Remapping Hardware Unit Definition (DRHD).
    Drhd = 0,
    /// Reserved Memory Region Reporting (RMRR).
    Rmrr = 1,
    /// ACPI Namespace Device Declaration (ANDD).
    Andd = 3,
}
35
/// A DMA Remapping Hardware Unit discovered from the ACPI DMAR table.
///
/// Produced by `parse_dmar`; consumed by `init_iommu_unit`.
#[derive(Debug, Clone)]
pub struct DrhdUnit {
    /// Segment number (PCI segment group).
    pub segment: u16,
    /// Register base address (MMIO); not yet mapped at parse time.
    pub register_base: u64,
    /// Whether this unit covers all PCI devices in the segment
    /// (INCLUDE_PCI_ALL, bit 0 of the DRHD flags byte).
    pub include_all: bool,
    /// Device scope entries (bus:dev.fn tuples).
    pub device_scope: Vec<DeviceScope>,
}
48
/// A Reserved Memory Region Reporting entry.
///
/// NOTE(review): per VT-d convention these regions typically must remain
/// accessible to the listed devices once translation is enabled — confirm
/// handling before moving beyond identity mapping.
#[derive(Debug, Clone)]
pub struct RmrrRegion {
    /// Segment number.
    pub segment: u16,
    /// Base address of reserved region.
    pub base_address: u64,
    /// Limit address (inclusive) of reserved region.
    pub limit_address: u64,
    /// Device scope entries that require this reserved region.
    pub device_scope: Vec<DeviceScope>,
}
61
/// Device scope entry within DRHD or RMRR structures.
///
/// Identifies a specific device (or sub-hierarchy/IOAPIC/HPET) that the
/// parent structure applies to. Built by `parse_device_scope`.
#[derive(Debug, Clone, Copy)]
pub struct DeviceScope {
    /// Scope type (1=PCI endpoint, 2=PCI sub-hierarchy, 3=IOAPIC, etc.).
    pub scope_type: u8,
    /// Enumeration ID (IOAPIC ID or HPET number).
    pub enumeration_id: u8,
    /// Start bus number.
    pub start_bus: u8,
    /// Path entries (dev:fn pairs); only the first `path_len` are valid.
    /// At most 4 entries are retained even if the table lists more.
    pub path: [(u8, u8); 4],
    /// Number of valid path entries (0..=4).
    pub path_len: u8,
}
76
impl DeviceScope {
    /// An all-zero scope entry (no valid path components).
    ///
    /// `const fn` so it is usable in const contexts; currently unused,
    /// hence the `dead_code` allowance.
    #[allow(dead_code)]
    const fn empty() -> Self {
        Self {
            scope_type: 0,
            enumeration_id: 0,
            start_bus: 0,
            path: [(0, 0); 4],
            path_len: 0,
        }
    }
}
89
/// Parsed DMAR (DMA Remapping) table information.
///
/// Output of `parse_dmar`; stored globally in `DMAR_STATE` by `init`.
#[derive(Debug, Clone)]
pub struct DmarInfo {
    /// Host address width (physical address bits - 1).
    pub host_address_width: u8,
    /// Global flags from the DMAR header (byte at offset 37).
    pub flags: u8,
    /// DMA Remapping Hardware Units.
    pub drhd_units: Vec<DrhdUnit>,
    /// Reserved Memory Region Reporting entries.
    pub rmrr_regions: Vec<RmrrRegion>,
}
102
103// ---------------------------------------------------------------------------
104// Scatter-Gather List for multi-buffer DMA
105// ---------------------------------------------------------------------------
106
/// A single entry in a scatter-gather list.
#[derive(Debug, Clone, Copy)]
pub struct ScatterGatherEntry {
    /// Physical address of the buffer segment.
    pub phys_addr: u64,
    /// Length of the buffer segment in bytes.
    pub length: u32,
}

/// Scatter-gather list for multi-buffer DMA transfers.
///
/// Devices like NVMe and network NICs use scatter-gather to describe
/// non-contiguous physical memory regions for a single logical transfer.
#[derive(Debug, Clone)]
pub struct ScatterGatherList {
    /// Entries in the scatter-gather list.
    pub entries: Vec<ScatterGatherEntry>,
    /// Total byte length across all entries (saturates at `u64::MAX`).
    pub total_length: u64,
}

impl ScatterGatherList {
    /// Create an empty scatter-gather list.
    pub fn new() -> Self {
        Self {
            entries: Vec::new(),
            total_length: 0,
        }
    }

    /// Add a physical buffer segment to the scatter-gather list.
    pub fn add_entry(&mut self, phys_addr: u64, length: u32) {
        self.entries.push(ScatterGatherEntry { phys_addr, length });
        // saturating_add: a hostile or corrupt descriptor chain must not
        // wrap `total_length` (release builds would wrap silently with `+=`,
        // debug builds would panic in the kernel).
        self.total_length = self.total_length.saturating_add(u64::from(length));
    }

    /// Number of entries in the list.
    pub fn entry_count(&self) -> usize {
        self.entries.len()
    }

    /// Whether the list contains no entries.
    pub fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }
}

impl Default for ScatterGatherList {
    fn default() -> Self {
        Self::new()
    }
}
154
155// ---------------------------------------------------------------------------
156// DMA Coherency
157// ---------------------------------------------------------------------------
158
/// DMA coherency policy for buffer allocation.
///
/// Only `Coherent` is produced by this module today (`alloc_dma_buffer`);
/// the other variants exist for platforms/devices that need them.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DmaCoherency {
    /// Hardware maintains cache coherency (default on x86).
    Coherent,
    /// Software must explicitly manage cache flushes.
    NonCoherent,
    /// Write-combining: optimized for sequential writes (framebuffers).
    WriteCombining,
}
169
/// DMA direction hint for buffer mapping.
///
/// Currently a hint only: nothing in this module enforces the direction,
/// it is recorded on the `DmaMappedBuffer` for drivers to consult.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DmaDirection {
    /// Device reads from memory (host -> device).
    ToDevice,
    /// Device writes to memory (device -> host).
    FromDevice,
    /// Bidirectional DMA.
    Bidirectional,
}
180
/// A DMA-mapped buffer with coherency and direction tracking.
///
/// Created by `alloc_dma_buffer` and released with `free_dma_buffer`.
#[derive(Debug)]
pub struct DmaMappedBuffer {
    /// Virtual address of the buffer (kernel linear mapping of `dma_addr`).
    pub virt_addr: usize,
    /// Physical address for DMA (may differ from actual phys with IOMMU;
    /// equal to it under the current identity mapping).
    pub dma_addr: u64,
    /// Buffer size in bytes (rounded up to whole 4 KiB frames by the
    /// allocator).
    pub size: usize,
    /// Coherency policy.
    pub coherency: DmaCoherency,
    /// Transfer direction.
    pub direction: DmaDirection,
}
195
196// ---------------------------------------------------------------------------
197// Global IOMMU state
198// ---------------------------------------------------------------------------
199
/// Whether IOMMU/DMAR has been detected and initialized.
///
/// Set to `true` by `init()` on every exit path — including "no DMAR
/// table found" and parse failure — so the probe runs at most once.
static IOMMU_INITIALIZED: AtomicBool = AtomicBool::new(false);

/// Parsed DMAR information (None if no DMAR table found).
/// Spinlock-guarded; written by `init()`, read via `with_dmar_info`.
static DMAR_STATE: Mutex<Option<DmarInfo>> = Mutex::new(None);
205
/// Check whether IOMMU support has been initialized.
///
/// Returns `true` once `init()` has completed its probe, regardless of
/// whether IOMMU hardware was actually found — consult `init()`'s return
/// value (or `with_dmar_info`) for hardware presence.
pub fn is_initialized() -> bool {
    IOMMU_INITIALIZED.load(Ordering::Acquire)
}
210
211/// Access parsed DMAR information.
212pub fn with_dmar_info<R, F: FnOnce(&DmarInfo) -> R>(f: F) -> Option<R> {
213    let guard = DMAR_STATE.lock();
214    guard.as_ref().map(f)
215}
216
217// ---------------------------------------------------------------------------
218// DMAR/DRHD Table Parsing
219// ---------------------------------------------------------------------------
220
/// DMAR remapping structure type codes (mirror the `DmarStructureType`
/// discriminants; kept as plain consts for use in `match` arms).
const DMAR_TYPE_DRHD: u16 = 0;
const DMAR_TYPE_RMRR: u16 = 1;

/// DMAR table header size: 36-byte ACPI SDT header + 1 byte host_address_width
/// + 1 byte flags + 10 reserved bytes = 48 bytes.
const DMAR_HEADER_SIZE: usize = 48;

/// Intel VT-d register offsets.
/// Unused until the register window can be MMIO-mapped (see
/// `init_iommu_unit`), hence the underscore prefixes.
const _VTD_CAP_REG: u64 = 0x08;
const _VTD_ECAP_REG: u64 = 0x10;
232
/// Represents a parsed IOMMU hardware unit with its capabilities.
///
/// Built by `init_iommu_unit` from a `DrhdUnit`. Capability fields are
/// zero placeholders until the register window is MMIO-mapped.
#[derive(Debug, Clone)]
pub struct IommuUnit {
    /// Register base address (MMIO).
    pub register_base: u64,
    /// PCI segment group.
    pub segment: u16,
    /// Capability register value (offset 0x08); 0 until MMIO reads work.
    pub capability: u64,
    /// Extended capability register value (offset 0x10); 0 until MMIO reads
    /// work.
    pub extended_capability: u64,
    /// Whether this unit covers all devices.
    pub include_all: bool,
}
247
/// IOMMU context table (256 entries per PCI bus, page-aligned).
///
/// Each entry maps a PCI device:function pair to a DMA domain with its
/// own set of page tables. Declared for the later translation phase;
/// nothing in this file populates it yet.
#[derive(Debug)]
pub struct IommuContextTable {
    /// Physical address of the context table page.
    pub phys_addr: u64,
    /// PCI bus number this table covers.
    pub bus: u8,
}
259
260/// Parse a device scope entry from raw DMAR data.
261///
262/// Device scope entries appear within DRHD and RMRR structures.
263/// Format: type(1) + length(1) + reserved(2) + enum_id(1) + start_bus(1) +
264/// path(variable)
265fn parse_device_scope(data: &[u8]) -> Vec<DeviceScope> {
266    let mut scopes = Vec::new();
267    let mut offset = 0;
268
269    while offset + 6 <= data.len() {
270        let scope_type = data[offset];
271        let scope_len = data[offset + 1] as usize;
272
273        if scope_len < 6 || offset + scope_len > data.len() {
274            break;
275        }
276
277        let enumeration_id = data[offset + 4];
278        let start_bus = data[offset + 5];
279
280        // Parse path entries (dev:fn pairs, 2 bytes each)
281        let path_start = offset + 6;
282        let path_bytes = scope_len - 6;
283        let num_path_entries = path_bytes / 2;
284        let mut path = [(0u8, 0u8); 4];
285        let path_count = num_path_entries.min(4);
286
287        for (i, slot) in path.iter_mut().enumerate().take(path_count) {
288            let p = path_start + i * 2;
289            if p + 1 < data.len() {
290                *slot = (data[p], data[p + 1]);
291            }
292        }
293
294        scopes.push(DeviceScope {
295            scope_type,
296            enumeration_id,
297            start_bus,
298            path,
299            path_len: path_count as u8,
300        });
301
302        offset += scope_len;
303    }
304
305    scopes
306}
307
308/// Parse the ACPI DMAR table from raw bytes.
309///
310/// Walks the variable-length remapping structure entries and extracts
311/// DRHD (hardware unit) and RMRR (reserved memory) entries.
312pub fn parse_dmar(dmar_data: &[u8]) -> KernelResult<DmarInfo> {
313    if dmar_data.len() < DMAR_HEADER_SIZE {
314        return Err(KernelError::InvalidArgument {
315            name: "DMAR table",
316            value: "too small",
317        });
318    }
319
320    // Host address width at offset 36 (after 36-byte ACPI SDT header)
321    let host_address_width = dmar_data[36];
322    let flags = dmar_data[37];
323
324    let mut drhd_units = Vec::new();
325    let mut rmrr_regions = Vec::new();
326
327    let mut offset = DMAR_HEADER_SIZE;
328
329    while offset + 4 <= dmar_data.len() {
330        // Remapping structure header: type(2) + length(2)
331        let struct_type = u16::from_le_bytes([dmar_data[offset], dmar_data[offset + 1]]);
332        let struct_len =
333            u16::from_le_bytes([dmar_data[offset + 2], dmar_data[offset + 3]]) as usize;
334
335        if struct_len < 4 || offset + struct_len > dmar_data.len() {
336            break;
337        }
338
339        match struct_type {
340            DMAR_TYPE_DRHD => {
341                // DRHD structure: type(2) + len(2) + flags(1) + reserved(1)
342                // + segment(2) + register_base(8) + device_scope(variable)
343                if struct_len >= 16 {
344                    let drhd_flags = dmar_data[offset + 4];
345                    let segment =
346                        u16::from_le_bytes([dmar_data[offset + 6], dmar_data[offset + 7]]);
347                    let register_base = u64::from_le_bytes([
348                        dmar_data[offset + 8],
349                        dmar_data[offset + 9],
350                        dmar_data[offset + 10],
351                        dmar_data[offset + 11],
352                        dmar_data[offset + 12],
353                        dmar_data[offset + 13],
354                        dmar_data[offset + 14],
355                        dmar_data[offset + 15],
356                    ]);
357
358                    let include_all = (drhd_flags & 0x01) != 0;
359
360                    // Parse device scope entries (after the 16-byte DRHD header)
361                    let scope_data = if struct_len > 16 {
362                        &dmar_data[offset + 16..offset + struct_len]
363                    } else {
364                        &[]
365                    };
366                    let device_scope = parse_device_scope(scope_data);
367
368                    crate::println!(
369                        "[IOMMU]   DRHD: seg={}, base={:#x}, include_all={}, scopes={}",
370                        segment,
371                        register_base,
372                        include_all,
373                        device_scope.len()
374                    );
375
376                    drhd_units.push(DrhdUnit {
377                        segment,
378                        register_base,
379                        include_all,
380                        device_scope,
381                    });
382                }
383            }
384            DMAR_TYPE_RMRR => {
385                // RMRR structure: type(2) + len(2) + reserved(2) + segment(2)
386                // + base_addr(8) + limit_addr(8) + device_scope(variable)
387                if struct_len >= 24 {
388                    let segment =
389                        u16::from_le_bytes([dmar_data[offset + 6], dmar_data[offset + 7]]);
390                    let base_address = u64::from_le_bytes([
391                        dmar_data[offset + 8],
392                        dmar_data[offset + 9],
393                        dmar_data[offset + 10],
394                        dmar_data[offset + 11],
395                        dmar_data[offset + 12],
396                        dmar_data[offset + 13],
397                        dmar_data[offset + 14],
398                        dmar_data[offset + 15],
399                    ]);
400                    let limit_address = u64::from_le_bytes([
401                        dmar_data[offset + 16],
402                        dmar_data[offset + 17],
403                        dmar_data[offset + 18],
404                        dmar_data[offset + 19],
405                        dmar_data[offset + 20],
406                        dmar_data[offset + 21],
407                        dmar_data[offset + 22],
408                        dmar_data[offset + 23],
409                    ]);
410
411                    let scope_data = if struct_len > 24 {
412                        &dmar_data[offset + 24..offset + struct_len]
413                    } else {
414                        &[]
415                    };
416                    let device_scope = parse_device_scope(scope_data);
417
418                    crate::println!(
419                        "[IOMMU]   RMRR: seg={}, base={:#x}, limit={:#x}, scopes={}",
420                        segment,
421                        base_address,
422                        limit_address,
423                        device_scope.len()
424                    );
425
426                    rmrr_regions.push(RmrrRegion {
427                        segment,
428                        base_address,
429                        limit_address,
430                        device_scope,
431                    });
432                }
433            }
434            _ => {
435                // Skip unknown structure types (ATSR=2, ANDD=3, SATC=4, etc.)
436                crate::println!(
437                    "[IOMMU]   Unknown DMAR structure type {} (len={})",
438                    struct_type,
439                    struct_len
440                );
441            }
442        }
443
444        offset += struct_len;
445    }
446
447    Ok(DmarInfo {
448        host_address_width,
449        flags,
450        drhd_units,
451        rmrr_regions,
452    })
453}
454
455/// Initialize an IOMMU hardware unit from a parsed DRHD entry.
456///
457/// Reads capability registers from the MMIO region. Full translation
458/// enablement (root table pointer, global command register) requires
459/// MMIO mapping which is deferred until the VMM supports MMIO allocation.
460pub fn init_iommu_unit(drhd: &DrhdUnit) -> KernelResult<IommuUnit> {
461    // The register base needs to be MMIO-mapped to read capability registers.
462    // For now, record the unit with zero capabilities (MMIO mapping is done
463    // by the VMM when translation is actually enabled).
464    crate::println!(
465        "[IOMMU] Initializing IOMMU unit at {:#x} (segment {})",
466        drhd.register_base,
467        drhd.segment
468    );
469
470    Ok(IommuUnit {
471        register_base: drhd.register_base,
472        segment: drhd.segment,
473        capability: 0,
474        extended_capability: 0,
475        include_all: drhd.include_all,
476    })
477}
478
/// Create an identity domain for DMA address translation.
///
/// In identity mapping mode, all DMA addresses translate to the same
/// physical address. This is the simplest IOMMU configuration and
/// allows devices to perform DMA without address translation overhead.
///
/// Returns the physical address of the identity-mapped root table page.
///
/// NOTE(review): the allocated page is never freed by this module — the
/// caller owns it for the lifetime of the domain; confirm that is the
/// intended contract.
pub fn create_identity_domain() -> KernelResult<u64> {
    // Allocate a page for the root table (4096 bytes, 256 entries)
    let frame = crate::mm::FRAME_ALLOCATOR
        .lock()
        .allocate_frames(1, None)
        .map_err(|_| KernelError::OutOfMemory {
            requested: 4096,
            available: 0,
        })?;

    // Frame numbers are in 4 KiB units; convert to a byte address, then to
    // the kernel's linear-mapped virtual address so the page can be zeroed.
    let phys_addr = frame.as_u64() * 4096;
    let virt_addr = crate::mm::phys_to_virt_addr(phys_addr) as usize;

    // Zero the root table page
    // SAFETY: virt_addr points to a freshly allocated page mapped via
    // the bootloader's physical memory mapping. We zero it to create
    // an empty root table (all entries not-present).
    unsafe {
        core::ptr::write_bytes(virt_addr as *mut u8, 0, 4096);
    }

    crate::println!(
        "[IOMMU] Created identity domain root table at phys {:#x}",
        phys_addr
    );

    Ok(phys_addr)
}
514
515/// Initialize the IOMMU subsystem by parsing the ACPI DMAR table.
516///
517/// This function reads the DMAR table from ACPI (if present), discovers
518/// IOMMU hardware units and reserved memory regions, and stores the
519/// parsed information for use by device drivers.
520///
521/// Non-fatal: returns Ok(false) if no DMAR table is found (common on
522/// older hardware or VMs without IOMMU emulation).
523#[cfg(target_arch = "x86_64")]
524pub fn init() -> KernelResult<bool> {
525    if IOMMU_INITIALIZED.load(Ordering::Acquire) {
526        return Ok(true);
527    }
528
529    // Check if ACPI parser found a DMAR table during boot.
530    let dmar_data = crate::arch::x86_64::acpi::with_acpi_info(|info| {
531        if !info.has_dmar || info.dmar_address == 0 {
532            return None;
533        }
534
535        let addr = info.dmar_address as usize;
536        let len = info.dmar_length as usize;
537
538        crate::println!("[IOMMU] DMAR table at {:#x}, len {} bytes", addr, len);
539
540        // SAFETY: dmar_address was captured from a valid ACPI table mapped
541        // by the bootloader. The table remains in memory for the kernel's
542        // lifetime. dmar_length was read from the table header.
543        Some(unsafe { core::slice::from_raw_parts(addr as *const u8, len) })
544    });
545
546    let dmar_bytes = match dmar_data {
547        Some(Some(bytes)) => bytes,
548        _ => {
549            crate::println!("[IOMMU] No DMAR table found (IOMMU not available)");
550            IOMMU_INITIALIZED.store(true, Ordering::Release);
551            return Ok(false);
552        }
553    };
554
555    // Parse DRHD/RMRR structures from the DMAR table
556    match parse_dmar(dmar_bytes) {
557        Ok(info) => {
558            let num_units = info.drhd_units.len();
559            let num_rmrr = info.rmrr_regions.len();
560
561            // Initialize each IOMMU unit (read capabilities)
562            for drhd in &info.drhd_units {
563                if let Err(e) = init_iommu_unit(drhd) {
564                    crate::println!(
565                        "[IOMMU] Warning: failed to init unit at {:#x}: {:?}",
566                        drhd.register_base,
567                        e
568                    );
569                }
570            }
571
572            *DMAR_STATE.lock() = Some(info);
573            IOMMU_INITIALIZED.store(true, Ordering::Release);
574            crate::println!(
575                "[IOMMU] DMAR parsed: {} DRHD units, {} RMRR regions",
576                num_units,
577                num_rmrr
578            );
579            Ok(true)
580        }
581        Err(e) => {
582            crate::println!("[IOMMU] DMAR parse error: {:?}", e);
583            IOMMU_INITIALIZED.store(true, Ordering::Release);
584            Ok(false)
585        }
586    }
587}
588
/// Initialize IOMMU on non-x86 architectures (stub).
///
/// Always reports Ok(false): no IOMMU support is provided here yet.
#[cfg(not(target_arch = "x86_64"))]
pub fn init() -> KernelResult<bool> {
    // Crate-qualified like every other call site in this module; a bare
    // `println!` does not resolve in this no_std crate.
    crate::println!("[IOMMU] IOMMU not supported on this architecture");
    IOMMU_INITIALIZED.store(true, Ordering::Release);
    Ok(false)
}
596
597// ---------------------------------------------------------------------------
598// Identity Mapping (Phase 1 of IOMMU support)
599// ---------------------------------------------------------------------------
600
/// Create an identity mapping for a DMA region.
///
/// In identity mapping mode, DMA addresses equal physical addresses.
/// This is the simplest IOMMU configuration and is used as a first step
/// before full page-table-based translation is implemented.
///
/// Returns the DMA address (equal to phys_addr in identity mapping mode).
/// Never fails in the current implementation; the `Result` keeps the
/// signature stable for when real IOMMU mappings can fail.
pub fn identity_map_dma(phys_addr: u64, size: usize) -> KernelResult<u64> {
    // Without IOMMU hardware, DMA addresses are always physical addresses.
    // When IOMMU is present but in identity mapping mode, the mapping is
    // set up in the IOMMU page tables to translate DMA addr -> same phys addr.
    let _ = size; // Used when IOMMU page tables are implemented.
    Ok(phys_addr)
}
615
/// Remove a DMA identity mapping.
///
/// Counterpart of `identity_map_dma`. Currently a no-op because identity
/// mappings need no teardown; kept fallible for future page-table use.
pub fn unmap_dma(dma_addr: u64, size: usize) -> KernelResult<()> {
    // No-op in identity mapping mode.
    let _ = (dma_addr, size);
    Ok(())
}
622
623// ---------------------------------------------------------------------------
624// DMA Buffer Allocation Helpers
625// ---------------------------------------------------------------------------
626
627/// Allocate a physically contiguous DMA buffer.
628///
629/// Returns a `DmaMappedBuffer` with both virtual and DMA addresses.
630/// The buffer is cache-coherent by default on x86_64.
631pub fn alloc_dma_buffer(size: usize, direction: DmaDirection) -> KernelResult<DmaMappedBuffer> {
632    let num_frames = size.div_ceil(4096);
633
634    // Allocate contiguous physical frames.
635    let frame = crate::mm::FRAME_ALLOCATOR
636        .lock()
637        .allocate_frames(num_frames, None)
638        .map_err(|_| KernelError::OutOfMemory {
639            requested: size,
640            available: 0,
641        })?;
642
643    let phys_addr = frame.as_u64() * 4096;
644    let virt_addr = crate::mm::phys_to_virt_addr(phys_addr) as usize;
645
646    // Identity map for DMA (physical address = DMA address).
647    let dma_addr = identity_map_dma(phys_addr, size)?;
648
649    // Zero the buffer.
650    // SAFETY: virt_addr points to freshly allocated physical memory mapped
651    // into the kernel's virtual address space via the bootloader's physical
652    // memory mapping. The buffer is num_frames * 4096 bytes.
653    unsafe {
654        core::ptr::write_bytes(virt_addr as *mut u8, 0, num_frames * 4096);
655    }
656
657    Ok(DmaMappedBuffer {
658        virt_addr,
659        dma_addr,
660        size: num_frames * 4096,
661        coherency: DmaCoherency::Coherent, // x86 is always coherent
662        direction,
663    })
664}
665
/// Free a DMA-mapped buffer.
///
/// Unmaps the DMA mapping, then returns the backing frames to the frame
/// allocator. `buffer.size` divides evenly by 4096 because
/// `alloc_dma_buffer` rounds requests up to whole frames.
///
/// NOTE(review): the frame number is derived from `dma_addr`, which equals
/// the physical address only while identity mapping is in effect
/// (`identity_map_dma` returns `phys_addr` unchanged). If non-identity
/// translation is ever enabled, the original physical address must be
/// tracked separately — confirm before making that change.
pub fn free_dma_buffer(buffer: DmaMappedBuffer) -> KernelResult<()> {
    let num_frames = buffer.size / 4096;
    let frame_number = crate::mm::FrameNumber::new(buffer.dma_addr / 4096);

    // Remove DMA mapping.
    unmap_dma(buffer.dma_addr, buffer.size)?;

    // Return frames to the allocator.
    // Best-effort: a free failure is deliberately ignored (no recovery
    // path exists here).
    let _ = crate::mm::FRAME_ALLOCATOR
        .lock()
        .free_frames(frame_number, num_frames);

    Ok(())
}
681
682// ---------------------------------------------------------------------------
683// Tests
684// ---------------------------------------------------------------------------
685
686#[cfg(test)]
687mod tests {
688    use super::*;
689
690    #[test]
691    fn test_parse_dmar_too_small() {
692        let data = [0u8; 10];
693        assert!(parse_dmar(&data).is_err());
694    }
695
696    #[test]
697    fn test_parse_dmar_empty() {
698        // Minimal valid DMAR header (48 bytes) with no entries
699        let mut data = [0u8; 48];
700        // Signature "DMAR" at offset 0
701        data[0] = b'D';
702        data[1] = b'M';
703        data[2] = b'A';
704        data[3] = b'R';
705        // Length = 48
706        data[4] = 48;
707        // Host address width at offset 36
708        data[36] = 39; // 40-bit physical addresses
709                       // Flags at offset 37
710        data[37] = 0x01; // INTR_REMAP
711
712        let info = parse_dmar(&data).unwrap();
713        assert_eq!(info.host_address_width, 39);
714        assert_eq!(info.flags, 0x01);
715        assert!(info.drhd_units.is_empty());
716        assert!(info.rmrr_regions.is_empty());
717    }
718
    #[test]
    fn test_parse_dmar_with_drhd() {
        // DMAR header (48 bytes) + DRHD entry (16 bytes, no device scopes)
        let mut data = [0u8; 64];
        data[36] = 39; // host_address_width

        // DRHD at offset 48
        data[48] = 0; // type low
        data[49] = 0; // type high = 0 (DRHD)
        data[50] = 16; // length low
        data[51] = 0; // length high
        data[52] = 0x01; // flags: INCLUDE_PCI_ALL
                         // segment = 0 (offsets 54-55 left zero)
                         // register_base = 0xFED90000, little-endian at 56-63
        data[56] = 0x00;
        data[57] = 0x00;
        data[58] = 0xD9;
        data[59] = 0xFE;

        let info = parse_dmar(&data).unwrap();
        assert_eq!(info.drhd_units.len(), 1);
        assert!(info.drhd_units[0].include_all);
        assert_eq!(info.drhd_units[0].register_base, 0xFED9_0000);
        assert_eq!(info.drhd_units[0].segment, 0);
    }
744
    #[test]
    fn test_parse_dmar_with_rmrr() {
        // DMAR header (48 bytes) + RMRR entry (24 bytes, no device scopes)
        let mut data = [0u8; 72];
        data[36] = 39;

        // RMRR at offset 48
        data[48] = 1; // type low = RMRR
        data[49] = 0;
        data[50] = 24; // length
        data[51] = 0;
        // segment = 0 at offset 54-55
        // base_address = 0x000E0000 at offset 56-63 (little-endian)
        data[56] = 0x00;
        data[57] = 0x00;
        data[58] = 0x0E;
        data[59] = 0x00;
        // limit_address = 0x000FFFFF at offset 64-71 (little-endian)
        data[64] = 0xFF;
        data[65] = 0xFF;
        data[66] = 0x0F;
        data[67] = 0x00;

        let info = parse_dmar(&data).unwrap();
        assert_eq!(info.rmrr_regions.len(), 1);
        assert_eq!(info.rmrr_regions[0].base_address, 0x000E_0000);
        assert_eq!(info.rmrr_regions[0].limit_address, 0x000F_FFFF);
    }
773
774    #[test]
775    fn test_parse_device_scope() {
776        // Device scope: type=1 (PCI endpoint), len=8, enum_id=0, bus=0, path=(2,0)
777        let data = [1, 8, 0, 0, 0, 0, 2, 0];
778        let scopes = parse_device_scope(&data);
779        assert_eq!(scopes.len(), 1);
780        assert_eq!(scopes[0].scope_type, 1);
781        assert_eq!(scopes[0].start_bus, 0);
782        assert_eq!(scopes[0].path[0], (2, 0));
783        assert_eq!(scopes[0].path_len, 1);
784    }
785
786    #[test]
787    fn test_parse_device_scope_empty() {
788        let scopes = parse_device_scope(&[]);
789        assert!(scopes.is_empty());
790    }
791
792    #[test]
793    fn test_iommu_unit_init() {
794        let drhd = DrhdUnit {
795            segment: 0,
796            register_base: 0xFED9_0000,
797            include_all: true,
798            device_scope: Vec::new(),
799        };
800
801        let unit = init_iommu_unit(&drhd).unwrap();
802        assert_eq!(unit.register_base, 0xFED9_0000);
803        assert_eq!(unit.segment, 0);
804        assert!(unit.include_all);
805    }
806
807    #[test]
808    fn test_scatter_gather_list() {
809        let mut sgl = ScatterGatherList::new();
810        assert_eq!(sgl.entry_count(), 0);
811        assert_eq!(sgl.total_length, 0);
812
813        sgl.add_entry(0x1000, 4096);
814        sgl.add_entry(0x3000, 8192);
815
816        assert_eq!(sgl.entry_count(), 2);
817        assert_eq!(sgl.total_length, 4096 + 8192);
818    }
819}