⚠️ VeridianOS Kernel Documentation - This is low-level kernel code (no_std). Unsafe operations are confined to explicitly marked `unsafe` blocks accompanied by SAFETY comments; functions are safe unless declared `unsafe fn`.

veridian_kernel/mm/
vmm.rs

1//! Virtual Memory Manager
2//!
3//! Complete virtual memory management for VeridianOS with proper page table
4//! support.
5
6// Virtual memory manager -- process address space management
7#![allow(dead_code)]
8
9use super::{
10    page_table::{FrameAllocator as PageFrameAllocator, PageMapper, PageTable, PageTableHierarchy},
11    FrameAllocatorError, FrameNumber, PageFlags, PageSize, PhysicalAddress, VirtualAddress,
12    FRAME_ALLOCATOR,
13};
14use crate::error::KernelError;
15
/// Per-address-space virtual memory manager.
///
/// Owns the page-table hierarchy for one process (or for the kernel when
/// `is_kernel` is set) and lazily caches a [`PageMapper`] so repeated
/// map/unmap calls do not re-derive the root-table pointer.
pub struct VirtualMemoryManager {
    /// Root of the page-table hierarchy for this address space.
    page_tables: PageTableHierarchy,
    /// True for the kernel address space (which receives the identity,
    /// higher-half, and heap mappings in `setup_kernel_mappings`).
    is_kernel: bool,
    /// Page mapper for this VMM, created lazily by `get_or_create_mapper`
    /// and cached for performance.
    mapper: Option<PageMapper>,
}
25
impl VirtualMemoryManager {
    /// Create a new (user-space) virtual memory manager with a fresh
    /// page-table hierarchy and no cached mapper.
    ///
    /// # Errors
    /// Propagates any error from [`PageTableHierarchy::new`] (e.g. failure
    /// to allocate the root table).
    pub fn new() -> Result<Self, KernelError> {
        let page_tables = PageTableHierarchy::new()?;

        Ok(Self {
            page_tables,
            is_kernel: false,
            // Mapper is created lazily on the first map/unmap call.
            mapper: None,
        })
    }

    /// Create the kernel's virtual memory manager and install the initial
    /// kernel mappings (identity map, higher-half image, heap).
    ///
    /// # Errors
    /// Fails if the page-table hierarchy cannot be allocated or any initial
    /// mapping in `setup_kernel_mappings` fails.
    pub fn new_kernel() -> Result<Self, KernelError> {
        let page_tables = PageTableHierarchy::new()?;

        // Build the VMM first, then populate it; setup_kernel_mappings
        // needs `&mut self` to call `map`.
        let mut vmm = Self {
            page_tables,
            is_kernel: true,
            mapper: None,
        };

        // Map kernel code, data, heap regions
        vmm.setup_kernel_mappings()?;

        Ok(vmm)
    }

    /// Setup initial kernel mappings:
    /// 1. Identity map of low physical memory (bootloader compatibility)
    /// 2. Higher-half kernel image mapping at 0xFFFF_8000_0000_0000
    /// 3. Kernel heap region (writable, no-execute)
    ///
    /// # Errors
    /// Propagates the first failure from `map`.
    fn setup_kernel_mappings(&mut self) -> Result<(), KernelError> {
        // Identity map first 2GB for bootloader compatibility:
        // 1024 iterations x 2 MiB large pages = 2 GiB.
        // NOTE(review): this also maps page 0 (the null page) as
        // present+writable, and without NO_EXECUTE -- confirm this is
        // intentional for the bootloader handoff.
        for i in 0..1024 {
            let phys_addr = PhysicalAddress::new(i * 0x200000); // 2MB pages
            let virt_addr = VirtualAddress::new(i * 0x200000);
            self.map(
                virt_addr,
                phys_addr,
                PageFlags::PRESENT | PageFlags::WRITABLE,
                PageSize::Large,
            )?;
        }

        // Map the kernel image (assumed loaded at physical 1 MiB) into the
        // higher half. PRESENT without WRITABLE/NO_EXECUTE yields a
        // read+execute mapping for kernel code.
        let kernel_start = 0x100000; // 1MB
        let kernel_size = 16 * 1024 * 1024; // 16MB for kernel

        for offset in (0..kernel_size).step_by(0x200000) {
            let phys_addr = PhysicalAddress::new(kernel_start + offset as u64);
            let virt_addr = VirtualAddress::new(0xFFFF_8000_0000_0000 + offset as u64);
            self.map(virt_addr, phys_addr, PageFlags::PRESENT, PageSize::Large)?;
        }

        // Map the kernel heap identity-style (virtual == physical address),
        // writable and non-executable, in 2 MiB steps.
        let heap_start = super::heap::HEAP_START;
        let heap_size = super::heap::HEAP_SIZE;
        for offset in (0..heap_size).step_by(0x200000) {
            let phys_addr = PhysicalAddress::new((heap_start + offset) as u64);
            let virt_addr = VirtualAddress::new((heap_start + offset) as u64);
            self.map(
                virt_addr,
                phys_addr,
                PageFlags::PRESENT | PageFlags::WRITABLE | PageFlags::NO_EXECUTE,
                PageSize::Large,
            )?;
        }

        Ok(())
    }

    /// Map a virtual address to a physical address with the given flags and
    /// page size, then flush the TLB entry for `virt`.
    ///
    /// 4 KiB pages go through the cached `PageMapper`; 2 MiB and 1 GiB
    /// pages are routed to `map_large_page` / `map_huge_page`, which are
    /// currently logging stubs (see notes there).
    ///
    /// # Errors
    /// Fails if the mapper cannot be created or the underlying page-table
    /// update fails.
    pub fn map(
        &mut self,
        virt: VirtualAddress,
        phys: PhysicalAddress,
        flags: PageFlags,
        size: PageSize,
    ) -> Result<(), KernelError> {
        // Get or create page mapper
        let mapper = self.get_or_create_mapper()?;

        match size {
            PageSize::Small => {
                // 4 KiB page: frame number is the physical address >> 12.
                let frame = FrameNumber::new(phys.as_u64() >> 12);
                // Wrapper lets the mapper allocate intermediate tables from
                // the global frame allocator.
                let mut frame_allocator_wrapper = FrameAllocatorWrapper;
                mapper.map_page(virt, frame, flags, &mut frame_allocator_wrapper)?;
            }
            PageSize::Large => {
                // Map 2MB page
                self.map_large_page(virt, phys, flags)?;
            }
            PageSize::Huge => {
                // Map 1GB page
                self.map_huge_page(virt, phys, flags)?;
            }
        }

        // Invalidate any stale TLB entry for this address.
        tlb::flush_address(virt);

        Ok(())
    }

    /// Get the cached page mapper, creating it on first use.
    ///
    /// # Errors
    /// Returns `KernelError::NotInitialized` if the mapper is somehow still
    /// absent after creation (defensive; should not happen).
    fn get_or_create_mapper(&mut self) -> Result<&mut PageMapper, KernelError> {
        if self.mapper.is_none() {
            // Map L4 table to a known virtual address for access
            // In a real implementation, this would use recursive mapping or physical memory
            // mapping
            let l4_virt = 0xFFFF_FF00_0000_0000 as *mut PageTable;
            // SAFETY: The L4 page table is expected to be mapped at a fixed virtual
            // address (0xFFFF_FF00_0000_0000) via recursive mapping or the kernel's
            // physical memory map. This address is within the kernel's higher-half
            // address space and is reserved for page table access. The PageMapper
            // requires exclusive access to this table, which is maintained by the
            // VMM being the sole owner of the mapper through `&mut self`.
            unsafe {
                self.mapper = Some(PageMapper::new(l4_virt));
            }
        }
        self.mapper.as_mut().ok_or(KernelError::NotInitialized {
            subsystem: "VMM page mapper",
        })
    }

    /// Map a 2MB large page.
    ///
    /// NOTE(review): currently a stub -- it computes the frame number and
    /// flags (with `PageFlags::HUGE`) but never installs a page-directory
    /// entry; it only logs the intended mapping and returns `Ok`.
    fn map_large_page(
        &mut self,
        _virt: VirtualAddress,
        _phys: PhysicalAddress,
        _flags: PageFlags,
    ) -> Result<(), KernelError> {
        // For large pages, we need to set the page directory entry directly
        // This is architecture-specific
        #[cfg(target_arch = "x86_64")]
        {
            // 2 MiB frame number: physical address >> 21.
            let _frame = FrameNumber::new(_phys.as_u64() >> 21);
            let _large_flags = _flags | PageFlags::HUGE;
            // Would map at PD level instead of PT level
            println!(
                "[VMM] Mapping large page 0x{:x} -> 0x{:x}",
                _virt.as_u64(),
                _phys.as_u64()
            );
        }

        #[cfg(not(target_arch = "x86_64"))]
        {
            println!(
                "[VMM] Mapping large page 0x{:x} -> 0x{:x}",
                _virt.as_u64(),
                _phys.as_u64()
            );
        }

        Ok(())
    }

    /// Map a 1GB huge page.
    ///
    /// NOTE(review): currently a stub, like `map_large_page` -- computes
    /// the frame/flags but only logs; no PDP entry is written.
    fn map_huge_page(
        &mut self,
        _virt: VirtualAddress,
        _phys: PhysicalAddress,
        _flags: PageFlags,
    ) -> Result<(), KernelError> {
        // For huge pages, we need to set the page directory pointer entry directly
        #[cfg(target_arch = "x86_64")]
        {
            // 1 GiB frame number: physical address >> 30.
            let _frame = FrameNumber::new(_phys.as_u64() >> 30);
            let _huge_flags = _flags | PageFlags::HUGE;
            println!(
                "[VMM] Mapping huge page 0x{:x} -> 0x{:x}",
                _virt.as_u64(),
                _phys.as_u64()
            );
        }

        #[cfg(not(target_arch = "x86_64"))]
        {
            println!(
                "[VMM] Mapping huge page 0x{:x} -> 0x{:x}",
                _virt.as_u64(),
                _phys.as_u64()
            );
        }

        Ok(())
    }

    /// Unmap a virtual address, return its frame to the global allocator,
    /// and flush the TLB entry.
    ///
    /// # Errors
    /// Fails if the page is not mapped or the frame cannot be freed.
    /// NOTE(review): a frame-free failure is surfaced as
    /// `KernelError::OutOfMemory`, which is semantically misleading for a
    /// deallocation error -- consider a dedicated variant.
    pub fn unmap(&mut self, virt: VirtualAddress) -> Result<(), KernelError> {
        let mapper = self.get_or_create_mapper()?;

        // Remove the translation; `unmap_page` returns the backing frame.
        let frame = mapper.unmap_page(virt)?;

        // Free the frame back to allocator
        FRAME_ALLOCATOR
            .lock()
            .free_frames(frame, 1)
            .map_err(|_| KernelError::OutOfMemory {
                requested: 1,
                available: 0,
            })?;

        // Flush TLB
        tlb::flush_address(virt);

        Ok(())
    }

    /// Map a guard page at the given virtual address.
    ///
    /// A guard page is an unmapped page that triggers a page fault on access,
    /// used to detect stack overflows. The page is left unmapped (no physical
    /// backing) so any read/write/execute will trap.
    pub fn map_guard_page(&mut self, virt: VirtualAddress) -> Result<(), KernelError> {
        // Ensure the address is not already mapped; if it is, unmap it
        if self.translate(virt).is_some() {
            self.unmap(virt)?;
        }
        // The page is now unmapped - any access will fault.
        // NOTE(review): when the unmap branch ran, `unmap` already flushed
        // this address; the flush below is redundant in that path but also
        // covers the never-mapped path. Harmless either way.
        tlb::flush_address(virt);
        Ok(())
    }

    /// Translate a virtual address to physical.
    ///
    /// This does NOT walk the page tables; it models only the static
    /// mappings established by `setup_kernel_mappings` (identity-mapped
    /// first 2 GiB, higher-half kernel image, identity-mapped heap).
    /// Addresses mapped dynamically via `map` are not reflected here.
    pub fn translate(&self, virt: VirtualAddress) -> Option<PhysicalAddress> {
        // For now, we do simple translation based on known mappings
        // In a real implementation, would walk page tables

        let virt_addr = virt.as_u64();

        // Identity mapped region (first 2GB)
        if virt_addr < 0x8000_0000 {
            return Some(PhysicalAddress::new(virt_addr));
        }

        // Higher half kernel mapping (16 MiB window)
        if (0xFFFF_8000_0000_0000..0xFFFF_8000_1000_0000).contains(&virt_addr) {
            let offset = virt_addr - 0xFFFF_8000_0000_0000;
            return Some(PhysicalAddress::new(0x100000 + offset)); // Kernel at 1MB
        }

        // Kernel heap mapping. The arithmetic below reduces to the identity
        // mapping: HEAP_START + (virt - HEAP_START) == virt.
        if virt_addr >= super::heap::HEAP_START as u64
            && virt_addr < (super::heap::HEAP_START + super::heap::HEAP_SIZE) as u64
        {
            let offset = virt_addr - super::heap::HEAP_START as u64;
            return Some(PhysicalAddress::new(
                super::heap::HEAP_START as u64 + offset,
            ));
        }

        None
    }

    /// Identity-map all usable regions from the bootloader memory map using
    /// 2 MiB large pages, skipping regions `translate` already knows about.
    ///
    /// # Errors
    /// Propagates the first failure from `map`.
    pub fn load_bootloader_mappings(
        &mut self,
        memory_map: &[super::MemoryRegion],
    ) -> Result<(), KernelError> {
        println!("[VMM] Loading bootloader memory mappings...");

        for region in memory_map {
            if !region.usable {
                continue;
            }

            // Round start down and end up to the 2 MiB large-page boundary.
            let start_addr = region.start & !(0x200000 - 1); // Align to 2MB
            let end_addr = (region.start + region.size + 0x200000 - 1) & !(0x200000 - 1);

            for addr in (start_addr..end_addr).step_by(0x200000) {
                let phys = PhysicalAddress::new(addr);
                let virt = VirtualAddress::new(addr); // Identity map for now

                // Skip if already mapped (per translate's static model)
                if self.translate(virt).is_some() {
                    continue;
                }

                self.map(
                    virt,
                    phys,
                    PageFlags::PRESENT | PageFlags::WRITABLE,
                    PageSize::Large,
                )?;
            }
        }

        println!("[VMM] Bootloader mappings loaded");
        Ok(())
    }
}
332
/// Zero-sized adapter that exposes the global `FRAME_ALLOCATOR` through the
/// `PageFrameAllocator` trait expected by `PageMapper`.
struct FrameAllocatorWrapper;
335
336impl PageFrameAllocator for FrameAllocatorWrapper {
337    fn allocate_frames(
338        &mut self,
339        count: usize,
340        numa_node: Option<usize>,
341    ) -> Result<FrameNumber, FrameAllocatorError> {
342        FRAME_ALLOCATOR.lock().allocate_frames(count, numa_node)
343    }
344}
345
346/// TLB management -- delegates to architecture-specific implementations
347/// in `crate::arch::{tlb_flush_address, tlb_flush_all}`.
348pub mod tlb {
349    use super::VirtualAddress;
350
351    /// Flush TLB for a specific address
352    pub fn flush_address(addr: VirtualAddress) {
353        crate::arch::tlb_flush_address(addr.as_u64());
354    }
355
356    /// Flush entire TLB
357    pub fn flush_all() {
358        crate::arch::tlb_flush_all();
359    }
360}
361
#[cfg(test)]
mod tests {
    use super::*;

    // --- VirtualMemoryManager translate tests ---
    //
    // translate() is pure address-range logic (no hardware interaction)
    // and is testable on the host.

    /// Build a minimal VMM for host-side tests: empty page tables and no
    /// cached mapper. Extracted to avoid repeating the struct literal in
    /// every test.
    fn make_vmm(is_kernel: bool) -> VirtualMemoryManager {
        VirtualMemoryManager {
            page_tables: PageTableHierarchy::empty_for_test(),
            is_kernel,
            mapper: None,
        }
    }

    #[test]
    fn test_translate_identity_mapped_region() {
        // translate() treats addresses < 0x8000_0000 as identity mapped
        let vmm = make_vmm(false);

        // Address in the identity-mapped first 2GB
        let result = vmm.translate(VirtualAddress::new(0x100000));
        assert!(result.is_some());
        assert_eq!(result.unwrap().as_u64(), 0x100000);
    }

    #[test]
    fn test_translate_identity_mapped_boundary() {
        let vmm = make_vmm(false);

        // Last valid identity-mapped address
        let result = vmm.translate(VirtualAddress::new(0x7FFF_FFFF));
        assert!(result.is_some());
        assert_eq!(result.unwrap().as_u64(), 0x7FFF_FFFF);

        // Just past the 2GB boundary -- no longer identity mapped
        let result = vmm.translate(VirtualAddress::new(0x8000_0000));
        assert!(result.is_none());
    }

    #[test]
    fn test_translate_higher_half_kernel() {
        let vmm = make_vmm(true);

        // Higher-half kernel mapping: 0xFFFF_8000_0000_0000 -> 0x100000 (1MB)
        let virt = VirtualAddress::new(0xFFFF_8000_0000_0000);
        let result = vmm.translate(virt);
        assert!(result.is_some());
        assert_eq!(result.unwrap().as_u64(), 0x100000);
    }

    #[test]
    fn test_translate_higher_half_kernel_with_offset() {
        let vmm = make_vmm(true);

        // Offset within the kernel higher-half mapping
        let offset = 0x5000u64;
        let virt = VirtualAddress::new(0xFFFF_8000_0000_0000 + offset);
        let result = vmm.translate(virt);
        assert!(result.is_some());
        assert_eq!(result.unwrap().as_u64(), 0x100000 + offset);
    }

    #[test]
    fn test_translate_unmapped_address() {
        let vmm = make_vmm(false);

        // Random high address that is not in any known mapping
        let result = vmm.translate(VirtualAddress::new(0xDEAD_0000_0000));
        assert!(result.is_none());
    }

    #[test]
    fn test_translate_zero_address() {
        let vmm = make_vmm(false);

        // Address 0 is within the identity-mapped region
        let result = vmm.translate(VirtualAddress::new(0));
        assert!(result.is_some());
        assert_eq!(result.unwrap().as_u64(), 0);
    }

    #[test]
    fn test_is_kernel_flag() {
        assert!(!make_vmm(false).is_kernel);
        assert!(make_vmm(true).is_kernel);
    }
}