1#![allow(dead_code)]
8
9use super::{
10 page_table::{FrameAllocator as PageFrameAllocator, PageMapper, PageTable, PageTableHierarchy},
11 FrameAllocatorError, FrameNumber, PageFlags, PageSize, PhysicalAddress, VirtualAddress,
12 FRAME_ALLOCATOR,
13};
14use crate::error::KernelError;
15
/// Per-address-space virtual memory manager.
///
/// Owns a page-table hierarchy and a lazily created [`PageMapper`]; the
/// kernel instance additionally installs the standard kernel mappings
/// (see `setup_kernel_mappings`).
pub struct VirtualMemoryManager {
    /// Root of this address space's page-table hierarchy.
    page_tables: PageTableHierarchy,
    /// True for the kernel address space (built via `new_kernel`).
    is_kernel: bool,
    /// Created on first use by `get_or_create_mapper`; `None` until then.
    mapper: Option<PageMapper>,
}
25
26impl VirtualMemoryManager {
27 pub fn new() -> Result<Self, KernelError> {
29 let page_tables = PageTableHierarchy::new()?;
30
31 Ok(Self {
32 page_tables,
33 is_kernel: false,
34 mapper: None,
35 })
36 }
37
38 pub fn new_kernel() -> Result<Self, KernelError> {
40 let page_tables = PageTableHierarchy::new()?;
41
42 let mut vmm = Self {
44 page_tables,
45 is_kernel: true,
46 mapper: None,
47 };
48
49 vmm.setup_kernel_mappings()?;
51
52 Ok(vmm)
53 }
54
55 fn setup_kernel_mappings(&mut self) -> Result<(), KernelError> {
57 for i in 0..1024 {
66 let phys_addr = PhysicalAddress::new(i * 0x200000); let virt_addr = VirtualAddress::new(i * 0x200000);
68 self.map(
69 virt_addr,
70 phys_addr,
71 PageFlags::PRESENT | PageFlags::WRITABLE,
72 PageSize::Large,
73 )?;
74 }
75
76 let kernel_start = 0x100000; let kernel_size = 16 * 1024 * 1024; for offset in (0..kernel_size).step_by(0x200000) {
82 let phys_addr = PhysicalAddress::new(kernel_start + offset as u64);
83 let virt_addr = VirtualAddress::new(0xFFFF_8000_0000_0000 + offset as u64);
84 self.map(virt_addr, phys_addr, PageFlags::PRESENT, PageSize::Large)?;
85 }
86
87 let heap_start = super::heap::HEAP_START;
89 let heap_size = super::heap::HEAP_SIZE;
90 for offset in (0..heap_size).step_by(0x200000) {
91 let phys_addr = PhysicalAddress::new((heap_start + offset) as u64);
92 let virt_addr = VirtualAddress::new((heap_start + offset) as u64);
93 self.map(
94 virt_addr,
95 phys_addr,
96 PageFlags::PRESENT | PageFlags::WRITABLE | PageFlags::NO_EXECUTE,
97 PageSize::Large,
98 )?;
99 }
100
101 Ok(())
102 }
103
104 pub fn map(
106 &mut self,
107 virt: VirtualAddress,
108 phys: PhysicalAddress,
109 flags: PageFlags,
110 size: PageSize,
111 ) -> Result<(), KernelError> {
112 let mapper = self.get_or_create_mapper()?;
114
115 match size {
116 PageSize::Small => {
117 let frame = FrameNumber::new(phys.as_u64() >> 12);
119 let mut frame_allocator_wrapper = FrameAllocatorWrapper;
120 mapper.map_page(virt, frame, flags, &mut frame_allocator_wrapper)?;
121 }
122 PageSize::Large => {
123 self.map_large_page(virt, phys, flags)?;
125 }
126 PageSize::Huge => {
127 self.map_huge_page(virt, phys, flags)?;
129 }
130 }
131
132 tlb::flush_address(virt);
134
135 Ok(())
136 }
137
138 fn get_or_create_mapper(&mut self) -> Result<&mut PageMapper, KernelError> {
140 if self.mapper.is_none() {
141 let l4_virt = 0xFFFF_FF00_0000_0000 as *mut PageTable;
145 unsafe {
152 self.mapper = Some(PageMapper::new(l4_virt));
153 }
154 }
155 self.mapper.as_mut().ok_or(KernelError::NotInitialized {
156 subsystem: "VMM page mapper",
157 })
158 }
159
160 fn map_large_page(
162 &mut self,
163 _virt: VirtualAddress,
164 _phys: PhysicalAddress,
165 _flags: PageFlags,
166 ) -> Result<(), KernelError> {
167 #[cfg(target_arch = "x86_64")]
170 {
171 let _frame = FrameNumber::new(_phys.as_u64() >> 21);
173 let _large_flags = _flags | PageFlags::HUGE;
174 println!(
176 "[VMM] Mapping large page 0x{:x} -> 0x{:x}",
177 _virt.as_u64(),
178 _phys.as_u64()
179 );
180 }
181
182 #[cfg(not(target_arch = "x86_64"))]
183 {
184 println!(
185 "[VMM] Mapping large page 0x{:x} -> 0x{:x}",
186 _virt.as_u64(),
187 _phys.as_u64()
188 );
189 }
190
191 Ok(())
192 }
193
194 fn map_huge_page(
196 &mut self,
197 _virt: VirtualAddress,
198 _phys: PhysicalAddress,
199 _flags: PageFlags,
200 ) -> Result<(), KernelError> {
201 #[cfg(target_arch = "x86_64")]
203 {
204 let _frame = FrameNumber::new(_phys.as_u64() >> 30);
206 let _huge_flags = _flags | PageFlags::HUGE;
207 println!(
208 "[VMM] Mapping huge page 0x{:x} -> 0x{:x}",
209 _virt.as_u64(),
210 _phys.as_u64()
211 );
212 }
213
214 #[cfg(not(target_arch = "x86_64"))]
215 {
216 println!(
217 "[VMM] Mapping huge page 0x{:x} -> 0x{:x}",
218 _virt.as_u64(),
219 _phys.as_u64()
220 );
221 }
222
223 Ok(())
224 }
225
226 pub fn unmap(&mut self, virt: VirtualAddress) -> Result<(), KernelError> {
228 let mapper = self.get_or_create_mapper()?;
229
230 let frame = mapper.unmap_page(virt)?;
232
233 FRAME_ALLOCATOR
235 .lock()
236 .free_frames(frame, 1)
237 .map_err(|_| KernelError::OutOfMemory {
238 requested: 1,
239 available: 0,
240 })?;
241
242 tlb::flush_address(virt);
244
245 Ok(())
246 }
247
248 pub fn map_guard_page(&mut self, virt: VirtualAddress) -> Result<(), KernelError> {
254 if self.translate(virt).is_some() {
256 self.unmap(virt)?;
257 }
258 tlb::flush_address(virt);
260 Ok(())
261 }
262
263 pub fn translate(&self, virt: VirtualAddress) -> Option<PhysicalAddress> {
265 let virt_addr = virt.as_u64();
269
270 if virt_addr < 0x8000_0000 {
272 return Some(PhysicalAddress::new(virt_addr));
273 }
274
275 if (0xFFFF_8000_0000_0000..0xFFFF_8000_1000_0000).contains(&virt_addr) {
277 let offset = virt_addr - 0xFFFF_8000_0000_0000;
278 return Some(PhysicalAddress::new(0x100000 + offset)); }
280
281 if virt_addr >= super::heap::HEAP_START as u64
283 && virt_addr < (super::heap::HEAP_START + super::heap::HEAP_SIZE) as u64
284 {
285 let offset = virt_addr - super::heap::HEAP_START as u64;
286 return Some(PhysicalAddress::new(
287 super::heap::HEAP_START as u64 + offset,
288 ));
289 }
290
291 None
292 }
293
294 pub fn load_bootloader_mappings(
296 &mut self,
297 memory_map: &[super::MemoryRegion],
298 ) -> Result<(), KernelError> {
299 println!("[VMM] Loading bootloader memory mappings...");
300
301 for region in memory_map {
302 if !region.usable {
303 continue;
304 }
305
306 let start_addr = region.start & !(0x200000 - 1); let end_addr = (region.start + region.size + 0x200000 - 1) & !(0x200000 - 1);
309
310 for addr in (start_addr..end_addr).step_by(0x200000) {
311 let phys = PhysicalAddress::new(addr);
312 let virt = VirtualAddress::new(addr); if self.translate(virt).is_some() {
316 continue;
317 }
318
319 self.map(
320 virt,
321 phys,
322 PageFlags::PRESENT | PageFlags::WRITABLE,
323 PageSize::Large,
324 )?;
325 }
326 }
327
328 println!("[VMM] Bootloader mappings loaded");
329 Ok(())
330 }
331}
332
/// Zero-sized adapter that satisfies the page-table code's
/// `FrameAllocator` trait by forwarding to the global `FRAME_ALLOCATOR`.
struct FrameAllocatorWrapper;

impl PageFrameAllocator for FrameAllocatorWrapper {
    /// Forwards the request to the global frame allocator, taking its
    /// lock for the duration of the call.
    fn allocate_frames(
        &mut self,
        count: usize,
        numa_node: Option<usize>,
    ) -> Result<FrameNumber, FrameAllocatorError> {
        FRAME_ALLOCATOR.lock().allocate_frames(count, numa_node)
    }
}
345
/// Thin TLB-maintenance wrappers delegating to the architecture layer.
pub mod tlb {
    use super::VirtualAddress;

    /// Flushes the TLB entry for a single virtual address
    /// (delegates to `crate::arch::tlb_flush_address`).
    pub fn flush_address(addr: VirtualAddress) {
        crate::arch::tlb_flush_address(addr.as_u64());
    }

    /// Flushes the entire TLB (delegates to `crate::arch::tlb_flush_all`).
    pub fn flush_all() {
        crate::arch::tlb_flush_all();
    }
}
361
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds a VMM over an empty test page-table hierarchy with the
    /// requested kernel flag and no mapper.
    fn make_vmm(is_kernel: bool) -> VirtualMemoryManager {
        VirtualMemoryManager {
            page_tables: PageTableHierarchy::empty_for_test(),
            is_kernel,
            mapper: None,
        }
    }

    #[test]
    fn test_translate_identity_mapped_region() {
        let vmm = make_vmm(false);
        let phys = vmm.translate(VirtualAddress::new(0x100000));
        assert_eq!(phys.map(|p| p.as_u64()), Some(0x100000));
    }

    #[test]
    fn test_translate_identity_mapped_boundary() {
        let vmm = make_vmm(false);

        // Last identity-mapped byte translates...
        let phys = vmm.translate(VirtualAddress::new(0x7FFF_FFFF));
        assert_eq!(phys.map(|p| p.as_u64()), Some(0x7FFF_FFFF));

        // ...and the first byte past 2 GiB does not.
        assert!(vmm.translate(VirtualAddress::new(0x8000_0000)).is_none());
    }

    #[test]
    fn test_translate_higher_half_kernel() {
        let vmm = make_vmm(true);
        let phys = vmm.translate(VirtualAddress::new(0xFFFF_8000_0000_0000));
        assert_eq!(phys.map(|p| p.as_u64()), Some(0x100000));
    }

    #[test]
    fn test_translate_higher_half_kernel_with_offset() {
        let vmm = make_vmm(true);
        let offset = 0x5000u64;
        let phys = vmm.translate(VirtualAddress::new(0xFFFF_8000_0000_0000 + offset));
        assert_eq!(phys.map(|p| p.as_u64()), Some(0x100000 + offset));
    }

    #[test]
    fn test_translate_unmapped_address() {
        let vmm = make_vmm(false);
        assert!(vmm.translate(VirtualAddress::new(0xDEAD_0000_0000)).is_none());
    }

    #[test]
    fn test_translate_zero_address() {
        let vmm = make_vmm(false);
        let phys = vmm.translate(VirtualAddress::new(0));
        assert_eq!(phys.map(|p| p.as_u64()), Some(0));
    }

    #[test]
    fn test_is_kernel_flag() {
        assert!(!make_vmm(false).is_kernel);
        assert!(make_vmm(true).is_kernel);
    }
}