1#![allow(clippy::manual_div_ceil)]
7
8use core::sync::atomic::{AtomicU64, Ordering};
9
10#[cfg(feature = "alloc")]
11extern crate alloc;
12
13#[cfg(feature = "alloc")]
14use alloc::{collections::BTreeMap, vec::Vec};
15
16use spin::Mutex;
17
18use super::{
19 page_table::{FrameAllocator as PageFrameAllocator, PageMapper, PageTable, PAGE_TABLE_ENTRIES},
20 FrameAllocatorError, FrameNumber, PageFlags, VirtualAddress, FRAME_ALLOCATOR, FRAME_SIZE,
21};
22use crate::error::KernelError;
23
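/// Frame-allocator shim that forwards page-table frame requests to the
/// global `FRAME_ALLOCATOR`.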
24struct VasFrameAllocator;
27
28impl PageFrameAllocator for VasFrameAllocator {
29 fn allocate_frames(
30 &mut self,
31 count: usize,
32 numa_node: Option<usize>,
33 ) -> Result<FrameNumber, FrameAllocatorError> {
34 FRAME_ALLOCATOR.lock().allocate_frames(count, numa_node)
35 }
36}
37
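/// Builds a `PageMapper` over the page-table hierarchy rooted at `page_table_root`.
///
/// # Safety
/// `page_table_root` must be the physical address of a valid L4 table that is
/// reachable through the physical-memory offset mapping.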
38unsafe fn create_mapper_from_root(page_table_root: u64) -> PageMapper {
49 let virt = super::phys_to_virt_addr(page_table_root);
50 let l4_ptr = virt as *mut super::page_table::PageTable;
51 unsafe { PageMapper::new(l4_ptr) }
56}
57
58pub unsafe fn create_mapper_from_root_pub(page_table_root: u64) -> PageMapper {
65 unsafe { create_mapper_from_root(page_table_root) }
68}
69
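/// Walks the user half (L4 entries 0..256) of the page-table hierarchy rooted at
/// `l4_phys`, freeing every L1/L2/L3 table frame it finds (huge-page entries and the
/// physical-memory-offset entry are skipped), then frees the L4 frame itself.
/// Returns the number of page-table frames released; mapped data frames are untouched.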
70pub fn free_user_page_table_frames(l4_phys: u64) -> usize {
81 if l4_phys == 0 {
82 return 0;
83 }
84
85 let phys_offset_val = super::PHYS_MEM_OFFSET.load(core::sync::atomic::Ordering::Acquire);
86 let mut freed = 0usize;
87
88 let l4_table = unsafe { &*(super::phys_to_virt_addr(l4_phys) as *const PageTable) };
91
92 for l4_idx in 0..256 {
95 let l4_entry = &l4_table[l4_idx];
96 if !l4_entry.is_present() {
97 continue;
98 }
99
100 if phys_offset_val != 0 {
103 let phys_l4_idx = ((phys_offset_val >> 39) & 0x1FF) as usize;
104 if l4_idx == phys_l4_idx {
105 continue;
106 }
107 }
108
109 let l3_phys = match l4_entry.addr() {
110 Some(a) => a.as_u64(),
111 None => continue,
112 };
113
114 let l3_table = unsafe { &*(super::phys_to_virt_addr(l3_phys) as *const PageTable) };
118 for l3_idx in 0..PAGE_TABLE_ENTRIES {
119 let l3_entry = &l3_table[l3_idx];
120 if !l3_entry.is_present() {
121 continue;
122 }
123 if l3_entry.flags().0 & PageFlags::HUGE.0 != 0 {
125 continue;
126 }
127
128 let l2_phys = match l3_entry.addr() {
129 Some(a) => a.as_u64(),
130 None => continue,
131 };
132
133 let l2_table = unsafe { &*(super::phys_to_virt_addr(l2_phys) as *const PageTable) };
137 for l2_idx in 0..PAGE_TABLE_ENTRIES {
138 let l2_entry = &l2_table[l2_idx];
139 if !l2_entry.is_present() {
140 continue;
141 }
142 if l2_entry.flags().0 & PageFlags::HUGE.0 != 0 {
144 continue;
145 }
146
147 let l1_phys = match l2_entry.addr() {
148 Some(a) => a.as_u64(),
149 None => continue,
150 };
151
152 let l1_frame = FrameNumber::new(l1_phys / FRAME_SIZE as u64);
154 FRAME_ALLOCATOR.lock().free_frames(l1_frame, 1).ok();
155 freed += 1;
156 }
157
158 let l2_frame = FrameNumber::new(l2_phys / FRAME_SIZE as u64);
160 FRAME_ALLOCATOR.lock().free_frames(l2_frame, 1).ok();
161 freed += 1;
162 }
163
164 let l3_frame = FrameNumber::new(l3_phys / FRAME_SIZE as u64);
166 FRAME_ALLOCATOR.lock().free_frames(l3_frame, 1).ok();
167 freed += 1;
168 }
169
170 let l4_frame = FrameNumber::new(l4_phys / FRAME_SIZE as u64);
172 FRAME_ALLOCATOR.lock().free_frames(l4_frame, 1).ok();
173 freed += 1;
174
175 freed
176}
177
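/// Frees the L1-L3 table frames beneath each user-space L4 entry and clears those
/// entries, but keeps the L4 frame itself so the root table can be reused.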
178fn free_user_page_table_subtrees(l4_phys: u64) {
185 let phys_offset_val = super::PHYS_MEM_OFFSET.load(core::sync::atomic::Ordering::Acquire);
186
187 let l4_table = unsafe { &mut *(super::phys_to_virt_addr(l4_phys) as *mut PageTable) };
189
190 for l4_idx in 0..256 {
191 let l4_entry = &l4_table[l4_idx];
192 if !l4_entry.is_present() {
193 continue;
194 }
195
196 if phys_offset_val != 0 {
198 let phys_l4_idx = ((phys_offset_val >> 39) & 0x1FF) as usize;
199 if l4_idx == phys_l4_idx {
200 continue;
201 }
202 }
203
204 let l3_phys = match l4_entry.addr() {
205 Some(a) => a.as_u64(),
206 None => continue,
207 };
208
209 let l3_table = unsafe { &*(super::phys_to_virt_addr(l3_phys) as *const PageTable) };
213 for l3_idx in 0..PAGE_TABLE_ENTRIES {
214 let l3_entry = &l3_table[l3_idx];
215 if !l3_entry.is_present() || l3_entry.flags().0 & PageFlags::HUGE.0 != 0 {
216 continue;
217 }
218
219 let l2_phys = match l3_entry.addr() {
220 Some(a) => a.as_u64(),
221 None => continue,
222 };
223
224 let l2_table = unsafe { &*(super::phys_to_virt_addr(l2_phys) as *const PageTable) };
227 for l2_idx in 0..PAGE_TABLE_ENTRIES {
228 let l2_entry = &l2_table[l2_idx];
229 if !l2_entry.is_present() || l2_entry.flags().0 & PageFlags::HUGE.0 != 0 {
230 continue;
231 }
232
233 let l1_phys = match l2_entry.addr() {
234 Some(a) => a.as_u64(),
235 None => continue,
236 };
237
238 let l1_frame = FrameNumber::new(l1_phys / FRAME_SIZE as u64);
240 FRAME_ALLOCATOR.lock().free_frames(l1_frame, 1).ok();
241 }
242
243 let l2_frame = FrameNumber::new(l2_phys / FRAME_SIZE as u64);
245 FRAME_ALLOCATOR.lock().free_frames(l2_frame, 1).ok();
246 }
247
248 let l3_frame = FrameNumber::new(l3_phys / FRAME_SIZE as u64);
250 FRAME_ALLOCATOR.lock().free_frames(l3_frame, 1).ok();
251
252 l4_table[l4_idx].clear();
254 }
255}
256
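/// Kind of virtual mapping; determines the default page flags chosen by
/// [`VirtualMapping::new`].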
257#[derive(Debug, Clone, Copy, PartialEq, Eq)]
259pub enum MappingType {
260 Code,
262 Data,
264 Stack,
266 Heap,
268 File,
270 Shared,
272 Device,
274}
275
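/// A contiguous virtual memory region together with the physical frames backing it.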
276#[derive(Debug, Clone)]
278pub struct VirtualMapping {
279 pub start: VirtualAddress,
281 pub size: usize,
283 pub mapping_type: MappingType,
285 pub flags: PageFlags,
287 #[cfg(feature = "alloc")]
289 pub physical_frames: Vec<super::FrameNumber>,
290}
291
292impl VirtualMapping {
293 pub fn new(start: VirtualAddress, size: usize, mapping_type: MappingType) -> Self {
295 let flags = match mapping_type {
296 MappingType::Code => PageFlags::PRESENT | PageFlags::USER,
297 MappingType::Data => PageFlags::PRESENT | PageFlags::WRITABLE | PageFlags::USER,
298 MappingType::Stack => {
299 PageFlags::PRESENT | PageFlags::WRITABLE | PageFlags::USER | PageFlags::NO_EXECUTE
300 }
301 MappingType::Heap => {
302 PageFlags::PRESENT | PageFlags::WRITABLE | PageFlags::USER | PageFlags::NO_EXECUTE
303 }
304 MappingType::File => PageFlags::PRESENT | PageFlags::USER,
305 MappingType::Shared => PageFlags::PRESENT | PageFlags::WRITABLE | PageFlags::USER,
306 MappingType::Device => PageFlags::PRESENT | PageFlags::WRITABLE | PageFlags::NO_CACHE,
307 };
308
309 Self {
310 start,
311 size,
312 mapping_type,
313 flags,
314 #[cfg(feature = "alloc")]
315 physical_frames: Vec::new(),
316 }
317 }
318
319 pub fn contains(&self, addr: VirtualAddress) -> bool {
321 addr.0 >= self.start.0 && addr.0 < self.start.0 + self.size as u64
322 }
323
324 pub fn end(&self) -> VirtualAddress {
326 VirtualAddress(self.start.0 + self.size as u64)
327 }
328}
329
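/// A per-process virtual address space: page-table root plus bookkeeping for tracked
/// mappings, the mmap area, the heap break, and the user stack.
///
/// Typical use (sketch; error handling elided):
///
/// ```ignore
/// let mut vas = VirtualAddressSpace::new();
/// vas.init()?;
/// let addr = vas.mmap(0x4000, MappingType::Data)?;
/// ```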
330pub struct VirtualAddressSpace {
332 pub page_table_root: AtomicU64,
334
335 #[cfg(feature = "alloc")]
337 mappings: Mutex<BTreeMap<VirtualAddress, VirtualMapping>>,
338
339 next_mmap_addr: AtomicU64,
341
342 heap_start: AtomicU64,
344 heap_break: AtomicU64,
345
346 stack_top: AtomicU64,
348 stack_size: AtomicU64,
350
351 pub tlb_generation: AtomicU64,
355}
356
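/// Batches virtual addresses so a burst of page-table updates can be invalidated with
/// a few per-address flushes, or a single full TLB flush if the batch overflows.
///
/// Usage sketch (addresses are illustrative):
///
/// ```ignore
/// let mut batch = TlbFlushBatch::new();
/// batch.add(0x4000_0000_0000);
/// batch.add(0x4000_0000_1000);
/// batch.flush();
/// ```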
357pub struct TlbFlushBatch {
364 addresses: [u64; Self::MAX_BATCH],
365 count: usize,
366}
367
368impl Default for TlbFlushBatch {
369 fn default() -> Self {
370 Self::new()
371 }
372}
373
374impl TlbFlushBatch {
375 const MAX_BATCH: usize = 16;
376
377 pub const fn new() -> Self {
379 Self {
380 addresses: [0; Self::MAX_BATCH],
381 count: 0,
382 }
383 }
384
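    /// Queues `vaddr` for invalidation. The count keeps incrementing past
    /// `MAX_BATCH` so that `flush` can detect overflow and fall back to a full
    /// TLB flush instead of silently dropping addresses.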
    #[inline]
    pub fn add(&mut self, vaddr: u64) {
        if self.count < Self::MAX_BATCH {
            self.addresses[self.count] = vaddr;
        }
        self.count += 1;
    }
393
394 pub fn flush(self) {
396 if self.count == 0 {
397 return;
398 }
399 if self.count > Self::MAX_BATCH {
400 crate::arch::tlb_flush_all();
402 } else {
403 for i in 0..self.count {
405 crate::arch::tlb_flush_address(self.addresses[i]);
406 }
407 }
408 }
409
410 pub fn len(&self) -> usize {
412 self.count
413 }
414
415 pub fn is_empty(&self) -> bool {
417 self.count == 0
418 }
419
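    /// Flushes the batch locally, then (on x86_64, when the APIC is initialized)
    /// sends a TLB-shootdown IPI so the other CPUs invalidate as well.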
420 pub fn flush_with_shootdown(self) {
426 self.flush();
428
429 #[cfg(target_arch = "x86_64")]
434 {
435 if crate::arch::x86_64::apic::is_initialized() {
436 let _ = crate::arch::x86_64::apic::send_ipi_all_excluding_self(
437 crate::arch::x86_64::apic::TLB_SHOOTDOWN_VECTOR,
438 );
439 }
440 }
441 }
442}
443
444impl Default for VirtualAddressSpace {
445 fn default() -> Self {
446 Self {
447 page_table_root: AtomicU64::new(0),
448 #[cfg(feature = "alloc")]
449 mappings: Mutex::new(BTreeMap::new()),
450 next_mmap_addr: AtomicU64::new(0x4000_0000_0000),
452 heap_start: AtomicU64::new(0x2000_0000_0000),
454 heap_break: AtomicU64::new(0x2000_0000_0000),
455 stack_top: AtomicU64::new(0x7FFF_FFFF_0000),
457 stack_size: AtomicU64::new(8 * 1024 * 1024),
458 tlb_generation: AtomicU64::new(0),
459 }
460 }
461}
462
463impl VirtualAddressSpace {
464 pub fn new() -> Self {
466 Self::default()
467 }
468
469 pub fn init(&mut self) -> Result<(), KernelError> {
471 use super::page_table::PageTableHierarchy;
472
473 let page_table = PageTableHierarchy::new()?;
475 self.page_table_root
476 .store(page_table.l4_addr().as_u64(), Ordering::Release);
477
478 self.map_kernel_space()?;
480
481 Ok(())
482 }
483
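    /// Copies the kernel-half L4 entries (256..512) and the physical-memory-offset
    /// entry from the boot page table into this address space. When no boot CR3 is
    /// available, falls back to mapping placeholder kernel code/data/heap regions.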
484 pub fn map_kernel_space(&mut self) -> Result<(), KernelError> {
493 use super::page_table::{PageTable, PAGE_TABLE_ENTRIES};
494
495 let new_root = self.page_table_root.load(Ordering::Acquire);
496 if new_root == 0 {
497 return Err(KernelError::NotInitialized {
498 subsystem: "VAS page table",
499 });
500 }
501
502 let boot_cr3: u64;
504 #[cfg(target_arch = "x86_64")]
505 {
506 unsafe {
508 core::arch::asm!("mov {}, cr3", out(reg) boot_cr3);
509 }
510 }
511 #[cfg(not(target_arch = "x86_64"))]
512 {
513 boot_cr3 = 0;
514 }
515
516 let boot_l4_phys = boot_cr3 & 0x000F_FFFF_FFFF_F000;
517 if boot_l4_phys == 0 {
518 #[cfg(feature = "alloc")]
520 {
521 self.map_region(
522 VirtualAddress(0xFFFF_8000_0000_0000),
523 0x200000,
524 MappingType::Code,
525 )?;
526 self.map_region(
527 VirtualAddress(0xFFFF_8000_0020_0000),
528 0x200000,
529 MappingType::Data,
530 )?;
531 self.map_region(
532 VirtualAddress(0xFFFF_C000_0000_0000),
533 0x1000000,
534 MappingType::Heap,
535 )?;
536 }
537 return Ok(());
538 }
539
540 unsafe {
548 let boot_l4 = &*(super::phys_to_virt_addr(boot_l4_phys) as *const PageTable);
549 let new_l4 = &mut *(super::phys_to_virt_addr(new_root) as *mut PageTable);
550
551 for i in 256..PAGE_TABLE_ENTRIES {
552 if boot_l4[i].is_present() {
553 new_l4[i] = boot_l4[i];
554 }
555 }
556
557 let phys_offset = super::PHYS_MEM_OFFSET.load(core::sync::atomic::Ordering::Acquire);
563 if phys_offset != 0 {
564 let phys_l4_idx = ((phys_offset >> 39) & 0x1FF) as usize;
565 if phys_l4_idx < 256 && boot_l4[phys_l4_idx].is_present() {
566 new_l4[phys_l4_idx] = boot_l4[phys_l4_idx];
567 }
568 }
569 }
570
571 Ok(())
572 }
573
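    /// Clones `other` into this address space: kernel-half L4 entries are shared,
    /// while every user mapping is copied page by page into freshly allocated frames
    /// (an eager copy, not copy-on-write).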
574 #[cfg(feature = "alloc")]
581 pub fn clone_from(&mut self, other: &Self) -> Result<(), KernelError> {
582 use super::page_table::{PageTable, PageTableHierarchy, PAGE_TABLE_ENTRIES};
583
584 let new_hierarchy = PageTableHierarchy::new()?;
586 let new_root = new_hierarchy.l4_addr().as_u64();
587 self.page_table_root.store(new_root, Ordering::Release);
588
589 let parent_root = other.page_table_root.load(Ordering::Acquire);
590
591 if parent_root != 0 {
592 let parent_l4 =
597 unsafe { &*(super::phys_to_virt_addr(parent_root) as *const PageTable) };
598 let child_l4 = unsafe { &mut *(super::phys_to_virt_addr(new_root) as *mut PageTable) };
601
602 for i in 256..PAGE_TABLE_ENTRIES {
603 child_l4[i] = parent_l4[i];
604 }
605
606 let phys_offset = super::PHYS_MEM_OFFSET.load(core::sync::atomic::Ordering::Acquire);
609 if phys_offset != 0 {
610 let phys_l4_idx = ((phys_offset >> 39) & 0x1FF) as usize;
611 if phys_l4_idx < 256 {
612 child_l4[phys_l4_idx] = parent_l4[phys_l4_idx];
613 }
614 }
615
616 let parent_mappings = other.mappings.lock();
620 let mut child_mappings = self.mappings.lock();
621 child_mappings.clear();
622
623 let parent_mapper = unsafe { create_mapper_from_root(parent_root) };
625 let mut child_mapper = unsafe { create_mapper_from_root(new_root) };
627 let mut alloc = VasFrameAllocator;
628
629 const KERNEL_SPACE_START: u64 = 0xFFFF_8000_0000_0000;
630
631 for (addr, mapping) in parent_mappings.iter() {
632 if addr.0 >= KERNEL_SPACE_START {
634 child_mappings.insert(*addr, mapping.clone());
636 continue;
637 }
638
639 let num_pages = mapping.size / 4096;
640 let mut child_frames = Vec::with_capacity(num_pages);
641
642 for i in 0..num_pages {
643 let vaddr = VirtualAddress(mapping.start.0 + (i as u64) * 4096);
644
                let (parent_frame, flags) = match parent_mapper.translate_page(vaddr) {
                    Ok(result) => result,
                    Err(_) => continue,
                };
650
651 let child_frame = {
653 FRAME_ALLOCATOR
654 .lock()
655 .allocate_frames(1, None)
656 .map_err(|_| KernelError::OutOfMemory {
657 requested: 4096,
658 available: 0,
659 })?
660 };
661
662 unsafe {
667 let src_phys = parent_frame.as_u64() << 12;
668 let dst_phys = child_frame.as_u64() << 12;
669 let src = super::phys_to_virt_addr(src_phys) as *const u8;
670 let dst = super::phys_to_virt_addr(dst_phys) as *mut u8;
671 core::ptr::copy_nonoverlapping(src, dst, 4096);
672 }
673
                child_mapper
                    .map_page(vaddr, child_frame, flags, &mut alloc)
                    .ok();
                child_frames.push(child_frame);
680 }
681
682 let mut child_mapping = mapping.clone();
684 child_mapping.physical_frames = child_frames;
685 child_mappings.insert(*addr, child_mapping);
686 }
687 }
688
689 self.heap_start
691 .store(other.heap_start.load(Ordering::Relaxed), Ordering::Relaxed);
692 self.heap_break
693 .store(other.heap_break.load(Ordering::Relaxed), Ordering::Relaxed);
694 self.stack_top
695 .store(other.stack_top.load(Ordering::Relaxed), Ordering::Relaxed);
696 self.next_mmap_addr.store(
697 other.next_mmap_addr.load(Ordering::Relaxed),
698 Ordering::Relaxed,
699 );
700
701 Ok(())
702 }
703
704 #[cfg(not(feature = "alloc"))]
706 pub fn clone_from(&mut self, _other: &Self) -> Result<(), KernelError> {
707 Err(KernelError::NotImplemented {
708 feature: "clone_from (requires alloc)",
709 })
710 }
711
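    /// Unmaps every tracked mapping, returns its backing frames to the frame
    /// allocator, and flushes the TLB. Page-table frames themselves are not freed here.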
712 pub fn destroy(&mut self) {
714 #[cfg(feature = "alloc")]
715 {
716 let pt_root = self.page_table_root.load(Ordering::Acquire);
717
718 let mut mappings = self.mappings.lock();
720
721 if pt_root != 0 {
723 let mut mapper = unsafe { create_mapper_from_root(pt_root) };
728
729 for (_, mapping) in mappings.iter() {
730 let num_pages = mapping.size / 4096;
731 for i in 0..num_pages {
732 let vaddr = VirtualAddress(mapping.start.0 + (i as u64) * 4096);
733 let _ = mapper.unmap_page(vaddr);
734 }
735 }
736 }
737
738 for (_, mapping) in mappings.iter() {
740 let allocator = FRAME_ALLOCATOR.lock();
741 for &frame in &mapping.physical_frames {
742 let _ = allocator.free_frames(frame, 1);
743 }
744 }
745
746 mappings.clear();
748
749 crate::arch::tlb_flush_all();
754 }
755 }
756
757 pub fn set_page_table(&self, root_phys_addr: u64) {
759 self.page_table_root
760 .store(root_phys_addr, Ordering::Release);
761 }
762
763 pub fn get_page_table(&self) -> u64 {
765 self.page_table_root.load(Ordering::Acquire)
766 }
767
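    /// Maps a page-aligned region backed by freshly allocated, zeroed frames, using
    /// flags derived from `mapping_type`. Fails if the range overlaps an existing
    /// mapping; partially allocated frames are released on allocation failure.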
768 #[cfg(feature = "alloc")]
770 pub fn map_region(
771 &self,
772 start: VirtualAddress,
773 size: usize,
774 mapping_type: MappingType,
775 ) -> Result<(), KernelError> {
776 let aligned_start = VirtualAddress(start.0 & !(4096 - 1));
778 let aligned_size = ((size + 4095) / 4096) * 4096;
779
780 let mapping = VirtualMapping::new(aligned_start, aligned_size, mapping_type);
781
782 let mut mappings = self.mappings.lock();
783
784 let b_start = aligned_start.0;
790 let b_end = aligned_start.0 + aligned_size as u64;
791 for (_, existing) in mappings.iter() {
792 let a_start = existing.start.0;
793 let a_end = existing.start.0 + existing.size as u64;
794 if a_start < b_end && b_start < a_end {
795 return Err(KernelError::AlreadyExists {
796 resource: "address range",
797 id: aligned_start.0,
798 });
799 }
800 }
801
802 let num_pages = aligned_size / 4096;
804 let mut physical_frames = Vec::with_capacity(num_pages);
805
806 {
812 let frame_allocator = FRAME_ALLOCATOR.lock();
813 for _ in 0..num_pages {
814 match frame_allocator.allocate_frames(1, None) {
815 Ok(frame) => physical_frames.push(frame),
816 Err(_) => {
817 for &f in &physical_frames {
819 frame_allocator.free_frames(f, 1).ok();
820 }
821 return Err(KernelError::OutOfMemory {
822 requested: 4096,
823 available: 0,
824 });
825 }
826 }
827 }
        }

        for &frame in &physical_frames {
836 let phys_addr = frame.as_u64() << 12;
837 let virt = crate::mm::phys_to_virt_addr(phys_addr) as *mut u8;
838 unsafe {
839 core::ptr::write_bytes(virt, 0, 4096);
840 }
841 }
842
843 let pt_root = self.page_table_root.load(Ordering::Acquire);
845 if pt_root != 0 {
846 let mut mapper = unsafe { create_mapper_from_root(pt_root) };
852 let mut alloc = VasFrameAllocator;
853
854 for (i, &frame) in physical_frames.iter().enumerate() {
855 let vaddr = VirtualAddress(aligned_start.0 + (i as u64) * 4096);
856 mapper.map_page(vaddr, frame, mapping.flags, &mut alloc)?;
861 }
862
863 let mut tlb_batch = TlbFlushBatch::new();
867 for i in 0..num_pages {
868 let vaddr = aligned_start.0 + (i as u64) * 4096;
869 tlb_batch.add(vaddr);
870 }
871 tlb_batch.flush();
872 }
873
874 let mut mapping = mapping;
876 mapping.physical_frames = physical_frames;
877
878 mappings.insert(aligned_start, mapping);
879 Ok(())
880 }
881
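    /// Maps an existing physical range (e.g. MMIO) at `vaddr` as a `Device`-type
    /// mapping without allocating any new frames.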
882 #[cfg(feature = "alloc")]
888 pub fn map_physical_region(
889 &self,
890 phys_addr: u64,
891 size: usize,
892 vaddr: VirtualAddress,
893 ) -> Result<(), KernelError> {
894 let aligned_size = ((size + 4095) / 4096) * 4096;
895 let num_pages = aligned_size / 4096;
896 let aligned_phys = phys_addr & !(4096 - 1);
897
898 let flags = PageFlags::PRESENT | PageFlags::WRITABLE | PageFlags::USER;
899
900 let pt_root = self.page_table_root.load(Ordering::Acquire);
901 if pt_root == 0 {
902 return Err(KernelError::InvalidState {
903 expected: "initialized page table",
904 actual: "null page table root",
905 });
906 }
907
908 let mut mapper = unsafe { create_mapper_from_root(pt_root) };
910 let mut alloc = VasFrameAllocator;
911
912 let mut physical_frames = Vec::with_capacity(num_pages);
914 for i in 0..num_pages {
915 let frame = FrameNumber::new((aligned_phys >> 12) + i as u64);
916 physical_frames.push(frame);
917 let page_vaddr = VirtualAddress(vaddr.0 + (i as u64) * 4096);
918 mapper.map_page(page_vaddr, frame, flags, &mut alloc)?;
919 }
920
921 let mut tlb_batch = TlbFlushBatch::new();
923 for i in 0..num_pages {
924 tlb_batch.add(vaddr.0 + (i as u64) * 4096);
925 }
926 tlb_batch.flush();
927
928 let mapping = VirtualMapping {
930 start: vaddr,
931 size: aligned_size,
932 mapping_type: MappingType::Device,
933 flags,
934 physical_frames,
935 };
936 self.mappings.lock().insert(vaddr, mapping);
937
938 Ok(())
939 }
940
941 #[cfg(feature = "alloc")]
943 pub fn map_region_raii(
944 &self,
945 start: VirtualAddress,
946 size: usize,
947 mapping_type: MappingType,
948 process_id: crate::process::ProcessId,
949 ) -> Result<crate::raii::MappedRegion, KernelError> {
950 self.map_region(start, size, mapping_type)?;
952
953 let aligned_start = VirtualAddress(start.0 & !(4096 - 1));
955 let aligned_size = ((size + 4095) / 4096) * 4096;
956
957 Ok(crate::raii::MappedRegion::new(
958 aligned_start.as_usize(),
959 aligned_size,
960 process_id,
961 ))
962 }
963
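    /// Removes the mapping that starts exactly at `start`, unmaps its pages, flushes
    /// the affected TLB entries, and frees the backing frames.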
964 #[cfg(feature = "alloc")]
966 pub fn unmap_region(&self, start: VirtualAddress) -> Result<(), KernelError> {
967 let mut mappings = self.mappings.lock();
968 let mapping = mappings.remove(&start).ok_or(KernelError::NotFound {
969 resource: "memory region",
970 id: start.0,
971 })?;
972
973 let num_pages = mapping.size / 4096;
974
975 let pt_root = self.page_table_root.load(Ordering::Acquire);
977 if pt_root != 0 {
978 let mut mapper = unsafe { create_mapper_from_root(pt_root) };
983
984 for i in 0..num_pages {
985 let vaddr = VirtualAddress(mapping.start.0 + (i as u64) * 4096);
986 let _ = mapper.unmap_page(vaddr);
990 }
991 }
992
993 let mut tlb_batch = TlbFlushBatch::new();
995 for i in 0..num_pages {
996 let vaddr = mapping.start.0 + (i as u64) * 4096;
997 tlb_batch.add(vaddr);
998 }
999 tlb_batch.flush();
1000
1001 let frame_allocator = FRAME_ALLOCATOR.lock();
1003 for frame in mapping.physical_frames {
1004 let _ = frame_allocator.free_frames(frame, 1);
1005 }
1006
1007 Ok(())
1008 }
1009
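    /// Unmaps an arbitrary page-aligned sub-range. If the range covers only part of
    /// an existing mapping, that mapping is split and the surviving front/back pieces
    /// are re-inserted.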
1010 #[cfg(feature = "alloc")]
1027 pub fn unmap(&self, start_addr: usize, size: usize) -> Result<(), KernelError> {
1028 let unmap_start = (start_addr & !(4096 - 1)) as u64;
1029 let unmap_size = ((size + 4095) / 4096) * 4096;
1030 let unmap_end = unmap_start + unmap_size as u64;
1031
1032 let addr = VirtualAddress(unmap_start);
1034 let mut mappings = self.mappings.lock();
1035
1036 if let Some(existing) = mappings.get(&addr) {
1037 if existing.size == unmap_size {
1038 drop(mappings);
1040 return self.unmap_region(addr);
1041 }
1042 }
1043
1044 let mut containing_key = None;
1047 for (key, mapping) in mappings.iter() {
1048 let m_start = key.0;
1049 let m_end = m_start + mapping.size as u64;
1050 if m_start <= unmap_start && m_end >= unmap_end {
1051 containing_key = Some(*key);
1052 break;
1053 }
1054 }
1055
1056 let containing_key = match containing_key {
1057 Some(k) => k,
1058 None => {
1059 if mappings.contains_key(&addr) {
1064 drop(mappings);
1065 return self.unmap_region(addr);
1066 }
1067 return Err(KernelError::NotFound {
1068 resource: "memory region",
1069 id: unmap_start,
1070 });
1071 }
1072 };
1073
1074 let mapping = mappings
1076 .remove(&containing_key)
1077 .ok_or(KernelError::NotFound {
1078 resource: "vas_mapping",
1079 id: containing_key.0 as u64,
1080 })?;
1081 let m_start = containing_key.0;
1082
1083 let unmap_page_start = ((unmap_start - m_start) / 4096) as usize;
1085 let unmap_page_count = unmap_size / 4096;
1086 let unmap_page_end = unmap_page_start + unmap_page_count;
1087
1088 let pt_root = self.page_table_root.load(Ordering::Acquire);
1090 if pt_root != 0 {
1091 let mut mapper = unsafe { create_mapper_from_root(pt_root) };
1093 for i in unmap_page_start..unmap_page_end {
1094 let vaddr = VirtualAddress(m_start + (i as u64) * 4096);
1095 let _ = mapper.unmap_page(vaddr);
1096 }
1097 }
1098
1099 let mut tlb_batch = TlbFlushBatch::new();
1101 for i in unmap_page_start..unmap_page_end {
1102 let vaddr = m_start + (i as u64) * 4096;
1103 tlb_batch.add(vaddr);
1104 }
1105 tlb_batch.flush();
1106
1107 {
1109 let frame_allocator = FRAME_ALLOCATOR.lock();
1110 for i in unmap_page_start..unmap_page_end.min(mapping.physical_frames.len()) {
1111 let _ = frame_allocator.free_frames(mapping.physical_frames[i], 1);
1112 }
1113 }
1114
1115 if unmap_page_start > 0 {
1119 let front_size = unmap_page_start * 4096;
1120 let mut front = VirtualMapping::new(containing_key, front_size, mapping.mapping_type);
1121 front.flags = mapping.flags;
1122 if unmap_page_start <= mapping.physical_frames.len() {
1123 front.physical_frames = mapping.physical_frames[..unmap_page_start].to_vec();
1124 }
1125 mappings.insert(containing_key, front);
1126 }
1127
1128 let total_pages = mapping.size / 4096;
1130 if unmap_page_end < total_pages {
1131 let back_start_addr = m_start + (unmap_page_end as u64) * 4096;
1132 let back_size = (total_pages - unmap_page_end) * 4096;
1133 let mut back = VirtualMapping::new(
1134 VirtualAddress(back_start_addr),
1135 back_size,
1136 mapping.mapping_type,
1137 );
1138 back.flags = mapping.flags;
1139 if unmap_page_end < mapping.physical_frames.len() {
1140 back.physical_frames = mapping.physical_frames[unmap_page_end..].to_vec();
1141 }
1142 mappings.insert(VirtualAddress(back_start_addr), back);
1143 }
1144
1145 Ok(())
1146 }
1147
1148 #[cfg(feature = "alloc")]
1150 pub fn find_mapping(&self, addr: VirtualAddress) -> Option<VirtualMapping> {
1151 let mappings = self.mappings.lock();
1152 for (_, mapping) in mappings.iter() {
1153 if mapping.contains(addr) {
1154 return Some(mapping.clone());
1155 }
1156 }
1157 None
1158 }
1159
    #[cfg(feature = "alloc")]
    pub fn mappings_ref(
        &self,
    ) -> &spin::Mutex<alloc::collections::BTreeMap<VirtualAddress, VirtualMapping>> {
        &self.mappings
    }
1169
1170 #[cfg(feature = "alloc")]
1175 pub fn map_page_with_frame(
1176 &mut self,
1177 vaddr: usize,
1178 frame: super::FrameNumber,
1179 flags: PageFlags,
1180 ) -> Result<(), KernelError> {
1181 let vaddr_obj = VirtualAddress(vaddr as u64);
1182 let pt_root = self.page_table_root.load(Ordering::Acquire);
1183 if pt_root != 0 {
1184 let mut mapper = unsafe { create_mapper_from_root(pt_root) };
1187 let mut alloc = VasFrameAllocator;
1188 mapper.map_page(vaddr_obj, frame, flags, &mut alloc)?;
1189 crate::arch::tlb_flush_address(vaddr as u64);
1190 }
1191 Ok(())
1192 }
1193
1194 #[cfg(feature = "alloc")]
1198 pub fn remap_page(
1199 &mut self,
1200 vaddr: usize,
1201 new_frame: super::FrameNumber,
1202 flags: PageFlags,
1203 ) -> Result<(), KernelError> {
1204 let vaddr_obj = VirtualAddress(vaddr as u64);
1205 let pt_root = self.page_table_root.load(Ordering::Acquire);
1206 if pt_root != 0 {
1207 let mut mapper = unsafe { create_mapper_from_root(pt_root) };
1209 let mut alloc = VasFrameAllocator;
1210 let _ = mapper.unmap_page(vaddr_obj);
1212 mapper.map_page(vaddr_obj, new_frame, flags, &mut alloc)?;
1213 crate::arch::tlb_flush_address(vaddr as u64);
1214 }
1215 Ok(())
1216 }
1217
1218 #[cfg(feature = "alloc")]
1223 pub fn map_lazy(&mut self, vaddr: usize, size: usize, flags: PageFlags) {
1224 crate::mm::demand_paging::register_lazy(
1225 vaddr,
1226 size,
1227 flags,
1228 crate::mm::demand_paging::BackingType::Anonymous,
1229 );
1230 }
1231
1232 pub fn mmap(
1234 &self,
1235 size: usize,
1236 mapping_type: MappingType,
1237 ) -> Result<VirtualAddress, KernelError> {
1238 let aligned_size = ((size + 4095) / 4096) * 4096;
1239 let addr = VirtualAddress(
1240 self.next_mmap_addr
1241 .fetch_add(aligned_size as u64, Ordering::Relaxed),
1242 );
1243
1244 #[cfg(all(feature = "alloc", not(test)))]
1246 self.map_region(addr, aligned_size, mapping_type)?;
1247
1248 Ok(addr)
1249 }
1250
1251 pub fn heap_start_addr(&self) -> u64 {
1253 self.heap_start.load(Ordering::Relaxed)
1254 }
1255
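    /// POSIX-style `brk`: with `Some(addr)` attempts to move the heap break (growth
    /// maps new zeroed pages when `alloc` is enabled; shrinking and addresses below
    /// the heap start are ignored), and always returns the resulting break.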
1256 pub fn brk(&self, new_break: Option<VirtualAddress>) -> VirtualAddress {
1271 if let Some(addr) = new_break {
1272 let current = self.heap_break.load(Ordering::Acquire);
1273 let heap_start = self.heap_start.load(Ordering::Relaxed);
1274
1275 if addr.0 < heap_start {
1276 } else if addr.0 > current {
                let old_page = (current + 4095) / 4096;
                let new_page = (addr.0 + 4095) / 4096;
1281
1282 if new_page > old_page {
1283 #[cfg(all(feature = "alloc", not(test)))]
1286 {
1287 if self.brk_extend_heap(old_page, new_page).is_ok() {
1288 self.heap_break.store(addr.0, Ordering::Release);
1289 }
1290 }
1292 #[cfg(any(not(feature = "alloc"), test))]
1293 {
1294 self.heap_break.store(addr.0, Ordering::Release);
1296 }
1297 } else {
1298 self.heap_break.store(addr.0, Ordering::Release);
1300 }
1301 } else if addr.0 < current && addr.0 >= heap_start {
1302 }
1306 }
1307
1308 VirtualAddress(self.heap_break.load(Ordering::Acquire))
1309 }
1310
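    /// Allocates, zeroes, and maps the pages between `old_page` and `new_page`, then
    /// grows (or creates) the heap mapping record accordingly.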
1311 #[cfg(all(feature = "alloc", not(test)))]
1319 fn brk_extend_heap(&self, old_page: u64, new_page: u64) -> Result<(), KernelError> {
1320 let delta_pages = (new_page - old_page) as usize;
1321 let start_addr = VirtualAddress(old_page * 4096);
1322
1323 let mut new_frames = Vec::with_capacity(delta_pages);
1325 {
1326 let frame_allocator = FRAME_ALLOCATOR.lock();
1327 for _ in 0..delta_pages {
1328 match frame_allocator.allocate_frames(1, None) {
1329 Ok(frame) => new_frames.push(frame),
1330 Err(_) => {
1331 for &f in &new_frames {
1332 frame_allocator.free_frames(f, 1).ok();
1333 }
1334 return Err(KernelError::OutOfMemory {
1335 requested: 4096,
1336 available: 0,
1337 });
1338 }
1339 }
1340 }
1341 }
1342
1343 for &frame in &new_frames {
1345 let phys_addr = frame.as_u64() << 12;
1346 let virt = crate::mm::phys_to_virt_addr(phys_addr) as *mut u8;
1347 unsafe {
1350 core::ptr::write_bytes(virt, 0, 4096);
1351 }
1352 }
1353
1354 let pt_root = self.page_table_root.load(Ordering::Acquire);
1356 if pt_root != 0 {
1357 let mut mapper = unsafe { create_mapper_from_root(pt_root) };
1359 let mut alloc = VasFrameAllocator;
1360 let flags = PageFlags::PRESENT | PageFlags::WRITABLE | PageFlags::USER;
1361
1362 for (i, &frame) in new_frames.iter().enumerate() {
1363 let vaddr = VirtualAddress(start_addr.0 + (i as u64) * 4096);
1364 mapper.map_page(vaddr, frame, flags, &mut alloc)?;
1365 crate::arch::tlb_flush_address(vaddr.0);
1366 }
1367 }
1368
1369 let heap_start_page = (self.heap_start.load(Ordering::Relaxed) + 4095) / 4096;
1371 let heap_key = VirtualAddress(heap_start_page * 4096);
1372
1373 let mut mappings = self.mappings.lock();
1374 if let Some(mapping) = mappings.get_mut(&heap_key) {
1375 mapping.size += delta_pages * 4096;
1377 mapping.physical_frames.extend_from_slice(&new_frames);
1378 } else {
1379 let total_size = ((new_page - heap_start_page) as usize) * 4096;
1381 let mut mapping = VirtualMapping::new(heap_key, total_size, MappingType::Heap);
1382 mapping.physical_frames = new_frames;
1383 mapping.flags = PageFlags::PRESENT | PageFlags::WRITABLE | PageFlags::USER;
1384 mappings.insert(heap_key, mapping);
1385 }
1386
1387 Ok(())
1388 }
1389
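    /// Creates a new address space that is an eager copy of this one
    /// (see [`Self::clone_from`]).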
1390 #[cfg(feature = "alloc")]
1395 pub fn fork(&self) -> Result<Self, KernelError> {
1396 let mut new_vas = Self::new();
1397 new_vas.clone_from(self)?;
1398 Ok(new_vas)
1399 }
1400
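    /// `mprotect`-style flag update: rebuilds page flags from the `prot` bits
    /// (0x2 = write, 0x4 = exec; a missing exec bit sets NO_EXECUTE) and applies them
    /// to every page in the range.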
1401 #[cfg(feature = "alloc")]
1407 pub fn protect_region(
1408 &self,
1409 start: VirtualAddress,
1410 size: usize,
1411 prot: usize,
1412 ) -> Result<(), KernelError> {
1413 use super::PageFlags;
1414
1415 let pt_root = self.page_table_root.load(Ordering::Acquire);
1416 if pt_root == 0 {
            return Ok(());
        }
1419
1420 let mut new_flags = PageFlags::PRESENT | PageFlags::USER;
1422 if prot & 0x2 != 0 {
1423 new_flags |= PageFlags::WRITABLE;
1425 }
1426 if prot & 0x4 == 0 {
1427 new_flags |= PageFlags::NO_EXECUTE;
1429 }
1430
1431 let mut mapper = unsafe { create_mapper_from_root(pt_root) };
1434
1435 let num_pages = (size + 4095) / 4096;
1436 for i in 0..num_pages {
1437 let vaddr = VirtualAddress(start.0 + (i as u64) * 4096);
1438 let _ = mapper.update_page_flags(vaddr, new_flags);
1440 crate::arch::tlb_flush_address(vaddr.0);
1441 }
1442
1443 let mut mappings = self.mappings.lock();
1445 if let Some(mapping) = mappings.get_mut(&start) {
1446 mapping.flags = new_flags;
1447 }
1448
1449 Ok(())
1450 }
1451
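    /// Validates a page fault against the recorded mapping. Permission violations are
    /// reported as errors; actual resolution (COW, demand paging) is not implemented yet.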
1452 pub fn handle_page_fault(
1454 &self,
1455 fault_addr: VirtualAddress,
1456 write: bool,
1457 user: bool,
1458 ) -> Result<(), KernelError> {
1459 #[cfg(feature = "alloc")]
1460 {
1461 let mapping = self
1463 .find_mapping(fault_addr)
1464 .ok_or(KernelError::UnmappedMemory {
1465 addr: fault_addr.0 as usize,
1466 })?;
1467
1468 if write && !mapping.flags.contains(PageFlags::WRITABLE) {
1470 return Err(KernelError::PermissionDenied {
1471 operation: "write to read-only page",
1472 });
1473 }
1474
1475 if user && !mapping.flags.contains(PageFlags::USER) {
1476 return Err(KernelError::PermissionDenied {
1477 operation: "user access to kernel page",
1478 });
1479 }
1480
1481 Err(KernelError::NotImplemented {
1484 feature: "page fault handling (COW/demand paging)",
1485 })
1486 }
1487
1488 #[cfg(not(feature = "alloc"))]
1489 Err(KernelError::NotImplemented {
1490 feature: "page fault handling (requires alloc)",
1491 })
1492 }
1493
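    /// Aggregates mapping sizes by type for introspection and debugging.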
1494 #[cfg(feature = "alloc")]
1496 pub fn get_stats(&self) -> VasStats {
1497 let mappings = self.mappings.lock();
1498 let mut total_size = 0;
1499 let mut code_size = 0;
1500 let mut data_size = 0;
1501 let mut stack_size = 0;
1502 let mut heap_size = 0;
1503
1504 for (_, mapping) in mappings.iter() {
1505 total_size += mapping.size;
1506 match mapping.mapping_type {
1507 MappingType::Code => code_size += mapping.size,
1508 MappingType::Data => data_size += mapping.size,
1509 MappingType::Stack => stack_size += mapping.size,
1510 MappingType::Heap => heap_size += mapping.size,
1511 _ => {}
1512 }
1513 }
1514
1515 VasStats {
1516 total_size,
1517 code_size,
1518 data_size,
1519 stack_size,
1520 heap_size,
1521 mapping_count: mappings.len(),
1522 }
1523 }
1524
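    /// Tears down every mapping and its frames, flushes the TLB, frees the user
    /// page-table subtrees, and resets the heap break and mmap cursor.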
1525 pub fn clear(&mut self) {
1527 #[cfg(feature = "alloc")]
1528 {
1529 let pt_root = self.page_table_root.load(Ordering::Acquire);
1530
1531 let mappings = self.mappings.get_mut();
1533
1534 if pt_root != 0 {
1536 let mut mapper = unsafe { create_mapper_from_root(pt_root) };
1541
1542 for (_, mapping) in mappings.iter() {
1543 let num_pages = mapping.size / 4096;
1544 for i in 0..num_pages {
1545 let vaddr = VirtualAddress(mapping.start.0 + (i as u64) * 4096);
1546 let _ = mapper.unmap_page(vaddr);
1547 }
1548 }
1549 }
1550
1551 for (_, mapping) in mappings.iter() {
1553 let frame_allocator = FRAME_ALLOCATOR.lock();
1554 for frame in &mapping.physical_frames {
1555 frame_allocator.free_frames(*frame, 1).ok();
1556 }
1557 }
1558
1559 mappings.clear();
1561
1562 crate::arch::tlb_flush_all();
1566
1567 if pt_root != 0 {
1582 free_user_page_table_subtrees(pt_root);
1583 }
1584 }
1585
1586 self.heap_break
1588 .store(self.heap_start.load(Ordering::Relaxed), Ordering::Release);
1589 self.next_mmap_addr
1590 .store(0x4000_0000_0000, Ordering::Release);
1591 }
1592
1593 pub fn clear_user_space(&mut self) -> Result<(), KernelError> {
1595 #[cfg(feature = "alloc")]
1596 {
1597 let pt_root = self.page_table_root.load(Ordering::Acquire);
1598 let mappings = self.mappings.get_mut();
1599 let mut to_remove = Vec::new();
1600
1601 const KERNEL_SPACE_START: u64 = 0xFFFF_8000_0000_0000;
1603
1604 for (addr, _mapping) in mappings.iter() {
1605 if addr.0 < KERNEL_SPACE_START {
1606 to_remove.push(*addr);
1607 }
1608 }
1609
1610 if pt_root != 0 {
1612 let mut mapper = unsafe { create_mapper_from_root(pt_root) };
1617
1618 for addr in &to_remove {
1619 if let Some(mapping) = mappings.get(addr) {
1620 let num_pages = mapping.size / 4096;
1621 for i in 0..num_pages {
1622 let vaddr = VirtualAddress(mapping.start.0 + (i as u64) * 4096);
1623 let _ = mapper.unmap_page(vaddr);
1624 }
1625 }
1626 }
1627 }
1628
1629 for addr in &to_remove {
1631 if let Some(mapping) = mappings.get(addr) {
1632 let frame_allocator = FRAME_ALLOCATOR.lock();
1633 for frame in &mapping.physical_frames {
1634 frame_allocator.free_frames(*frame, 1).ok();
1635 }
1636 }
1637 }
1638
1639 for addr in to_remove {
1640 mappings.remove(&addr);
1641 }
1642
1643 crate::arch::tlb_flush_all();
1652 }
1653
1654 self.heap_break
1656 .store(self.heap_start.load(Ordering::Relaxed), Ordering::Release);
1657 self.next_mmap_addr
1658 .store(0x4000_0000_0000, Ordering::Release);
1659
1660 Ok(())
1661 }
1662
1663 pub fn user_stack_base(&self) -> usize {
1665 let size = self.stack_size.load(Ordering::Acquire);
1667 (self.stack_top.load(Ordering::Acquire) - size) as usize
1668 }
1669
1670 pub fn user_stack_size(&self) -> usize {
1672 self.stack_size.load(Ordering::Acquire) as usize
1673 }
1674
1675 pub fn stack_top(&self) -> usize {
1677 self.stack_top.load(Ordering::Acquire) as usize
1678 }
1679
1680 pub fn set_stack_top(&self, addr: usize) {
1682 self.stack_top.store(addr as u64, Ordering::Release);
1683 }
1684
1685 pub fn set_stack_size(&self, size: usize) {
1687 self.stack_size.store(size as u64, Ordering::Release);
1688 }
1689
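    /// Allocates one zeroed frame and maps it at `vaddr`. If the page is already
    /// mapped, the existing entry's flags are updated and the new frame is returned
    /// to the allocator.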
1690 pub fn map_page(&mut self, vaddr: usize, flags: PageFlags) -> Result<(), KernelError> {
1692 use super::PAGE_SIZE;
1693
1694 let frame = crate::mm::frame_allocator::per_cpu_alloc_frame().map_err(|_| {
1696 KernelError::OutOfMemory {
1697 requested: 4096,
1698 available: 0,
1699 }
1700 })?;
1701
1702 let phys_addr = frame.as_u64() << 12;
1708 let virt = crate::mm::phys_to_virt_addr(phys_addr) as *mut u8;
1709 unsafe {
1710 core::ptr::write_bytes(virt, 0, 4096);
1711 }
1712
1713 let vaddr_obj = VirtualAddress(vaddr as u64);
1714
1715 let pt_root = self.page_table_root.load(Ordering::Acquire);
1717 if pt_root != 0 {
1718 let mut mapper = unsafe { create_mapper_from_root(pt_root) };
1723 let mut alloc = VasFrameAllocator;
1724 match mapper.map_page(vaddr_obj, frame, flags, &mut alloc) {
1725 Ok(()) => {}
1726 Err(KernelError::AlreadyExists { .. }) => {
1727 let _ = mapper.update_page_flags(vaddr_obj, flags);
1732 let _ = FRAME_ALLOCATOR.lock().free_frames(frame, 1);
1733 crate::arch::tlb_flush_address(vaddr as u64);
1734 return Ok(());
1735 }
1736 Err(e) => return Err(e),
1737 }
1738 crate::arch::tlb_flush_address(vaddr as u64);
1739 }
1740
1741 #[cfg(feature = "alloc")]
1743 {
1744 let mut mappings = self.mappings.lock();
1745
1746 if let Some(mapping) = mappings.get_mut(&vaddr_obj) {
1747 mapping.physical_frames.push(frame);
1748 } else {
1749 let mut new_mapping = VirtualMapping::new(vaddr_obj, PAGE_SIZE, MappingType::Data);
1750 new_mapping.physical_frames.push(frame);
1751 new_mapping.flags = flags;
1752 mappings.insert(vaddr_obj, new_mapping);
1753 }
1754 }
1755
1756 Ok(())
1757 }
1758
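    /// Maps a 2 MiB huge page at `vaddr` (which must be 2 MiB aligned), backed by 512
    /// physically contiguous frames that are zeroed before mapping.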
1759 pub fn map_huge_page(&mut self, vaddr: usize, flags: PageFlags) -> Result<(), KernelError> {
        const HUGE_PAGE_SIZE: usize = 2 * 1024 * 1024;
        const HUGE_PAGE_FRAMES: usize = HUGE_PAGE_SIZE / 4096;

        if vaddr & (HUGE_PAGE_SIZE - 1) != 0 {
1771 return Err(KernelError::InvalidArgument {
1772 name: "vaddr",
1773 value: "not 2MB aligned for huge page",
1774 });
1775 }
1776
1777 let frame = FRAME_ALLOCATOR
1779 .lock()
1780 .allocate_frames(HUGE_PAGE_FRAMES, None)
1781 .map_err(|_| KernelError::OutOfMemory {
1782 requested: HUGE_PAGE_SIZE,
1783 available: 0,
1784 })?;
1785
1786 let phys_addr = frame.as_u64() * 4096;
1788 let virt = crate::mm::phys_to_virt_addr(phys_addr) as *mut u8;
1789 unsafe {
1791 core::ptr::write_bytes(virt, 0, HUGE_PAGE_SIZE);
1792 }
1793
1794 let huge_flags = PageFlags(flags.0 | PageFlags::HUGE.0);
1796 let vaddr_obj = VirtualAddress(vaddr as u64);
1797
1798 let pt_root = self.page_table_root.load(Ordering::Acquire);
1799 if pt_root != 0 {
1800 let mut mapper = unsafe { create_mapper_from_root(pt_root) };
1802 let mut alloc = VasFrameAllocator;
1803 mapper.map_page(vaddr_obj, frame, huge_flags, &mut alloc)?;
1804 crate::arch::tlb_flush_address(vaddr as u64);
1805 }
1806
1807 #[cfg(feature = "alloc")]
1809 {
1810 let mut mappings = self.mappings.lock();
1811 let mut new_mapping = VirtualMapping::new(vaddr_obj, HUGE_PAGE_SIZE, MappingType::Data);
1812 new_mapping.physical_frames.push(frame);
1813 new_mapping.flags = huge_flags;
1814 mappings.insert(vaddr_obj, new_mapping);
1815 }
1816
1817 Ok(())
1818 }
1819}
1820
1821#[derive(Debug, Default)]
1823pub struct VasStats {
1824 pub total_size: usize,
1825 pub code_size: usize,
1826 pub data_size: usize,
1827 pub stack_size: usize,
1828 pub heap_size: usize,
1829 pub mapping_count: usize,
1830}
1831
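/// Maps `phys_addr..phys_addr + size` into the current process's address space at the
/// next free mmap address and returns the chosen virtual address.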
1832pub fn map_physical_region_user(
1840 phys_addr: u64,
1841 size: usize,
1842) -> Result<usize, crate::syscall::SyscallError> {
1843 let proc =
1844 crate::process::current_process().ok_or(crate::syscall::SyscallError::InvalidState)?;
1845
1846 let memory_space = proc.memory_space.lock();
1847
1848 let aligned_size = ((size + 4095) / 4096) * 4096;
1850 let vaddr = VirtualAddress(
1851 memory_space
1852 .next_mmap_addr
1853 .fetch_add(aligned_size as u64, Ordering::Relaxed),
1854 );
1855
1856 #[cfg(feature = "alloc")]
1858 memory_space
1859 .map_physical_region(phys_addr, aligned_size, vaddr)
1860 .map_err(|_| crate::syscall::SyscallError::OutOfMemory)?;
1861
1862 Ok(vaddr.as_usize())
1863}
1864
1865#[cfg(test)]
1866mod tests {
1867 use super::*;
1868
1869 #[test]
1872 fn test_mapping_type_equality() {
1873 assert_eq!(MappingType::Code, MappingType::Code);
1874 assert_ne!(MappingType::Code, MappingType::Data);
1875 assert_ne!(MappingType::Stack, MappingType::Heap);
1876 }
1877
1878 #[test]
1881 fn test_virtual_mapping_new_code() {
1882 let start = VirtualAddress(0x1000);
1883 let mapping = VirtualMapping::new(start, 0x4000, MappingType::Code);
1884
1885 assert_eq!(mapping.start, start);
1886 assert_eq!(mapping.size, 0x4000);
1887 assert_eq!(mapping.mapping_type, MappingType::Code);
1888 assert!(mapping.flags.contains(PageFlags::PRESENT));
1890 assert!(mapping.flags.contains(PageFlags::USER));
1891 assert!(!mapping.flags.contains(PageFlags::WRITABLE));
1892 }
1893
1894 #[test]
1895 fn test_virtual_mapping_new_data() {
1896 let mapping = VirtualMapping::new(VirtualAddress(0x2000), 0x1000, MappingType::Data);
1897
1898 assert!(mapping.flags.contains(PageFlags::PRESENT));
1899 assert!(mapping.flags.contains(PageFlags::WRITABLE));
1900 assert!(mapping.flags.contains(PageFlags::USER));
1901 }
1902
1903 #[test]
1904 fn test_virtual_mapping_new_stack() {
1905 let mapping = VirtualMapping::new(VirtualAddress(0x3000), 0x2000, MappingType::Stack);
1906
1907 assert!(mapping.flags.contains(PageFlags::PRESENT));
1908 assert!(mapping.flags.contains(PageFlags::WRITABLE));
1909 assert!(mapping.flags.contains(PageFlags::USER));
1910 assert!(mapping.flags.contains(PageFlags::NO_EXECUTE));
1911 }
1912
1913 #[test]
1914 fn test_virtual_mapping_new_heap() {
1915 let mapping = VirtualMapping::new(VirtualAddress(0x4000), 0x10000, MappingType::Heap);
1916
1917 assert!(mapping.flags.contains(PageFlags::PRESENT));
1918 assert!(mapping.flags.contains(PageFlags::WRITABLE));
1919 assert!(mapping.flags.contains(PageFlags::USER));
1920 assert!(mapping.flags.contains(PageFlags::NO_EXECUTE));
1921 }
1922
1923 #[test]
1924 fn test_virtual_mapping_new_device() {
1925 let mapping = VirtualMapping::new(VirtualAddress(0xF000), 0x1000, MappingType::Device);
1926
1927 assert!(mapping.flags.contains(PageFlags::PRESENT));
1928 assert!(mapping.flags.contains(PageFlags::WRITABLE));
1929 assert!(mapping.flags.contains(PageFlags::NO_CACHE));
1930 assert!(!mapping.flags.contains(PageFlags::USER));
1932 }
1933
1934 #[test]
1935 fn test_virtual_mapping_contains() {
1936 let mapping = VirtualMapping::new(VirtualAddress(0x1000), 0x3000, MappingType::Data);
1937
1938 assert!(mapping.contains(VirtualAddress(0x1000)));
1940 assert!(mapping.contains(VirtualAddress(0x2000)));
1942 assert!(mapping.contains(VirtualAddress(0x3FFF)));
1944 assert!(!mapping.contains(VirtualAddress(0x4000)));
1946 assert!(!mapping.contains(VirtualAddress(0x0FFF)));
1948 assert!(!mapping.contains(VirtualAddress(0x5000)));
1950 }
1951
1952 #[test]
1953 fn test_virtual_mapping_end() {
1954 let mapping = VirtualMapping::new(VirtualAddress(0x1000), 0x3000, MappingType::Data);
1955 assert_eq!(mapping.end(), VirtualAddress(0x4000));
1956 }
1957
1958 #[test]
1959 fn test_virtual_mapping_zero_size() {
1960 let mapping = VirtualMapping::new(VirtualAddress(0x1000), 0, MappingType::File);
1961 assert_eq!(mapping.end(), VirtualAddress(0x1000));
1962 assert!(!mapping.contains(VirtualAddress(0x1000)));
1964 }
1965
1966 #[test]
1969 fn test_vas_default_values() {
1970 let vas = VirtualAddressSpace::new();
1971
1972 assert_eq!(vas.get_page_table(), 0);
1974
1975 let heap_break = vas.brk(None);
1977 assert_eq!(heap_break, VirtualAddress(0x2000_0000_0000));
1978
1979 assert_eq!(vas.stack_top(), 0x7FFF_FFFF_0000);
1981 }
1982
1983 #[test]
1984 fn test_vas_set_page_table() {
1985 let vas = VirtualAddressSpace::new();
1986 vas.set_page_table(0xDEAD_BEEF_0000);
1987 assert_eq!(vas.get_page_table(), 0xDEAD_BEEF_0000);
1988 }
1989
1990 #[test]
1991 fn test_vas_brk_extend_heap() {
1992 let vas = VirtualAddressSpace::new();
1993
1994 let initial = vas.brk(None);
1996 assert_eq!(initial, VirtualAddress(0x2000_0000_0000));
1997
1998 let new_addr = VirtualAddress(0x2000_0001_0000);
2000 let result = vas.brk(Some(new_addr));
2001 assert_eq!(result, new_addr);
2002
2003 let current = vas.brk(None);
2005 assert_eq!(current, new_addr);
2006 }
2007
2008 #[test]
2009 fn test_vas_brk_refuses_shrink() {
2010 let vas = VirtualAddressSpace::new();
2011
2012 let extended = VirtualAddress(0x2000_0001_0000);
2014 vas.brk(Some(extended));
2015
2016 let shrink_addr = VirtualAddress(0x2000_0000_0000);
2018 let result = vas.brk(Some(shrink_addr));
2019 assert_eq!(result, extended);
2021 }
2022
2023 #[test]
2024 fn test_vas_brk_refuses_below_heap_start() {
2025 let vas = VirtualAddressSpace::new();
2026
2027 let below_start = VirtualAddress(0x1000_0000_0000);
2029 let result = vas.brk(Some(below_start));
2030 assert_eq!(result, VirtualAddress(0x2000_0000_0000));
2032 }
2033
2034 #[test]
2035 fn test_vas_stack_top_get_set() {
2036 let vas = VirtualAddressSpace::new();
2037
2038 let default_top = vas.stack_top();
2039 assert_eq!(default_top, 0x7FFF_FFFF_0000);
2040
2041 vas.set_stack_top(0x7000_0000_0000);
2042 assert_eq!(vas.stack_top(), 0x7000_0000_0000);
2043 }
2044
2045 #[test]
2046 fn test_vas_user_stack_base_and_size() {
2047 let vas = VirtualAddressSpace::new();
2048
2049 let stack_size = vas.user_stack_size();
        assert_eq!(stack_size, 8 * 1024 * 1024);

        let stack_base = vas.user_stack_base();
2053 let expected_base = 0x7FFF_FFFF_0000 - 8 * 1024 * 1024;
2054 assert_eq!(stack_base, expected_base);
2055 }
2056
2057 #[test]
2062 fn test_vas_mmap_advances_address() {
2063 let vas = VirtualAddressSpace::new();
2064
2065 let addr1 = vas.mmap(0x1000, MappingType::Data);
2067 assert!(addr1.is_ok());
2068 let addr1 = addr1.unwrap();
2069 assert_eq!(addr1, VirtualAddress(0x4000_0000_0000));
2070
2071 let addr2 = vas.mmap(0x2000, MappingType::Data);
2073 assert!(addr2.is_ok());
2074 let addr2 = addr2.unwrap();
2075 assert_eq!(addr2, VirtualAddress(0x4000_0000_1000));
2076 }
2077
2078 #[test]
2079 fn test_vas_mmap_page_alignment() {
2080 let vas = VirtualAddressSpace::new();
2081
2082 let addr = vas.mmap(100, MappingType::Code);
2084 assert!(addr.is_ok());
2085
2086 let addr2 = vas.mmap(100, MappingType::Code);
2088 assert!(addr2.is_ok());
2089 let diff = addr2.unwrap().as_u64() - addr.unwrap().as_u64();
2090 assert_eq!(diff, 4096, "mmap allocations should be page-aligned");
2091 }
2092
2093 #[test]
2096 fn test_vas_stats_default() {
2097 let stats = VasStats::default();
2098 assert_eq!(stats.total_size, 0);
2099 assert_eq!(stats.code_size, 0);
2100 assert_eq!(stats.data_size, 0);
2101 assert_eq!(stats.stack_size, 0);
2102 assert_eq!(stats.heap_size, 0);
2103 assert_eq!(stats.mapping_count, 0);
2104 }
2105}