// veridian_kernel/mm/user_validation.rs
1//! User space memory validation utilities
2//!
3//! Provides functions to validate user space addresses and check page mappings.
4
5use crate::mm::{
6 page_table::{PageTable, PageTableEntry},
7 PageFlags,
8};
9
/// Returns `true` if `addr` lies within the canonical user-space half of
/// the address space.
///
/// User space covers `0x0` up to (but not including) `0x0000_8000_0000_0000`
/// (128 TiB) on x86_64.
pub fn is_user_addr_valid(addr: usize) -> bool {
    /// Exclusive upper bound of the user-space address range (128 TiB).
    const USER_SPACE_END: usize = 0x0000_8000_0000_0000;
    addr < USER_SPACE_END
}
15
16/// Translate a virtual address to its page table entry
17///
18/// Returns None if the address is not mapped
19pub fn translate_address(addr: usize) -> Option<PageTableEntry> {
20 // Get current process's page table
21 let _current_process = crate::process::current_process()?;
22
23 // Get the active page table base address (CR3 on x86_64).
24 // In VeridianOS, syscalls run with the process's page tables loaded
25 // (CR3 switching was removed from syscalls in v0.4.9 for performance).
26 // The kernel is mapped via L4[256-511] in every process's page table,
27 // so CR3 already points to the correct page table for translating both
28 // user and kernel addresses.
29 // SAFETY: get_kernel_page_table() returns the physical address of the
30 // active page table root. The page table memory is accessible via the
31 // kernel's physical memory mapping and has 'static lifetime.
32 let page_table = unsafe { &*(crate::mm::get_kernel_page_table() as *const PageTable) };
33
34 // Walk the page tables to find the entry
35 let vpn = addr >> 12; // Virtual page number
36
37 // 4-level page table walk (x86_64 style)
38 let l4_index = (vpn >> 27) & 0x1FF;
39 let l3_index = (vpn >> 18) & 0x1FF;
40 let l2_index = (vpn >> 9) & 0x1FF;
41 let l1_index = vpn & 0x1FF;
42
43 // Walk L4
44 let l4_entry = page_table[l4_index];
45 if !l4_entry.is_present() {
46 return None;
47 }
48
49 // Get L3 table
50 // SAFETY: l4_entry.addr() returns the physical address of the next-level
51 // page table. This address was set by the kernel's page table setup code
52 // and points to a valid PageTable in identity-mapped kernel memory.
53 let l3_table = unsafe { &*(l4_entry.addr()?.as_u64() as *const PageTable) };
54
55 let l3_entry = l3_table[l3_index];
56 if !l3_entry.is_present() {
57 return None;
58 }
59
60 // Check for huge page (1GB)
61 if l3_entry.flags().contains(PageFlags::HUGE) {
62 return Some(l3_entry);
63 }
64
65 // Get L2 table
66 // SAFETY: l3_entry.addr() returns the physical address of the next-level
67 // page table, set by kernel page table initialization. The address points
68 // to a valid PageTable in identity-mapped kernel memory.
69 let l2_table = unsafe { &*(l3_entry.addr()?.as_u64() as *const PageTable) };
70
71 let l2_entry = l2_table[l2_index];
72 if !l2_entry.is_present() {
73 return None;
74 }
75
76 // Check for large page (2MB)
77 if l2_entry.flags().contains(PageFlags::HUGE) {
78 return Some(l2_entry);
79 }
80
81 // Get L1 table
82 // SAFETY: l2_entry.addr() returns the physical address of the final-level
83 // page table, set by kernel page table initialization. The address points
84 // to a valid PageTable in identity-mapped kernel memory.
85 let l1_table = unsafe { &*(l2_entry.addr()?.as_u64() as *const PageTable) };
86
87 let l1_entry = l1_table[l1_index];
88 if !l1_entry.is_present() {
89 return None;
90 }
91
92 Some(l1_entry)
93}
94
/// Extension trait adding a user-accessibility query to [`PageTableEntry`].
pub trait PageTableEntryExt {
    /// Returns `true` if this entry permits access from user mode.
    fn is_user_accessible(&self) -> bool;
}
99
100impl PageTableEntryExt for PageTableEntry {
101 fn is_user_accessible(&self) -> bool {
102 // Check user bit (bit 2) in flags
103 self.flags().contains(PageFlags::USER)
104 }
105}