veridian_kernel/arch/x86_64/mmu.rs
1//! x86_64 Memory Management Unit (MMU) support
2//!
3//! Handles x86_64-specific paging setup and management.
4
5// x86_64 MMU support
6
7use crate::mm::{PhysicalAddress, VirtualAddress};
8
9/// Enable paging and set up initial page tables
10pub fn init() {
11 println!("[x86_64 MMU] Initializing paging...");
12
13 // The bootloader should have already set up paging for us
14 // We just need to ensure our kernel is properly mapped
15
16 let cr3 = read_cr3();
17 println!("[x86_64 MMU] Current CR3: 0x{:x}", cr3.as_u64());
18
19 // The bootloader sets up initial page tables with kernel mapped at
20 // L4[256-511]. Each process gets its own L4 with the kernel entries
21 // copied from this root. Dedicated kernel page tables are not needed
22 // because the per-process page tables already include the kernel mapping.
23 // KPTI shadow page tables for Meltdown mitigation are in kpti.rs.
24}
25
26/// Read CR3 register (page table base)
27pub fn read_cr3() -> PhysicalAddress {
28 let cr3: u64;
29 // SAFETY: Reading CR3 is a privileged operation that returns the physical
30 // address of the current page table root. Always accessible in kernel mode.
31 unsafe {
32 core::arch::asm!("mov {}, cr3", out(reg) cr3);
33 }
34 PhysicalAddress::new(cr3 & 0x000FFFFF_FFFFF000)
35}
36
37/// Write CR3 register (page table base)
38pub fn write_cr3(addr: PhysicalAddress) {
39 // SAFETY: Writing CR3 sets the page table root and flushes the TLB. The
40 // caller must ensure `addr` points to a valid, properly aligned PML4 table.
41 unsafe {
42 core::arch::asm!("mov cr3, {}", in(reg) addr.as_u64());
43 }
44}
45
46/// Invalidate TLB entry for virtual address
47pub fn invlpg(virt: VirtualAddress) {
48 // SAFETY: invlpg invalidates the TLB entry for the specified virtual address.
49 // This is a privileged, non-destructive operation that only affects caching.
50 unsafe {
51 core::arch::asm!("invlpg [{}]", in(reg) virt.as_u64());
52 }
53}
54
/// Flush entire TLB by reloading CR3
///
/// Reads CR3 raw and writes the identical value back in a single asm block.
/// Going through `read_cr3()` / `write_cr3()` would strip the low 12 bits
/// (PCID and PWT/PCD flags), silently switching to PCID 0 and losing the
/// cacheability attributes when PCIDs are in use.
pub fn flush_tlb() {
    // SAFETY: Re-writing CR3 with its current, unmodified value is always
    // legal in kernel mode; it only invalidates non-global TLB entries and
    // does not change the active address space.
    unsafe {
        core::arch::asm!(
            "mov {tmp}, cr3",
            "mov cr3, {tmp}",
            tmp = out(reg) _,
            options(nostack, preserves_flags)
        );
    }
}
60
61/// Flush TLB entry for a specific address
62pub fn flush_tlb_address(addr: u64) {
63 invlpg(VirtualAddress::new(addr));
64}
65
66/// Read CR2 register (page fault address)
67pub fn read_cr2() -> VirtualAddress {
68 let cr2: u64;
69 // SAFETY: Reading CR2 returns the faulting virtual address from the last
70 // page fault. Always accessible in kernel mode with no side effects.
71 unsafe {
72 core::arch::asm!("mov {}, cr2", out(reg) cr2);
73 }
74 VirtualAddress::new(cr2)
75}
76
/// Page fault error code bits
///
/// Wraps the raw error code the CPU pushes on a #PF exception and exposes
/// each architecturally defined bit as a predicate.
#[derive(Debug, Clone, Copy)]
pub struct PageFaultErrorCode(u32);

impl PageFaultErrorCode {
    // Bit masks per the x86_64 page-fault error code layout.
    const PROTECTION: u32 = 1 << 0;
    const WRITE: u32 = 1 << 1;
    const USER: u32 = 1 << 2;
    const RESERVED: u32 = 1 << 3;
    const INSTRUCTION: u32 = 1 << 4;

    /// True when `mask`'s bit is set in the raw error code.
    fn bit(&self, mask: u32) -> bool {
        (self.0 & mask) != 0
    }

    /// Was the fault caused by a page-level protection violation?
    pub fn protection_violation(&self) -> bool {
        self.bit(Self::PROTECTION)
    }

    /// Was the access a write?
    pub fn write(&self) -> bool {
        self.bit(Self::WRITE)
    }

    /// Was the access in user mode?
    pub fn user_mode(&self) -> bool {
        self.bit(Self::USER)
    }

    /// Was the fault caused by reserved bit violation?
    pub fn reserved_write(&self) -> bool {
        self.bit(Self::RESERVED)
    }

    /// Was the fault caused by instruction fetch?
    pub fn instruction_fetch(&self) -> bool {
        self.bit(Self::INSTRUCTION)
    }
}
107
108/// Handle page fault
109pub fn handle_page_fault(error_code: u32, faulting_address: VirtualAddress) {
110 let error = PageFaultErrorCode(error_code);
111
112 println!(
113 "[x86_64 MMU] Page fault at 0x{:x}",
114 faulting_address.as_u64()
115 );
116 println!(" Protection violation: {}", error.protection_violation());
117 println!(" Write access: {}", error.write());
118 println!(" User mode: {}", error.user_mode());
119 println!(" Reserved bit: {}", error.reserved_write());
120 println!(" Instruction fetch: {}", error.instruction_fetch());
121
122 // Delegate to the unified page fault handler which attempts demand paging,
123 // copy-on-write, and stack growth before giving up.
124 let info = crate::mm::page_fault::from_x86_64(
125 error_code as u64,
126 faulting_address.as_u64(),
127 0, // RIP not available in this legacy path
128 );
129 if let Err(_e) = crate::mm::page_fault::handle_page_fault(info) {
130 panic!(
131 "Unhandled page fault at 0x{:x}: {}",
132 faulting_address.as_u64(),
133 _e
134 );
135 }
136}