⚠️ VeridianOS Kernel Documentation - This is low-level `no_std` kernel code. Treat all functions as unsafe unless explicitly documented otherwise.

veridian_kernel/mm/page_table.rs

//! Page table management for virtual memory
//!
//! Provides page table structures and operations for the 4-level paging
//! scheme used on all supported architectures.

#![allow(dead_code)]

use core::{
    marker::PhantomData,
    ops::{Index, IndexMut},
};

use super::{FrameNumber, PageFlags, PhysicalAddress, VirtualAddress, FRAME_ALLOCATOR};
use crate::error::KernelError;

/// Number of entries in a page table
pub const PAGE_TABLE_ENTRIES: usize = 512;

/// Page table entry
#[derive(Debug, Clone, Copy)]
#[repr(transparent)]
pub struct PageTableEntry {
    entry: u64,
}

impl PageTableEntry {
    /// Create an empty/unused entry
    pub const fn empty() -> Self {
        Self { entry: 0 }
    }

    /// Check if the entry is unused
    pub const fn is_unused(&self) -> bool {
        self.entry == 0
    }

    /// Check if the entry is present
    pub const fn is_present(&self) -> bool {
        self.entry & PageFlags::PRESENT.0 != 0
    }

    /// Get the physical frame this entry points to
    pub fn frame(&self) -> Option<FrameNumber> {
        if self.is_present() {
            Some(FrameNumber::new((self.entry & 0x000FFFFF_FFFFF000) >> 12))
        } else {
            None
        }
    }

    /// Get the address this entry points to
    pub fn addr(&self) -> Option<PhysicalAddress> {
        self.frame().map(|f| PhysicalAddress::new(f.as_u64() << 12))
    }

    /// Get the flag bits of this entry (the low 12 bits only)
    pub const fn flags(&self) -> PageFlags {
        PageFlags(self.entry & 0xFFF)
    }

    /// Set this entry to map to a frame with given flags
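    ///
    /// # Example
    ///
    /// Illustrative round trip; a sketch only, not compiled into the kernel
    /// build (assumes `FrameNumber` compares by value):
    ///
    /// ```ignore
    /// let mut entry = PageTableEntry::empty();
    /// entry.set(FrameNumber::new(0x1234), PageFlags::PRESENT | PageFlags::WRITABLE);
    /// assert!(entry.is_present());
    /// assert_eq!(entry.frame(), Some(FrameNumber::new(0x1234)));
    /// entry.clear();
    /// assert!(entry.is_unused());
    /// ```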
    pub fn set(&mut self, frame: FrameNumber, flags: PageFlags) {
        self.entry = (frame.as_u64() << 12) | flags.0;
    }

    /// Set this entry to map to an address with given flags
    pub fn set_addr(&mut self, addr: PhysicalAddress, flags: PageFlags) {
        self.set(FrameNumber::new(addr.as_u64() >> 12), flags);
    }

    /// Clear this entry
    pub fn clear(&mut self) {
        self.entry = 0;
    }
}

/// A page table with 512 entries
#[repr(C, align(4096))]
pub struct PageTable {
    entries: [PageTableEntry; PAGE_TABLE_ENTRIES],
}

impl PageTable {
    /// Create a new empty page table
    pub const fn new() -> Self {
        Self {
            entries: [PageTableEntry::empty(); PAGE_TABLE_ENTRIES],
        }
    }

    /// Clear all entries
    pub fn zero(&mut self) {
        for entry in &mut self.entries {
            entry.clear();
        }
    }

    /// Get an iterator over all entries
    pub fn iter(&self) -> impl Iterator<Item = &PageTableEntry> {
        self.entries.iter()
    }

    /// Get a mutable iterator over all entries
    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut PageTableEntry> {
        self.entries.iter_mut()
    }
}

impl Default for PageTable {
    fn default() -> Self {
        Self::new()
    }
}

impl Index<usize> for PageTable {
    type Output = PageTableEntry;

    fn index(&self, index: usize) -> &Self::Output {
        &self.entries[index]
    }
}

impl IndexMut<usize> for PageTable {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.entries[index]
    }
}

impl Index<PageTableIndex> for PageTable {
    type Output = PageTableEntry;

    fn index(&self, index: PageTableIndex) -> &Self::Output {
        &self.entries[usize::from(index)]
    }
}

impl IndexMut<PageTableIndex> for PageTable {
    fn index_mut(&mut self, index: PageTableIndex) -> &mut Self::Output {
        &mut self.entries[usize::from(index)]
    }
}

/// An index into a page table
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PageTableIndex(u16);

impl PageTableIndex {
    /// Create a new index; panics if the index is >= 512
    pub fn new(index: u16) -> Self {
        assert!(
            (index as usize) < PAGE_TABLE_ENTRIES,
            "page table index out of bounds"
        );
        Self(index)
    }

    /// Create a new index, truncating to the low 9 bits if >= 512
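    ///
    /// # Example
    ///
    /// A sketch of the masking behaviour (only the low 9 bits survive):
    ///
    /// ```ignore
    /// assert_eq!(PageTableIndex::new_truncate(512), PageTableIndex::new(0));
    /// assert_eq!(PageTableIndex::new_truncate(513), PageTableIndex::new(1));
    /// ```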
    pub const fn new_truncate(index: u16) -> Self {
        Self(index & 0x1FF)
    }
}

impl From<PageTableIndex> for usize {
    fn from(index: PageTableIndex) -> Self {
        index.0 as usize
    }
}

impl From<u16> for PageTableIndex {
    fn from(index: u16) -> Self {
        Self::new(index)
    }
}

impl From<usize> for PageTableIndex {
    fn from(index: usize) -> Self {
        assert!(index < PAGE_TABLE_ENTRIES);
        Self(index as u16)
    }
}

/// A 4-level page table hierarchy
pub struct PageTableHierarchy {
    /// Level 4 (PML4/PGD) table physical address
    pub l4_table: PhysicalAddress,
}

impl PageTableHierarchy {
    /// Create a new page table hierarchy
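    ///
    /// Allocates one frame from the global frame allocator for the root (L4)
    /// table and zeroes it.
    ///
    /// # Example
    ///
    /// A sketch; this needs a live frame allocator, so kernel context only:
    ///
    /// ```ignore
    /// let hierarchy = PageTableHierarchy::new()?;
    /// let root: PhysicalAddress = hierarchy.l4_addr();
    /// ```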
    pub fn new() -> Result<Self, KernelError> {
        let frame = FRAME_ALLOCATOR
            .lock()
            .allocate_frames(1, None)
            .map_err(|_| KernelError::OutOfMemory {
                requested: 1,
                available: 0,
            })?;
        let l4_addr = PhysicalAddress::new(frame.as_u64() << 12);

        // Zero the L4 table so all entries start as "not present".
        // Without this, garbage data in the frame looks like valid page
        // table entries, causing the mapper to follow bogus pointers.
        // SAFETY: The virtual address is computed via phys_to_virt_addr,
        // which adds the bootloader's physical memory mapping offset.
        unsafe {
            let virt = super::phys_to_virt_addr(l4_addr.as_u64());
            core::ptr::write_bytes(virt as *mut u8, 0, 4096);
        }

        Ok(Self { l4_table: l4_addr })
    }

    /// Get the L4 table address
    pub const fn l4_addr(&self) -> PhysicalAddress {
        self.l4_table
    }

    /// Create an empty page table hierarchy for unit tests.
    ///
    /// This avoids calling the frame allocator, which is unavailable
    /// in the host test environment.
    #[cfg(test)]
    pub fn empty_for_test() -> Self {
        Self {
            l4_table: PhysicalAddress::new(0),
        }
    }
}

/// Virtual address breakdown for 4-level paging
#[derive(Debug, Clone, Copy)]
pub struct VirtualAddressBreakdown {
    pub l4_index: PageTableIndex,
    pub l3_index: PageTableIndex,
    pub l2_index: PageTableIndex,
    pub l1_index: PageTableIndex,
    pub page_offset: u16,
}

impl VirtualAddressBreakdown {
    /// Break down a virtual address into page table indices
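    ///
    /// # Example
    ///
    /// A sketch using an address assembled from known indices (assumes a
    /// `VirtualAddress::new` constructor mirroring `PhysicalAddress::new`):
    ///
    /// ```ignore
    /// // l4 = 1, l3 = 2, l2 = 3, l1 = 4, offset = 0x56
    /// let raw = (1u64 << 39) | (2 << 30) | (3 << 21) | (4 << 12) | 0x56;
    /// let b = VirtualAddressBreakdown::new(VirtualAddress::new(raw));
    /// assert_eq!(usize::from(b.l4_index), 1);
    /// assert_eq!(b.page_offset, 0x56);
    /// ```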
    pub fn new(addr: VirtualAddress) -> Self {
        let addr = addr.as_u64();
        Self {
            l4_index: PageTableIndex::new_truncate((addr >> 39) as u16),
            l3_index: PageTableIndex::new_truncate((addr >> 30) as u16),
            l2_index: PageTableIndex::new_truncate((addr >> 21) as u16),
            l1_index: PageTableIndex::new_truncate((addr >> 12) as u16),
            page_offset: (addr & 0xFFF) as u16,
        }
    }
}

/// Active page table (architecture-specific)
pub struct ActivePageTable {
    l4_table: PhysicalAddress,
    _phantom: PhantomData<PageTable>,
}

impl ActivePageTable {
    /// Create from the current active page table
    #[cfg(target_arch = "x86_64")]
    pub fn current() -> Self {
        use crate::arch::x86_64::mmu;
        Self {
            l4_table: mmu::read_cr3(),
            _phantom: PhantomData,
        }
    }

    /// Create from the current active page table
    #[cfg(target_arch = "aarch64")]
    pub fn current() -> Self {
        let ttbr0: u64;
        // SAFETY: Reading TTBR0_EL1 (Translation Table Base Register 0) is a
        // read-only operation on a system register that is always accessible at
        // EL1 (kernel mode). It has no side effects beyond returning the current
        // page table base address. The kernel always runs at EL1 when this is
        // called, so the register access is valid.
        unsafe {
            core::arch::asm!("mrs {}, ttbr0_el1", out(reg) ttbr0);
        }
        Self {
            l4_table: PhysicalAddress::new(ttbr0 & 0x0000_FFFF_FFFF_F000),
            _phantom: PhantomData,
        }
    }

    /// Create from the current active page table
    #[cfg(target_arch = "riscv64")]
    pub fn current() -> Self {
        let satp: u64;
        // SAFETY: Reading the SATP (Supervisor Address Translation and Protection)
        // CSR is a read-only operation on a control/status register that is always
        // accessible in supervisor mode. It returns the current page table
        // configuration with no side effects. The kernel runs in S-mode when this
        // is called.
        unsafe {
            core::arch::asm!("csrr {}, satp", out(reg) satp);
        }
        let ppn = satp & 0x0FFF_FFFF_FFFF;
        Self {
            l4_table: PhysicalAddress::new(ppn << 12),
            _phantom: PhantomData,
        }
    }

    /// Switch to this page table
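    ///
    /// # Example
    ///
    /// A sketch only: the caller must guarantee the new tables still map the
    /// currently executing kernel code and stack, or the next instruction
    /// fetch will fault.
    ///
    /// ```ignore
    /// let table = ActivePageTable::current();
    /// // ... rebuild or modify the hierarchy, keeping the kernel mapped ...
    /// table.make_active();
    /// ```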
    pub fn make_active(&self) {
        #[cfg(target_arch = "x86_64")]
        {
            use crate::arch::x86_64::mmu;
            mmu::write_cr3(self.l4_table);
        }

        #[cfg(target_arch = "aarch64")]
        {
            // SAFETY: Writing TTBR0_EL1 switches the active page table for EL0/EL1
            // translations. `self.l4_table` must contain a valid physical address of
            // a properly constructed page table hierarchy. The ISB ensures the
            // pipeline is flushed so subsequent instructions use the new translation
            // tables. This is only called from kernel context (EL1) where TTBR0_EL1
            // is writable. The caller is responsible for ensuring the new page table
            // maps all memory the kernel needs to continue executing.
            unsafe {
                core::arch::asm!("msr ttbr0_el1, {}", in(reg) self.l4_table.as_u64());
                core::arch::asm!("isb");
            }
        }

        #[cfg(target_arch = "riscv64")]
        {
            // Mode 9 selects Sv48 (4-level paging); mode 8 would be Sv39.
            let satp = (9u64 << 60) | (self.l4_table.as_u64() >> 12);
            // SAFETY: Writing the SATP CSR switches the active page table in S-mode.
            // `self.l4_table` must contain a valid physical address of a root page
            // table. The caller is responsible for ensuring the new page table maps
            // all memory the kernel needs to continue executing. We are in S-mode,
            // so SATP is writable.
            unsafe {
                core::arch::asm!("csrw satp, {}", in(reg) satp);
            }
        }
    }

    /// Get the physical address of the L4 table
    pub const fn l4_phys(&self) -> PhysicalAddress {
        self.l4_table
    }
}

/// Page mapper for modifying page tables
pub struct PageMapper {
    l4_table: *mut PageTable,
    /// Recursive mapping index (typically 510 on x86_64)
    recursive_index: Option<PageTableIndex>,
}

impl PageMapper {
    /// Create a new page mapper (unsafe: requires valid mapped L4 table)
    ///
    /// # Safety
    ///
    /// The l4_table pointer must:
    /// - Point to a valid, mapped page table
    /// - Remain valid for the lifetime of the PageMapper
    /// - Not be accessed through any other means while this exists
    pub unsafe fn new(l4_table: *mut PageTable) -> Self {
        Self {
            l4_table,
            recursive_index: None,
        }
    }

    /// Create a new page mapper with recursive mapping
    ///
    /// # Safety
    ///
    /// Same requirements as `new`, plus:
    /// - The recursive_index must be set up for recursive mapping
    pub unsafe fn new_with_recursive(
        l4_table: *mut PageTable,
        recursive_index: PageTableIndex,
    ) -> Self {
        Self {
            l4_table,
            recursive_index: Some(recursive_index),
        }
    }

    /// Map a page to a frame
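    ///
    /// Intermediate tables are allocated from `allocator` (and zeroed) on
    /// demand; mapping an already-present page fails with `AlreadyExists`.
    ///
    /// # Example
    ///
    /// A sketch (kernel context; `mapper` and `alloc` are placeholders for a
    /// `PageMapper` and a `FrameAllocator` obtained during setup, and
    /// `VirtualAddress::new` is assumed to mirror `PhysicalAddress::new`):
    ///
    /// ```ignore
    /// mapper.map_page(
    ///     VirtualAddress::new(0xFFFF_8000_0000_1000),
    ///     FrameNumber::new(0x1234),
    ///     PageFlags::PRESENT | PageFlags::WRITABLE,
    ///     &mut alloc,
    /// )?;
    /// ```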
    pub fn map_page(
        &mut self,
        page: VirtualAddress,
        frame: FrameNumber,
        flags: PageFlags,
        allocator: &mut impl FrameAllocator,
    ) -> Result<(), KernelError> {
        let breakdown = VirtualAddressBreakdown::new(page);

        // Get L4 table
        // SAFETY: `self.l4_table` was provided by the caller of `PageMapper::new`,
        // who guaranteed it points to a valid, mapped, 4096-byte-aligned page table
        // that remains valid for the lifetime of this PageMapper. No other mutable
        // references to this table exist (exclusive access contract from `new`).
        let l4_table = unsafe { &mut *self.l4_table };
        let l4_entry = &mut l4_table[breakdown.l4_index];

        // Intermediate page table entries must include USER if the leaf
        // mapping is user-accessible; x86_64 checks the USER bit at all
        // four levels.
        let intermediate_flags = if flags.contains(PageFlags::USER) {
            PageFlags::PRESENT | PageFlags::WRITABLE | PageFlags::USER
        } else {
            PageFlags::PRESENT | PageFlags::WRITABLE
        };

        // Get or create the L3 table
        if !l4_entry.is_present() {
            let frame =
                allocator
                    .allocate_frames(1, None)
                    .map_err(|_| KernelError::OutOfMemory {
                        requested: 1,
                        available: 0,
                    })?;
            // Zero the new page table frame before use.
            // SAFETY: phys_to_virt_addr converts the physical frame address
            // to a valid virtual address in the kernel's physical memory mapping.
            unsafe {
                let virt = super::phys_to_virt_addr(frame.as_u64() << 12);
                core::ptr::write_bytes(virt as *mut u8, 0, 4096);
            }
            l4_entry.set(frame, intermediate_flags);
        } else if flags.contains(PageFlags::USER) && !l4_entry.flags().contains(PageFlags::USER) {
            // Existing entry needs the USER bit added for a user-space child mapping
            let current_flags = l4_entry.flags();
            let addr = l4_entry.addr().unwrap();
            l4_entry.set_addr(addr, current_flags | PageFlags::USER);
        }
        // The entry is now present (it either already was, or was set above),
        // so `addr()` is guaranteed to return `Some`.
        let l3_phys = l4_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L4 entry present",
            actual: "not present",
        })?;
        // SAFETY: phys_to_virt_addr converts the physical address to the
        // corresponding virtual address. The frame is either pre-existing
        // (valid page table) or freshly allocated and zeroed above.
        let l3_table =
            unsafe { &mut *(super::phys_to_virt_addr(l3_phys.as_u64()) as *mut PageTable) };
        let l3_entry = &mut l3_table[breakdown.l3_index];

        // Get or create the L2 table
        if !l3_entry.is_present() {
            let frame =
                allocator
                    .allocate_frames(1, None)
                    .map_err(|_| KernelError::OutOfMemory {
                        requested: 1,
                        available: 0,
                    })?;
            // Zero the new page table frame before use.
            // SAFETY: Physical address converted to virtual via the kernel's
            // physical memory mapping; the frame is freshly allocated and
            // exclusively owned.
            unsafe {
                let virt = super::phys_to_virt_addr(frame.as_u64() << 12);
                core::ptr::write_bytes(virt as *mut u8, 0, 4096);
            }
            l3_entry.set(frame, intermediate_flags);
        } else if flags.contains(PageFlags::USER) && !l3_entry.flags().contains(PageFlags::USER) {
            let current_flags = l3_entry.flags();
            let addr = l3_entry.addr().unwrap();
            l3_entry.set_addr(addr, current_flags | PageFlags::USER);
        }
        let l2_phys = l3_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L3 entry present",
            actual: "not present",
        })?;
        // SAFETY: Physical address from a present L3 entry, converted to virtual
        // via the kernel's physical memory mapping.
        let l2_table =
            unsafe { &mut *(super::phys_to_virt_addr(l2_phys.as_u64()) as *mut PageTable) };
        let l2_entry = &mut l2_table[breakdown.l2_index];

        // Get or create the L1 table
        if !l2_entry.is_present() {
            let frame =
                allocator
                    .allocate_frames(1, None)
                    .map_err(|_| KernelError::OutOfMemory {
                        requested: 1,
                        available: 0,
                    })?;
            // Zero the new page table frame before use.
            // SAFETY: Physical address converted to virtual via the kernel's
            // physical memory mapping; the frame is freshly allocated and
            // exclusively owned.
            unsafe {
                let virt = super::phys_to_virt_addr(frame.as_u64() << 12);
                core::ptr::write_bytes(virt as *mut u8, 0, 4096);
            }
            l2_entry.set(frame, intermediate_flags);
        } else if flags.contains(PageFlags::USER) && !l2_entry.flags().contains(PageFlags::USER) {
            let current_flags = l2_entry.flags();
            let addr = l2_entry.addr().unwrap();
            l2_entry.set_addr(addr, current_flags | PageFlags::USER);
        }
        let l1_phys = l2_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L2 entry present",
            actual: "not present",
        })?;
        // SAFETY: Physical address from a present L2 entry, converted to virtual
        // via the kernel's physical memory mapping.
        let l1_table =
            unsafe { &mut *(super::phys_to_virt_addr(l1_phys.as_u64()) as *mut PageTable) };

        // Map the page
        let entry = &mut l1_table[breakdown.l1_index];
        if entry.is_present() {
            return Err(KernelError::AlreadyExists {
                resource: "page mapping",
                id: page.as_u64(),
            });
        }
        entry.set(frame, flags | PageFlags::PRESENT);

        Ok(())
    }

    /// Look up the physical frame mapped at a virtual address.
    ///
    /// Walks the page table hierarchy and returns the frame number and flags
    /// of the L1 (leaf) entry if the page is present at all four levels.
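    ///
    /// # Example
    ///
    /// A sketch; `mapper` and `page` are placeholders for values set up earlier:
    ///
    /// ```ignore
    /// let (frame, flags) = mapper.translate_page(page)?;
    /// assert!(flags.contains(PageFlags::PRESENT));
    /// ```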
    pub fn translate_page(
        &self,
        page: VirtualAddress,
    ) -> Result<(FrameNumber, PageFlags), KernelError> {
        let breakdown = VirtualAddressBreakdown::new(page);

        // SAFETY: Same invariants as the other PageMapper methods: `self.l4_table`
        // was validated by the caller of `PageMapper::new`.
        let l4_table = unsafe { &*self.l4_table };
        let l4_entry = &l4_table[breakdown.l4_index];
        if !l4_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l3_phys = l4_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L4 entry has address",
            actual: "no address",
        })?;
        // SAFETY: Physical address from a present L4 entry, converted to virtual
        // via the kernel's physical memory mapping.
        let l3_table =
            unsafe { &*(super::phys_to_virt_addr(l3_phys.as_u64()) as *const PageTable) };
        let l3_entry = &l3_table[breakdown.l3_index];
        if !l3_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l2_phys = l3_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L3 entry has address",
            actual: "no address",
        })?;
        // SAFETY: Physical address from a present L3 entry, converted to virtual
        // via the kernel's physical memory mapping.
        let l2_table =
            unsafe { &*(super::phys_to_virt_addr(l2_phys.as_u64()) as *const PageTable) };
        let l2_entry = &l2_table[breakdown.l2_index];
        if !l2_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l1_phys = l2_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L2 entry has address",
            actual: "no address",
        })?;
        // SAFETY: Physical address from a present L2 entry, converted to virtual
        // via the kernel's physical memory mapping.
        let l1_table =
            unsafe { &*(super::phys_to_virt_addr(l1_phys.as_u64()) as *const PageTable) };
        let entry = &l1_table[breakdown.l1_index];

        let frame = entry.frame().ok_or(KernelError::UnmappedMemory {
            addr: page.as_u64() as usize,
        })?;

        Ok((frame, entry.flags()))
    }

    /// Update the flags on an existing page table entry without changing the
    /// mapped frame.
    ///
    /// Returns the old flags on success.
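    ///
    /// # Example
    ///
    /// A sketch: dropping `WRITABLE` to make a mapped page read-only
    /// (`mapper` and `page` are placeholders):
    ///
    /// ```ignore
    /// let old = mapper.update_page_flags(page, PageFlags::PRESENT)?;
    /// assert!(old.contains(PageFlags::WRITABLE));
    /// ```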
    pub fn update_page_flags(
        &mut self,
        page: VirtualAddress,
        new_flags: PageFlags,
    ) -> Result<PageFlags, KernelError> {
        let breakdown = VirtualAddressBreakdown::new(page);

        // SAFETY: Same invariants as the other PageMapper methods.
        let l4_table = unsafe { &*self.l4_table };
        let l4_entry = &l4_table[breakdown.l4_index];
        if !l4_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l3_phys = phys_from_entry(l4_entry, page)?;
        // SAFETY: Physical address from a present L4 entry, converted to virtual
        // via the kernel's physical memory mapping.
        let l3_table = unsafe { &*(super::phys_to_virt_addr(l3_phys) as *const PageTable) };
        let l3_entry = &l3_table[breakdown.l3_index];
        if !l3_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l2_phys = phys_from_entry(l3_entry, page)?;
        // SAFETY: Physical address from a present L3 entry, converted to virtual
        // via the kernel's physical memory mapping.
        let l2_table = unsafe { &*(super::phys_to_virt_addr(l2_phys) as *const PageTable) };
        let l2_entry = &l2_table[breakdown.l2_index];
        if !l2_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l1_phys = phys_from_entry(l2_entry, page)?;
        // SAFETY: Physical address from a present L2 entry, converted to virtual
        // via the kernel's physical memory mapping.
        let l1_table =
            unsafe { &mut *(super::phys_to_virt_addr(l1_phys) as *mut PageTable) };
        let entry = &mut l1_table[breakdown.l1_index];

        let frame = entry.frame().ok_or(KernelError::UnmappedMemory {
            addr: page.as_u64() as usize,
        })?;

        let old_flags = entry.flags();
        entry.set(frame, new_flags | PageFlags::PRESENT);
        Ok(old_flags)
    }

    /// Unmap a page
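    ///
    /// Clears the L1 entry, flushes the TLB entry for `page`, and returns the
    /// frame that was mapped so the caller can release it.
    ///
    /// # Example
    ///
    /// A sketch (`mapper` and `page` are placeholders):
    ///
    /// ```ignore
    /// let frame = mapper.unmap_page(page)?;
    /// // `frame` is now unreferenced; hand it back to the frame allocator.
    /// ```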
    pub fn unmap_page(&mut self, page: VirtualAddress) -> Result<FrameNumber, KernelError> {
        let breakdown = VirtualAddressBreakdown::new(page);

        // Walk the page table hierarchy.
        // SAFETY: `self.l4_table` was validated by the caller of `PageMapper::new`
        // to point to a valid, mapped page table. Exclusive access is guaranteed by
        // the PageMapper ownership contract.
        let l4_table = unsafe { &mut *self.l4_table };
        let l4_entry = &l4_table[breakdown.l4_index];
        if !l4_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        // `is_present()` returned true, so `addr()` is guaranteed to return `Some`.
        let l3_phys = l4_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L4 entry has address",
            actual: "no address",
        })?;
        // SAFETY: Physical address from a present L4 entry, converted to virtual
        // via the kernel's physical memory mapping.
        let l3_table =
            unsafe { &mut *(super::phys_to_virt_addr(l3_phys.as_u64()) as *mut PageTable) };
        let l3_entry = &l3_table[breakdown.l3_index];
        if !l3_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l2_phys = l3_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L3 entry has address",
            actual: "no address",
        })?;
        // SAFETY: Physical address from a present L3 entry, converted to virtual
        // via the kernel's physical memory mapping.
        let l2_table =
            unsafe { &mut *(super::phys_to_virt_addr(l2_phys.as_u64()) as *mut PageTable) };
        let l2_entry = &l2_table[breakdown.l2_index];
        if !l2_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l1_phys = l2_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L2 entry has address",
            actual: "no address",
        })?;
        // SAFETY: Physical address from a present L2 entry, converted to virtual
        // via the kernel's physical memory mapping.
        let l1_table =
            unsafe { &mut *(super::phys_to_virt_addr(l1_phys.as_u64()) as *mut PageTable) };

        // Unmap the page
        let entry = &mut l1_table[breakdown.l1_index];
        let frame = entry.frame().ok_or(KernelError::UnmappedMemory {
            addr: page.as_u64() as usize,
        })?;
        entry.clear();

        // Flush the TLB entry for the unmapped page so stale translations are
        // not used. Each architecture has its own invalidation instruction.
        #[cfg(target_arch = "x86_64")]
        crate::arch::x86_64::mmu::flush_tlb_address(page.as_u64());

        #[cfg(target_arch = "aarch64")]
        {
            // SAFETY: TLBI invalidates a single TLB entry for the given virtual
            // address (the operand encodes VA[55:12], hence the shift). This is a
            // non-destructive, privileged operation.
            unsafe {
                core::arch::asm!(
                    "tlbi vaae1is, {0}",
                    "dsb ish",
                    "isb",
                    in(reg) (page.as_u64() >> 12),
                );
            }
        }

        #[cfg(target_arch = "riscv64")]
        {
            // SAFETY: sfence.vma invalidates TLB entries for the given virtual
            // address. This is a non-destructive, privileged operation.
            unsafe {
                core::arch::asm!(
                    "sfence.vma {0}, zero",
                    in(reg) page.as_u64(),
                );
            }
        }

        Ok(frame)
    }
}

/// Extract the physical address from a present page table entry, returning it
/// as a raw u64 suitable for passing to `phys_to_virt_addr`.
fn phys_from_entry(entry: &PageTableEntry, page: VirtualAddress) -> Result<u64, KernelError> {
    entry
        .addr()
        .map(|a| a.as_u64())
        .ok_or(KernelError::UnmappedMemory {
            addr: page.as_u64() as usize,
        })
}

/// Frame allocator trait for page mapper
pub trait FrameAllocator {
    /// Allocate frames
    fn allocate_frames(
        &mut self,
        count: usize,
        numa_node: Option<usize>,
    ) -> Result<FrameNumber, super::FrameAllocatorError>;
}
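
// Host-side unit tests for the architecture-independent logic. These are a
// sketch: they assume `PageFlags` is a bitflags-style wrapper whose present
// bit is `PageFlags::PRESENT`, that `FrameNumber` derives `PartialEq` and
// `Debug`, and that `VirtualAddress::new` mirrors `PhysicalAddress::new`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn entry_round_trip() {
        let mut entry = PageTableEntry::empty();
        assert!(entry.is_unused());
        entry.set(FrameNumber::new(0x1234), PageFlags::PRESENT);
        assert!(entry.is_present());
        assert_eq!(entry.frame(), Some(FrameNumber::new(0x1234)));
        entry.clear();
        assert!(entry.is_unused());
    }

    #[test]
    fn index_truncation_masks_to_nine_bits() {
        assert_eq!(PageTableIndex::new_truncate(512), PageTableIndex::new(0));
        assert_eq!(PageTableIndex::new_truncate(0x1FF), PageTableIndex::new(511));
    }

    #[test]
    fn breakdown_extracts_all_four_indices() {
        // l4 = 1, l3 = 2, l2 = 3, l1 = 4, offset = 0x56
        let raw = (1u64 << 39) | (2 << 30) | (3 << 21) | (4 << 12) | 0x56;
        let b = VirtualAddressBreakdown::new(VirtualAddress::new(raw));
        assert_eq!(usize::from(b.l4_index), 1);
        assert_eq!(usize::from(b.l3_index), 2);
        assert_eq!(usize::from(b.l2_index), 3);
        assert_eq!(usize::from(b.l1_index), 4);
        assert_eq!(b.page_offset, 0x56);
    }
}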