//! Four-level page-table structures and a mapper that walks them through
//! the physical-memory direct map, with per-architecture glue for x86_64,
//! aarch64, and riscv64.

#![allow(dead_code)]

use core::{
    marker::PhantomData,
    ops::{Index, IndexMut},
};

use super::{FrameNumber, PageFlags, PhysicalAddress, VirtualAddress, FRAME_ALLOCATOR};
use crate::error::KernelError;

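/// Number of entries in one page table: 512 eight-byte entries fill a 4 KiB page.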
pub const PAGE_TABLE_ENTRIES: usize = 512;

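/// A single 64-bit page-table entry. In the layout assumed throughout this
/// module, bits 0..=11 hold the `PageFlags` and bits 12..=51 hold the
/// physical frame number of the next-level table or the mapped page.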
#[derive(Debug, Clone, Copy)]
#[repr(transparent)]
pub struct PageTableEntry {
    entry: u64,
}

impl PageTableEntry {
    pub const fn empty() -> Self {
        Self { entry: 0 }
    }

    pub const fn is_unused(&self) -> bool {
        self.entry == 0
    }

    pub const fn is_present(&self) -> bool {
        self.entry & PageFlags::PRESENT.0 != 0
    }

    pub fn frame(&self) -> Option<FrameNumber> {
        if self.is_present() {
            Some(FrameNumber::new((self.entry & 0x000F_FFFF_FFFF_F000) >> 12))
        } else {
            None
        }
    }

    pub fn addr(&self) -> Option<PhysicalAddress> {
        self.frame().map(|f| PhysicalAddress::new(f.as_u64() << 12))
    }

    pub const fn flags(&self) -> PageFlags {
        // Only the low 12 flag bits are reported; any high flag bits (for
        // example an NX bit, if `PageFlags` defines one) are dropped by
        // this mask.
        PageFlags(self.entry & 0xFFF)
    }

    pub fn set(&mut self, frame: FrameNumber, flags: PageFlags) {
        self.entry = (frame.as_u64() << 12) | flags.0;
    }

    pub fn set_addr(&mut self, addr: PhysicalAddress, flags: PageFlags) {
        self.set(FrameNumber::new(addr.as_u64() >> 12), flags);
    }

    pub fn clear(&mut self) {
        self.entry = 0;
    }
}

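/// A 4 KiB-aligned table of 512 entries; the same layout is used at every
/// level of the hierarchy.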
#[repr(C, align(4096))]
pub struct PageTable {
    entries: [PageTableEntry; PAGE_TABLE_ENTRIES],
}

impl PageTable {
    pub const fn new() -> Self {
        Self {
            entries: [PageTableEntry::empty(); PAGE_TABLE_ENTRIES],
        }
    }

    pub fn zero(&mut self) {
        for entry in &mut self.entries {
            entry.clear();
        }
    }

    pub fn iter(&self) -> impl Iterator<Item = &PageTableEntry> {
        self.entries.iter()
    }

    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut PageTableEntry> {
        self.entries.iter_mut()
    }
}

impl Default for PageTable {
    fn default() -> Self {
        Self::new()
    }
}

impl Index<usize> for PageTable {
    type Output = PageTableEntry;

    fn index(&self, index: usize) -> &Self::Output {
        &self.entries[index]
    }
}

impl IndexMut<usize> for PageTable {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.entries[index]
    }
}

impl Index<PageTableIndex> for PageTable {
    type Output = PageTableEntry;

    fn index(&self, index: PageTableIndex) -> &Self::Output {
        &self.entries[usize::from(index)]
    }
}

impl IndexMut<PageTableIndex> for PageTable {
    fn index_mut(&mut self, index: PageTableIndex) -> &mut Self::Output {
        &mut self.entries[usize::from(index)]
    }
}

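/// An index into a page table, guaranteed to lie in `0..512`.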
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct PageTableIndex(u16);

impl PageTableIndex {
    pub fn new(index: u16) -> Self {
        assert!(
            (index as usize) < PAGE_TABLE_ENTRIES,
            "page table index out of bounds"
        );
        Self(index)
    }

    pub const fn new_truncate(index: u16) -> Self {
        // Keep only the low 9 bits (0..512).
        Self(index & 0x1FF)
    }
}

impl From<PageTableIndex> for usize {
    fn from(index: PageTableIndex) -> Self {
        index.0 as usize
    }
}

impl From<u16> for PageTableIndex {
    fn from(index: u16) -> Self {
        Self::new(index)
    }
}

impl From<usize> for PageTableIndex {
    fn from(index: usize) -> Self {
        assert!(index < PAGE_TABLE_ENTRIES, "page table index out of bounds");
        Self(index as u16)
    }
}

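/// Owns a freshly allocated, zeroed top-level (L4) table for a new address
/// space.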
pub struct PageTableHierarchy {
    pub l4_table: PhysicalAddress,
}

impl PageTableHierarchy {
    pub fn new() -> Result<Self, KernelError> {
        let frame = FRAME_ALLOCATOR
            .lock()
            .allocate_frames(1, None)
            .map_err(|_| KernelError::OutOfMemory {
                requested: 1,
                available: 0,
            })?;
        let l4_addr = PhysicalAddress::new(frame.as_u64() << 12);

        // Zero the new table through the physical-memory direct map so that
        // every entry starts out non-present.
        unsafe {
            let virt = super::phys_to_virt_addr(l4_addr.as_u64());
            core::ptr::write_bytes(virt as *mut u8, 0, 4096);
        }

        Ok(Self { l4_table: l4_addr })
    }

    pub const fn l4_addr(&self) -> PhysicalAddress {
        self.l4_table
    }

    #[cfg(test)]
    pub fn empty_for_test() -> Self {
        Self {
            l4_table: PhysicalAddress::new(0),
        }
    }
}

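/// The four 9-bit table indices and 12-bit page offset of a virtual address
/// under a four-level (48-bit) translation scheme.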
#[derive(Debug, Clone, Copy)]
pub struct VirtualAddressBreakdown {
    pub l4_index: PageTableIndex,
    pub l3_index: PageTableIndex,
    pub l2_index: PageTableIndex,
    pub l1_index: PageTableIndex,
    pub page_offset: u16,
}

impl VirtualAddressBreakdown {
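    /// Splits `addr` into its table indices and page offset. As a worked
    /// example, `0x0080_8060_4123` breaks down into L4 = 1, L3 = 2, L2 = 3,
    /// L1 = 4, offset = 0x123.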
    pub fn new(addr: VirtualAddress) -> Self {
        let addr = addr.as_u64();
        Self {
            l4_index: PageTableIndex::new_truncate((addr >> 39) as u16),
            l3_index: PageTableIndex::new_truncate((addr >> 30) as u16),
            l2_index: PageTableIndex::new_truncate((addr >> 21) as u16),
            l1_index: PageTableIndex::new_truncate((addr >> 12) as u16),
            page_offset: (addr & 0xFFF) as u16,
        }
    }
}

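/// The page-table root currently installed in (or installable into) the MMU.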
pub struct ActivePageTable {
    l4_table: PhysicalAddress,
    _phantom: PhantomData<PageTable>,
}

impl ActivePageTable {
    #[cfg(target_arch = "x86_64")]
    pub fn current() -> Self {
        use crate::arch::x86_64::mmu;
        Self {
            l4_table: mmu::read_cr3(),
            _phantom: PhantomData,
        }
    }

    #[cfg(target_arch = "aarch64")]
    pub fn current() -> Self {
        let ttbr0: u64;
        unsafe {
            core::arch::asm!("mrs {}, ttbr0_el1", out(reg) ttbr0);
        }
        Self {
            // Drop the ASID (bits 48..=63) and low attribute bits, keeping
            // only the table base address.
            l4_table: PhysicalAddress::new(ttbr0 & 0x0000_FFFF_FFFF_F000),
            _phantom: PhantomData,
        }
    }

    #[cfg(target_arch = "riscv64")]
    pub fn current() -> Self {
        let satp: u64;
        unsafe {
            core::arch::asm!("csrr {}, satp", out(reg) satp);
        }
        // The low 44 bits of satp hold the root table's physical page number.
        let ppn = satp & 0x0FFF_FFFF_FFFF;
        Self {
            l4_table: PhysicalAddress::new(ppn << 12),
            _phantom: PhantomData,
        }
    }

    pub fn make_active(&self) {
        #[cfg(target_arch = "x86_64")]
        {
            use crate::arch::x86_64::mmu;
            // Writing CR3 switches tables and flushes non-global TLB entries.
            mmu::write_cr3(self.l4_table);
        }

        #[cfg(target_arch = "aarch64")]
        {
            unsafe {
                core::arch::asm!("msr ttbr0_el1, {}", in(reg) self.l4_table.as_u64());
                core::arch::asm!("isb");
                // No ASID management here, so stale entries for the old
                // tables must be flushed explicitly.
                core::arch::asm!("tlbi vmalle1", "dsb ish", "isb");
            }
        }

        #[cfg(target_arch = "riscv64")]
        {
            // Mode 9 selects Sv48, the four-level scheme matching the
            // L4..=L1 walk in this module (mode 8 would be three-level Sv39).
            let satp = (9 << 60) | (self.l4_table.as_u64() >> 12);
            unsafe {
                core::arch::asm!("csrw satp, {}", in(reg) satp);
                // Writing satp does not flush the TLB by itself.
                core::arch::asm!("sfence.vma");
            }
        }
    }

    pub const fn l4_phys(&self) -> PhysicalAddress {
        self.l4_table
    }
}

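/// Walks and edits a four-level page-table tree. All table frames are
/// accessed through the physical-memory direct map (`phys_to_virt_addr`);
/// `recursive_index` is stored for callers that set one up, but the methods
/// below do not use it.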
pub struct PageMapper {
    l4_table: *mut PageTable,
    recursive_index: Option<PageTableIndex>,
}

impl PageMapper {
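    /// Creates a mapper over the table at `l4_table`.
    ///
    /// # Safety
    ///
    /// `l4_table` must point to a valid, page-aligned `PageTable` that
    /// remains alive and exclusively borrowed for the mapper's lifetime.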
    pub unsafe fn new(l4_table: *mut PageTable) -> Self {
        Self {
            l4_table,
            recursive_index: None,
        }
    }

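    /// Like [`PageMapper::new`], but also records a recursive-mapping index
    /// for the L4 table.
    ///
    /// # Safety
    ///
    /// Same contract as [`PageMapper::new`].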
    pub unsafe fn new_with_recursive(
        l4_table: *mut PageTable,
        recursive_index: PageTableIndex,
    ) -> Self {
        Self {
            l4_table,
            recursive_index: Some(recursive_index),
        }
    }

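    /// Maps the 4 KiB page containing `page` to `frame` with `flags` (plus
    /// `PRESENT`), allocating and zeroing any missing intermediate tables
    /// from `allocator`. Fails with `AlreadyExists` if the page is already
    /// mapped.
    ///
    /// Intended usage, as a sketch only (not compiled as a doctest, and it
    /// assumes the global frame-allocator guard implements [`FrameAllocator`]):
    ///
    /// ```ignore
    /// let mut alloc = FRAME_ALLOCATOR.lock();
    /// mapper.map_page(
    ///     VirtualAddress::new(0xFFFF_8000_0010_0000),
    ///     FrameNumber::new(0x1234),
    ///     PageFlags::WRITABLE,
    ///     &mut *alloc,
    /// )?;
    /// ```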
    pub fn map_page(
        &mut self,
        page: VirtualAddress,
        frame: FrameNumber,
        flags: PageFlags,
        allocator: &mut impl FrameAllocator,
    ) -> Result<(), KernelError> {
        let breakdown = VirtualAddressBreakdown::new(page);

        let l4_table = unsafe { &mut *self.l4_table };
        let l4_entry = &mut l4_table[breakdown.l4_index];

        // Intermediate tables are always mapped writable; USER is propagated
        // down so that user pages stay reachable from every level.
        let intermediate_flags = if flags.contains(PageFlags::USER) {
            PageFlags::PRESENT | PageFlags::WRITABLE | PageFlags::USER
        } else {
            PageFlags::PRESENT | PageFlags::WRITABLE
        };

        // L4 -> L3: create the L3 table if it is missing.
        if !l4_entry.is_present() {
            let table_frame = allocator
                .allocate_frames(1, None)
                .map_err(|_| KernelError::OutOfMemory {
                    requested: 1,
                    available: 0,
                })?;
            unsafe {
                let virt = super::phys_to_virt_addr(table_frame.as_u64() << 12);
                core::ptr::write_bytes(virt as *mut u8, 0, 4096);
            }
            l4_entry.set(table_frame, intermediate_flags);
        } else if flags.contains(PageFlags::USER) && !l4_entry.flags().contains(PageFlags::USER) {
            // Upgrade an existing kernel-only entry so user code can reach
            // the new mapping.
            let current_flags = l4_entry.flags();
            let addr = l4_entry.addr().unwrap();
            l4_entry.set_addr(addr, current_flags | PageFlags::USER);
        }
        let l3_phys = l4_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L4 entry present",
            actual: "not present",
        })?;
        let l3_table =
            unsafe { &mut *(super::phys_to_virt_addr(l3_phys.as_u64()) as *mut PageTable) };
        let l3_entry = &mut l3_table[breakdown.l3_index];

        // L3 -> L2: same get-or-create step.
        if !l3_entry.is_present() {
            let table_frame = allocator
                .allocate_frames(1, None)
                .map_err(|_| KernelError::OutOfMemory {
                    requested: 1,
                    available: 0,
                })?;
            unsafe {
                let virt = super::phys_to_virt_addr(table_frame.as_u64() << 12);
                core::ptr::write_bytes(virt as *mut u8, 0, 4096);
            }
            l3_entry.set(table_frame, intermediate_flags);
        } else if flags.contains(PageFlags::USER) && !l3_entry.flags().contains(PageFlags::USER) {
            let current_flags = l3_entry.flags();
            let addr = l3_entry.addr().unwrap();
            l3_entry.set_addr(addr, current_flags | PageFlags::USER);
        }
        let l2_phys = l3_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L3 entry present",
            actual: "not present",
        })?;
        let l2_table =
            unsafe { &mut *(super::phys_to_virt_addr(l2_phys.as_u64()) as *mut PageTable) };
        let l2_entry = &mut l2_table[breakdown.l2_index];

        // L2 -> L1: same get-or-create step.
        if !l2_entry.is_present() {
            let table_frame = allocator
                .allocate_frames(1, None)
                .map_err(|_| KernelError::OutOfMemory {
                    requested: 1,
                    available: 0,
                })?;
            unsafe {
                let virt = super::phys_to_virt_addr(table_frame.as_u64() << 12);
                core::ptr::write_bytes(virt as *mut u8, 0, 4096);
            }
            l2_entry.set(table_frame, intermediate_flags);
        } else if flags.contains(PageFlags::USER) && !l2_entry.flags().contains(PageFlags::USER) {
            let current_flags = l2_entry.flags();
            let addr = l2_entry.addr().unwrap();
            l2_entry.set_addr(addr, current_flags | PageFlags::USER);
        }
        let l1_phys = l2_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L2 entry present",
            actual: "not present",
        })?;
        let l1_table =
            unsafe { &mut *(super::phys_to_virt_addr(l1_phys.as_u64()) as *mut PageTable) };

        // Finally install the L1 mapping itself, refusing to overwrite one.
        let entry = &mut l1_table[breakdown.l1_index];
        if entry.is_present() {
            return Err(KernelError::AlreadyExists {
                resource: "page mapping",
                id: page.as_u64(),
            });
        }
        entry.set(frame, flags | PageFlags::PRESENT);

        Ok(())
    }

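    /// Walks the tables read-only and returns the frame backing `page`
    /// together with the flags of its L1 entry, or `UnmappedMemory` if any
    /// level of the walk is absent.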
    pub fn translate_page(
        &self,
        page: VirtualAddress,
    ) -> Result<(FrameNumber, PageFlags), KernelError> {
        let breakdown = VirtualAddressBreakdown::new(page);

        let l4_table = unsafe { &*self.l4_table };
        let l4_entry = &l4_table[breakdown.l4_index];
        if !l4_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l3_phys = l4_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L4 entry has address",
            actual: "no address",
        })?;
        let l3_table =
            unsafe { &*(super::phys_to_virt_addr(l3_phys.as_u64()) as *const PageTable) };
        let l3_entry = &l3_table[breakdown.l3_index];
        if !l3_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l2_phys = l3_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L3 entry has address",
            actual: "no address",
        })?;
        let l2_table =
            unsafe { &*(super::phys_to_virt_addr(l2_phys.as_u64()) as *const PageTable) };
        let l2_entry = &l2_table[breakdown.l2_index];
        if !l2_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l1_phys = l2_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L2 entry has address",
            actual: "no address",
        })?;
        let l1_table =
            unsafe { &*(super::phys_to_virt_addr(l1_phys.as_u64()) as *const PageTable) };
        let entry = &l1_table[breakdown.l1_index];

        let frame = entry.frame().ok_or(KernelError::UnmappedMemory {
            addr: page.as_u64() as usize,
        })?;

        Ok((frame, entry.flags()))
    }

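    /// Replaces the flags on the L1 entry for `page` (keeping its frame and
    /// forcing `PRESENT`) and returns the previous flags.
    ///
    /// Note that the TLB is not flushed here; until the caller flushes
    /// `page` on the active address space, the old permissions may still be
    /// cached.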
    pub fn update_page_flags(
        &mut self,
        page: VirtualAddress,
        new_flags: PageFlags,
    ) -> Result<PageFlags, KernelError> {
        let breakdown = VirtualAddressBreakdown::new(page);

        let l4_table = unsafe { &*self.l4_table };
        let l4_entry = &l4_table[breakdown.l4_index];
        if !l4_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l3_phys = table_phys_from_entry(l4_entry, page)?;
        let l3_table = unsafe { &*(super::phys_to_virt_addr(l3_phys) as *const PageTable) };
        let l3_entry = &l3_table[breakdown.l3_index];
        if !l3_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l2_phys = table_phys_from_entry(l3_entry, page)?;
        let l2_table = unsafe { &*(super::phys_to_virt_addr(l2_phys) as *const PageTable) };
        let l2_entry = &l2_table[breakdown.l2_index];
        if !l2_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l1_phys = table_phys_from_entry(l2_entry, page)?;
        let l1_table =
            unsafe { &mut *(super::phys_to_virt_addr(l1_phys) as *mut PageTable) };
        let entry = &mut l1_table[breakdown.l1_index];

        let frame = entry.frame().ok_or(KernelError::UnmappedMemory {
            addr: page.as_u64() as usize,
        })?;

        let old_flags = entry.flags();
        entry.set(frame, new_flags | PageFlags::PRESENT);
        Ok(old_flags)
    }

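    /// Removes the L1 mapping for `page`, flushes its translation, and
    /// returns the frame that backed it. The frame itself is not freed
    /// here; that is left to the caller.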
    pub fn unmap_page(&mut self, page: VirtualAddress) -> Result<FrameNumber, KernelError> {
        let breakdown = VirtualAddressBreakdown::new(page);

        let l4_table = unsafe { &mut *self.l4_table };
        let l4_entry = &l4_table[breakdown.l4_index];
        if !l4_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l3_phys = l4_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L4 entry has address",
            actual: "no address",
        })?;
        let l3_table =
            unsafe { &mut *(super::phys_to_virt_addr(l3_phys.as_u64()) as *mut PageTable) };
        let l3_entry = &l3_table[breakdown.l3_index];
        if !l3_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l2_phys = l3_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L3 entry has address",
            actual: "no address",
        })?;
        let l2_table =
            unsafe { &mut *(super::phys_to_virt_addr(l2_phys.as_u64()) as *mut PageTable) };
        let l2_entry = &l2_table[breakdown.l2_index];
        if !l2_entry.is_present() {
            return Err(KernelError::UnmappedMemory {
                addr: page.as_u64() as usize,
            });
        }

        let l1_phys = l2_entry.addr().ok_or(KernelError::InvalidState {
            expected: "L2 entry has address",
            actual: "no address",
        })?;
        let l1_table =
            unsafe { &mut *(super::phys_to_virt_addr(l1_phys.as_u64()) as *mut PageTable) };

        let entry = &mut l1_table[breakdown.l1_index];
        let frame = entry.frame().ok_or(KernelError::UnmappedMemory {
            addr: page.as_u64() as usize,
        })?;
        entry.clear();

        #[cfg(target_arch = "x86_64")]
        crate::arch::x86_64::mmu::flush_tlb_address(page.as_u64());

        #[cfg(target_arch = "aarch64")]
        {
            unsafe {
                // Invalidate this page's translation for all ASIDs, inner
                // shareable; `tlbi` takes the VA shifted down by 12.
                core::arch::asm!(
                    "tlbi vaae1is, {0}",
                    "dsb ish",
                    "isb",
                    in(reg) (page.as_u64() >> 12),
                );
            }
        }

        #[cfg(target_arch = "riscv64")]
        {
            unsafe {
                // Flush this virtual address for all address spaces (rs2 = zero).
                core::arch::asm!(
                    "sfence.vma {0}, zero",
                    in(reg) page.as_u64(),
                );
            }
        }

        Ok(frame)
    }
}

/// Extracts the physical base address of the next-level table from `entry`,
/// reporting `page` as unmapped if the entry has no address. Shared by every
/// level of the walk in `update_page_flags`.
fn table_phys_from_entry(
    entry: &PageTableEntry,
    page: VirtualAddress,
) -> Result<u64, KernelError> {
    entry
        .addr()
        .map(|a| a.as_u64())
        .ok_or(KernelError::UnmappedMemory {
            addr: page.as_u64() as usize,
        })
}

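/// The allocation interface `PageMapper` needs when it must create
/// intermediate tables. A minimal test double might look like this sketch
/// (hypothetical; it assumes frames `next..end` are free and never exhausted,
/// so no `FrameAllocatorError` is ever produced):
///
/// ```ignore
/// struct BumpFrames {
///     next: u64,
///     end: u64,
/// }
///
/// impl FrameAllocator for BumpFrames {
///     fn allocate_frames(
///         &mut self,
///         count: usize,
///         _numa_node: Option<usize>,
///     ) -> Result<FrameNumber, FrameAllocatorError> {
///         assert!(self.next + count as u64 <= self.end, "test frames exhausted");
///         let frame = FrameNumber::new(self.next);
///         self.next += count as u64;
///         Ok(frame)
///     }
/// }
/// ```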
pub trait FrameAllocator {
    fn allocate_frames(
        &mut self,
        count: usize,
        numa_node: Option<usize>,
    ) -> Result<FrameNumber, super::FrameAllocatorError>;
}