#![allow(dead_code)]

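//! Kernel memory management: physical frame allocation, heap setup, page
//! table walking, and virtual-to-physical translation.
//!
//! A minimal boot sequence sketch (assuming this module is mounted as `mm`;
//! the exact call sites live in the architecture boot code, and
//! `phys_mem_offset` stands in for whatever value the bootloader reports):
//!
//! ```ignore
//! mm::set_phys_mem_offset(phys_mem_offset);
//! mm::init_default();
//! mm::reserve_boot_page_table_frames(); // x86_64 only
//! ```
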
pub mod bootloader;
pub mod cache_aligned;
pub mod cache_topology;
pub mod demand_paging;
pub mod frame_allocator;
pub mod heap;
pub mod ksm;
pub mod page_fault;
pub mod page_table;
pub mod user_validation;
pub mod vas;
pub mod vmm;

#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use core::sync::atomic::{AtomicU64, Ordering};

pub(crate) use frame_allocator::FRAME_ALLOCATOR;
pub use frame_allocator::{
    FrameAllocatorError, FrameNumber, PhysicalAddress, PhysicalFrame, FRAME_SIZE,
};
pub use heap::init as init_heap;
pub use user_validation::{is_user_addr_valid, translate_address as translate_user_address};
pub use vas::VirtualAddressSpace;

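/// Base page size in bytes (4 KiB).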
pub const PAGE_SIZE: usize = 4096;

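/// Offset of the direct physical-memory map within the kernel's virtual
/// address space, stored once during early boot via [`set_phys_mem_offset`].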
pub(crate) static PHYS_MEM_OFFSET: AtomicU64 = AtomicU64::new(0);

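/// Records the physical-memory map offset reported by the bootloader. Must
/// run before anything that walks page tables through the direct map.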
pub fn set_phys_mem_offset(offset: u64) {
    PHYS_MEM_OFFSET.store(offset, Ordering::Release);
}

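/// Translates a physical address to its virtual alias in the direct map.
/// Only valid after [`set_phys_mem_offset`] has run.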
#[inline]
pub fn phys_to_virt_addr(phys: u64) -> u64 {
    phys + PHYS_MEM_OFFSET.load(Ordering::Acquire)
}

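/// A virtual address, newtyped so it cannot be confused with a physical
/// address in function signatures.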
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct VirtualAddress(pub u64);

impl VirtualAddress {
    pub fn new(addr: u64) -> Self {
        Self(addr)
    }

    pub fn as_u64(&self) -> u64 {
        self.0
    }

    pub fn as_usize(&self) -> usize {
        self.0 as usize
    }

    pub fn add(&self, offset: usize) -> Self {
        Self(self.0 + offset as u64)
    }
}

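/// Returns the physical address of the active root page table: CR3 on
/// x86_64, TTBR0_EL1 on aarch64, and the satp PPN shifted into a byte
/// address on RISC-V. On x86_64 and aarch64 the raw register value is
/// returned, so low-order flag/ASID bits may still be set.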
pub fn get_kernel_page_table() -> usize {
    #[cfg(target_arch = "x86_64")]
    {
        let cr3: u64;
        // SAFETY: reading CR3 has no side effects; kernel code runs in ring 0.
        unsafe {
            core::arch::asm!("mov {}, cr3", out(reg) cr3);
        }
        cr3 as usize
    }

    #[cfg(target_arch = "aarch64")]
    {
        let ttbr0: u64;
        // SAFETY: reading TTBR0_EL1 is side-effect free at EL1.
        unsafe {
            core::arch::asm!("mrs {}, TTBR0_EL1", out(reg) ttbr0);
        }
        ttbr0 as usize
    }

    #[cfg(target_arch = "riscv64")]
    {
        let satp: usize;
        // SAFETY: reading the satp CSR is side-effect free in S-mode.
        unsafe {
            core::arch::asm!("csrr {}, satp", out(reg) satp);
        }
        // Sv39/Sv48: the PPN lives in satp[43:0]; shift it into a byte address.
        (satp & 0xFFF_FFFFFFFF) << 12
    }

    #[cfg(target_arch = "riscv32")]
    {
        let satp: usize;
        // SAFETY: reading the satp CSR is side-effect free in S-mode.
        unsafe {
            core::arch::asm!("csrr {}, satp", out(reg) satp);
        }
        // Sv32: the PPN lives in satp[21:0]. The 44-bit mask used on riscv64
        // does not fit in a 32-bit usize, so mask separately here.
        (satp & 0x3F_FFFF) << 12
    }
}

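/// Hardware page sizes supported by the paging code.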
#[repr(usize)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PageSize {
    /// 4 KiB
    Small = 4096,
    /// 2 MiB
    Large = 2 * 1024 * 1024,
    /// 1 GiB
    Huge = 1024 * 1024 * 1024,
}

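/// Architecture-neutral page table entry flags, using the x86_64 bit
/// positions. Flags combine with `|`:
///
/// ```ignore
/// let flags = PageFlags::PRESENT | PageFlags::WRITABLE | PageFlags::USER;
/// assert!(flags.contains(PageFlags::WRITABLE));
/// ```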
#[derive(Debug, Clone, Copy)]
pub struct PageFlags(u64);

impl PageFlags {
    pub const PRESENT: Self = Self(1 << 0);
    pub const WRITABLE: Self = Self(1 << 1);
    pub const USER: Self = Self(1 << 2);
    pub const WRITE_THROUGH: Self = Self(1 << 3);
    pub const NO_CACHE: Self = Self(1 << 4);
    pub const ACCESSED: Self = Self(1 << 5);
    pub const DIRTY: Self = Self(1 << 6);
    pub const HUGE: Self = Self(1 << 7);
    pub const GLOBAL: Self = Self(1 << 8);
    pub const NO_EXECUTE: Self = Self(1 << 63);

    /// Executable is the absence of `NO_EXECUTE`, so this is the empty set.
    pub const EXECUTABLE: Self = Self(0);

    pub fn contains(&self, other: Self) -> bool {
        (self.0 & other.0) == other.0
    }
}

impl core::ops::BitOr for PageFlags {
    type Output = Self;

    fn bitor(self, rhs: Self) -> Self::Output {
        Self(self.0 | rhs.0)
    }
}

impl core::ops::BitOrAssign for PageFlags {
    fn bitor_assign(&mut self, rhs: Self) {
        self.0 |= rhs.0;
    }
}

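/// A physical memory region, as described by a bootloader memory map entry.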
#[derive(Debug, Clone, Copy)]
pub struct MemoryRegion {
    pub start: u64,
    pub size: u64,
    pub usable: bool,
}

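/// Initializes the frame allocator from `memory_map`, registering each
/// usable region as a NUMA node.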
#[allow(unused_variables, unused_assignments)]
pub fn init(memory_map: &[MemoryRegion]) {
    kprintln!("[MM] Initializing memory management...");

    kprintln!("[MM] Getting frame allocator lock...");

    {
        let mut allocator = FRAME_ALLOCATOR.lock();

        kprintln!("[MM] Frame allocator locked successfully");

        #[allow(unused_assignments)]
        let mut total_memory = 0u64;
        #[allow(unused_assignments)]
        let mut usable_memory = 0u64;

        for (idx, region) in memory_map.iter().enumerate() {
            kprintln!("[MM] Processing memory region");

            total_memory += region.size;

            if region.usable {
                usable_memory += region.size;

                let start_frame = FrameNumber::new(region.start / FRAME_SIZE as u64);
                let frame_count = region.size as usize / FRAME_SIZE;

                // One NUMA node per region, clamped to node indices 0..=7.
                let numa_node = idx.min(7);
                kprintln!("[MM] Initializing NUMA node");

                if let Err(_e) = allocator.init_numa_node(numa_node, start_frame, frame_count) {
                    kprintln!("[MM] Warning: Failed to initialize memory region");
                } else {
                    kprintln!("[MM] Memory region initialized");
                }
            }
        }

        drop(allocator);
        kprintln!("[MM] Memory initialization complete");
    }
}

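/// Walks the boot page tables by hand to translate a kernel virtual address
/// to its physical address, following 1 GiB and 2 MiB huge-page entries.
/// Returns 0 if any level of the walk is not present.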
#[cfg(target_arch = "x86_64")]
fn translate_kernel_vaddr(vaddr: u64) -> u64 {
    let cr3: u64;
    // SAFETY: reading CR3 has no side effects.
    unsafe {
        core::arch::asm!("mov {}, cr3", out(reg) cr3, options(nomem, nostack));
    }
    let phys_offset = PHYS_MEM_OFFSET.load(core::sync::atomic::Ordering::Acquire);
    let l4_phys = cr3 & !0xFFF;

    // Level 4 (PML4).
    let l4_idx = ((vaddr >> 39) & 0x1FF) as usize;
    let l4_virt = (l4_phys + phys_offset) as *const u64;
    let l4_entry = unsafe { core::ptr::read_volatile(l4_virt.add(l4_idx)) };
    if l4_entry & 1 == 0 {
        return 0;
    }

    // Level 3 (PDPT); bit 7 set means a 1 GiB page.
    let l3_phys = l4_entry & 0x000F_FFFF_FFFF_F000;
    let l3_idx = ((vaddr >> 30) & 0x1FF) as usize;
    let l3_virt = (l3_phys + phys_offset) as *const u64;
    let l3_entry = unsafe { core::ptr::read_volatile(l3_virt.add(l3_idx)) };
    if l3_entry & 1 == 0 {
        return 0;
    }
    if l3_entry & (1 << 7) != 0 {
        return (l3_entry & 0x000F_FFFF_C000_0000) | (vaddr & 0x3FFF_FFFF);
    }

    // Level 2 (PD); bit 7 set means a 2 MiB page.
    let l2_phys = l3_entry & 0x000F_FFFF_FFFF_F000;
    let l2_idx = ((vaddr >> 21) & 0x1FF) as usize;
    let l2_virt = (l2_phys + phys_offset) as *const u64;
    let l2_entry = unsafe { core::ptr::read_volatile(l2_virt.add(l2_idx)) };
    if l2_entry & 1 == 0 {
        return 0;
    }
    if l2_entry & (1 << 7) != 0 {
        return (l2_entry & 0x000F_FFFF_FFE0_0000) | (vaddr & 0x1F_FFFF);
    }

    // Level 1 (PT): 4 KiB pages.
    let l1_phys = l2_entry & 0x000F_FFFF_FFFF_F000;
    let l1_idx = ((vaddr >> 12) & 0x1FF) as usize;
    let l1_virt = (l1_phys + phys_offset) as *const u64;
    let l1_entry = unsafe { core::ptr::read_volatile(l1_virt.add(l1_idx)) };
    if l1_entry & 1 == 0 {
        return 0;
    }

    (l1_entry & 0x000F_FFFF_FFFF_F000) | (vaddr & 0xFFF)
}

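/// Initializes memory management with a built-in memory map for boot paths
/// where the bootloader does not supply one, then brings up the kernel heap.
/// On x86_64 the usable region is derived from the kernel image's physical
/// end; aarch64 and riscv64 use fixed regions matching a QEMU `virt`-style
/// layout.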
pub fn init_default() {
    kprintln!("[MM] Using default memory map for initialization");

    #[cfg(target_arch = "x86_64")]
    let default_map = {
        extern "C" {
            static __kernel_end: u8;
        }
        let kernel_end_virt = unsafe { &__kernel_end as *const u8 as u64 };
        let kernel_end_phys = translate_kernel_vaddr(kernel_end_virt);

        let kernel_end_phys = if kernel_end_phys != 0 {
            kernel_end_phys
        } else {
            // Fall back to the end of the kernel heap if the linker symbol
            // does not translate.
            let heap_end_virt = heap::heap_end_vaddr();
            let heap_end_phys = translate_kernel_vaddr(heap_end_virt);
            if heap_end_phys != 0 {
                kprintln!(
                    "[MM] __kernel_end translation failed, using heap end at phys {:#x}",
                    heap_end_phys
                );
            }
            heap_end_phys
        };

        let alloc_start = if kernel_end_phys != 0 {
            // Align up to a 2 MiB boundary, then leave one more 2 MiB page as
            // a guard between the kernel image and the allocator pool.
            let aligned = (kernel_end_phys + 0x1FFFFF) & !0x1FFFFF;
            let start = aligned + 0x200000;
            kprintln!(
                "[MM] Kernel ends at phys {:#x}, allocator starts at {:#x} ({} MB)",
                kernel_end_phys,
                start,
                start / (1024 * 1024)
            );
            start
        } else {
            let safe_start = (heap::HEAP_SIZE as u64 + 64 * 1024 * 1024 + 0x1FFFFF) & !0x1FFFFF;
            kprintln!(
                "[MM] WARNING: Could not find kernel physical end, using {} MB start (heap={}MB + \
                 64MB margin)",
                safe_start / (1024 * 1024),
                heap::HEAP_SIZE / (1024 * 1024)
            );
            safe_start
        };

        // Assume 2 GiB of physical RAM for the default map.
        let ram_end: u64 = 2048 * 1024 * 1024;
        let size = ram_end.saturating_sub(alloc_start);

        [MemoryRegion {
            start: alloc_start,
            size,
            usable: true,
        }]
    };

    #[cfg(target_arch = "aarch64")]
    let default_map = [MemoryRegion {
        // 128 MiB starting past the kernel image.
        start: 0x48000000,
        size: 128 * 1024 * 1024,
        usable: true,
    }];

    #[cfg(target_arch = "riscv64")]
    let default_map = [MemoryRegion {
        // RAM past the kernel image, assuming 128 MiB of DRAM at base
        // 0x8000_0000.
        start: 0x80E00000,
        size: 0x88000000 - 0x80E00000,
        usable: true,
    }];

    kprintln!("[MM] Calling init with default memory map");

    init(&default_map);

    kprintln!("[MM] init returned successfully");

    init_heap().expect("Heap initialization failed");
}

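/// Walks the live boot page tables and marks every page table frame as used
/// in the frame allocator, so allocations cannot hand out frames that still
/// back the kernel's own translations. Covers the kernel half of the address
/// space plus the lower-half subtree holding the direct physical-memory map.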
#[cfg(target_arch = "x86_64")]
pub fn reserve_boot_page_table_frames() {
    let cr3: u64;
    // SAFETY: reading CR3 has no side effects.
    unsafe {
        core::arch::asm!("mov {}, cr3", out(reg) cr3, options(nomem, nostack));
    }
    let l4_phys = cr3 & !0xFFF;
    let phys_offset = PHYS_MEM_OFFSET.load(core::sync::atomic::Ordering::Acquire);
    let mut reserved_count = 0u32;

    // Reserve the root (L4) table itself.
    let l4_frame = FrameNumber::new(l4_phys / FRAME_SIZE as u64);
    let _ = FRAME_ALLOCATOR.lock().mark_frame_used(l4_frame);
    reserved_count += 1;

    let l4_virt = (l4_phys + phys_offset) as *const u64;

    // Walk one L4 entry's subtree, marking every table frame as used.
    let mut reserve_l4_subtree = |l4_idx: usize| {
        let l4_entry = unsafe { core::ptr::read_volatile(l4_virt.add(l4_idx)) };
        if l4_entry & 1 == 0 {
            return;
        }
        let l3_phys = l4_entry & 0x000F_FFFF_FFFF_F000;
        let l3_frame = FrameNumber::new(l3_phys / FRAME_SIZE as u64);
        let _ = FRAME_ALLOCATOR.lock().mark_frame_used(l3_frame);
        reserved_count += 1;

        let l3_virt = (l3_phys + phys_offset) as *const u64;
        for l3_idx in 0..512 {
            let l3_entry = unsafe { core::ptr::read_volatile(l3_virt.add(l3_idx)) };
            if l3_entry & 1 == 0 {
                continue;
            }
            if l3_entry & (1 << 7) != 0 {
                // 1 GiB page: no L2 table below this entry.
                continue;
            }
            let l2_phys = l3_entry & 0x000F_FFFF_FFFF_F000;
            let l2_frame = FrameNumber::new(l2_phys / FRAME_SIZE as u64);
            let _ = FRAME_ALLOCATOR.lock().mark_frame_used(l2_frame);
            reserved_count += 1;

            let l2_virt = (l2_phys + phys_offset) as *const u64;
            for l2_idx in 0..512 {
                let l2_entry = unsafe { core::ptr::read_volatile(l2_virt.add(l2_idx)) };
                if l2_entry & 1 == 0 {
                    continue;
                }
                if l2_entry & (1 << 7) != 0 {
                    // 2 MiB page: no L1 table below this entry.
                    continue;
                }
                let l1_phys = l2_entry & 0x000F_FFFF_FFFF_F000;
                let l1_frame = FrameNumber::new(l1_phys / FRAME_SIZE as u64);
                let _ = FRAME_ALLOCATOR.lock().mark_frame_used(l1_frame);
                reserved_count += 1;
            }
        }
    };

    // The kernel half of the address space (L4 entries 256..512).
    for l4_idx in 256..512 {
        reserve_l4_subtree(l4_idx);
    }

    // The direct physical-memory map may live in the lower half; reserve its
    // subtree too.
    if phys_offset != 0 {
        let phys_l4_idx = ((phys_offset >> 39) & 0x1FF) as usize;
        if phys_l4_idx < 256 {
            reserve_l4_subtree(phys_l4_idx);
        }
    }

    kprintln!(
        "[MM] Reserved {} boot page table frames from frame allocator",
        reserved_count
    );
}

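/// Translates `vaddr` through a software [`VirtualAddressSpace`] mapping
/// (not the hardware page tables). Returns `None` if no mapping covers the
/// address, or when the `alloc` feature is disabled.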
#[cfg_attr(not(feature = "alloc"), allow(unused_variables))]
pub fn translate_address(
    vas: &VirtualAddressSpace,
    vaddr: VirtualAddress,
) -> Option<PhysicalAddress> {
    #[cfg(feature = "alloc")]
    {
        if let Some(mapping) = vas.find_mapping(vaddr) {
            let offset = vaddr.0 - mapping.start.0;
            let page_index = (offset / PAGE_SIZE as u64) as usize;

            if page_index < mapping.physical_frames.len() {
                let frame = mapping.physical_frames[page_index];
                let page_offset = offset % PAGE_SIZE as u64;
                return Some(PhysicalAddress::new(frame.as_addr().as_u64() + page_offset));
            }
        }
    }

    None
}

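/// Returns a single frame to the frame allocator, logging a warning on
/// failure.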
pub fn free_frame(frame: PhysicalAddress) {
    let frame_num = FrameNumber::new(frame.as_u64() / FRAME_SIZE as u64);
    if let Err(_e) = FRAME_ALLOCATOR.lock().free_frames(frame_num, 1) {
        kprintln!(
            "[MM] Warning: Failed to free frame at {:#x}: {:?}",
            frame.as_u64(),
            _e
        );
    }
}

/// Alias for callers that deal in page permissions.
pub type PagePermissions = PageFlags;
/// Alias for callers that deal in physical pages rather than frames.
pub type PhysicalPage = FrameNumber;

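/// Allocates `count` physically contiguous frames, optionally pinned to a
/// NUMA node, and returns them as individual frame numbers. Available only
/// with the `alloc` feature.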
#[cfg(feature = "alloc")]
pub fn allocate_pages(
    count: usize,
    numa_node: Option<usize>,
) -> Result<Vec<PhysicalPage>, FrameAllocatorError> {
    // The allocator returns the first frame of a physically contiguous run.
    let frame = FRAME_ALLOCATOR.lock().allocate_frames(count, numa_node)?;

    let mut pages = Vec::with_capacity(count);
    for i in 0..count {
        pages.push(FrameNumber::new(frame.as_u64() + i as u64));
    }

    Ok(pages)
}

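/// Frees frames previously returned by [`allocate_pages`]. The slice must
/// describe the same contiguous run: only `pages[0]` and the length are
/// used.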
pub fn free_pages(pages: &[PhysicalPage]) -> Result<(), FrameAllocatorError> {
    if pages.is_empty() {
        return Ok(());
    }

    // Free the run starting at the first frame; the slice is assumed to be
    // the contiguous run handed out by `allocate_pages`.
    let first_frame = pages[0];
    let count = pages.len();

    FRAME_ALLOCATOR.lock().free_frames(first_frame, count)
}

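/// Number of frames currently held by the page cache, maintained via
/// [`page_cache_add`] / [`page_cache_remove`].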
static PAGE_CACHE_FRAMES: AtomicU64 = AtomicU64::new(0);

/// Record `count` frames entering the page cache.
pub fn page_cache_add(count: u64) {
    PAGE_CACHE_FRAMES.fetch_add(count, Ordering::Relaxed);
}

/// Record `count` frames leaving the page cache.
pub fn page_cache_remove(count: u64) {
    PAGE_CACHE_FRAMES.fetch_sub(count, Ordering::Relaxed);
}

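/// A point-in-time snapshot of frame usage.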
pub struct MemoryStats {
    pub total_frames: usize,
    pub free_frames: usize,
    pub cached_frames: usize,
}

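/// Returns current frame allocator statistics plus the page cache counter.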
pub fn get_memory_stats() -> MemoryStats {
    let allocator = FRAME_ALLOCATOR.lock();
    let stats = allocator.get_stats();

    MemoryStats {
        total_frames: stats.total_frames as usize,
        free_frames: stats.free_frames as usize,
        cached_frames: PAGE_CACHE_FRAMES.load(Ordering::Relaxed) as usize,
    }
}