1#![allow(dead_code)]
8
9use core::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
10
11use spin::Mutex;
12
13#[allow(unused_imports)]
15use crate::println;
16use crate::raii::{FrameGuard, FramesGuard};
17
18#[cfg(feature = "alloc")]
19extern crate alloc;
20
21#[cfg(feature = "alloc")]
22use alloc::boxed::Box;
23#[cfg(feature = "alloc")]
24use alloc::vec::Vec;
25
/// Minimal stand-in for `alloc::vec::Vec` used when the `alloc` feature is
/// disabled. It stores nothing and silently discards pushes, which lets the
/// RAII helpers below compile unchanged without a heap.
#[cfg(not(feature = "alloc"))]
struct Vec<T> {
    // Zero-sized marker so the type parameter is considered "used".
    _phantom: core::marker::PhantomData<T>,
}

#[cfg(not(feature = "alloc"))]
impl<T> Vec<T> {
    // Mirrors `Vec::with_capacity`; capacity is ignored since nothing is stored.
    fn with_capacity(_: usize) -> Self {
        Self {
            _phantom: core::marker::PhantomData,
        }
    }
    // Mirrors `Vec::push`; the element is dropped immediately.
    fn push(&mut self, _: T) {}
}
41
/// Size of one physical frame in bytes (4 KiB pages).
pub const FRAME_SIZE: usize = 4096;

/// Requests smaller than this many frames are served by the bitmap
/// allocator; requests at or above it go to the buddy allocator.
const BITMAP_BUDDY_THRESHOLD: usize = 512;

/// Maximum number of NUMA nodes this allocator tracks.
const MAX_NUMA_NODES: usize = 8;
51
/// Physical memory zones, mirroring the classic DMA / normal / high split.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MemoryZone {
    /// Low memory reachable by legacy DMA devices (first 16 MiB; see `frame_range`).
    Dma,
    /// Directly-mapped "normal" memory.
    Normal,
    /// High memory; only meaningful on 32-bit targets (empty range on 64-bit).
    High,
}
62
63impl MemoryZone {
64 pub fn frame_range(&self) -> (FrameNumber, FrameNumber) {
66 match self {
67 MemoryZone::Dma => (FrameNumber::new(0), FrameNumber::new(4096)), MemoryZone::Normal => {
69 #[cfg(target_pointer_width = "32")]
70 {
71 (FrameNumber::new(4096), FrameNumber::new(1048576)) }
73 #[cfg(target_pointer_width = "64")]
74 {
75 (FrameNumber::new(4096), FrameNumber::new(u64::MAX >> 12)) }
77 }
78 MemoryZone::High => {
79 #[cfg(target_pointer_width = "32")]
80 {
81 (FrameNumber::new(1048576), FrameNumber::new(u64::MAX >> 12))
82 }
84 #[cfg(target_pointer_width = "64")]
85 {
86 (FrameNumber::new(0), FrameNumber::new(0))
88 }
89 }
90 }
91 }
92
93 pub fn contains(&self, frame: FrameNumber) -> bool {
95 let (start, end) = self.frame_range();
96 frame >= start && frame < end
97 }
98
99 pub fn for_frame(frame: FrameNumber) -> Self {
101 if MemoryZone::Dma.contains(frame) {
102 MemoryZone::Dma
103 } else if MemoryZone::High.contains(frame) && cfg!(target_pointer_width = "32") {
104 MemoryZone::High
105 } else {
106 MemoryZone::Normal
107 }
108 }
109}
110
111#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
113pub struct FrameNumber(u64);
114
115impl FrameNumber {
116 pub const fn new(num: u64) -> Self {
117 Self(num)
118 }
119
120 pub const fn as_u64(&self) -> u64 {
121 self.0
122 }
123
124 pub const fn as_addr(&self) -> PhysicalAddress {
125 PhysicalAddress::new(self.0 * FRAME_SIZE as u64)
126 }
127}
128
129#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
131pub struct PhysicalAddress(pub u64);
132
133impl PhysicalAddress {
134 pub const fn new(addr: u64) -> Self {
135 Self(addr)
136 }
137
138 pub const fn as_u64(&self) -> u64 {
139 self.0
140 }
141
142 pub const fn as_usize(&self) -> usize {
143 self.0 as usize
144 }
145
146 pub const fn as_frame(&self) -> FrameNumber {
147 FrameNumber::new(self.0 / FRAME_SIZE as u64)
148 }
149
150 pub const fn offset(&self, offset: u64) -> Self {
151 Self::new(self.0 + offset)
152 }
153}
154
155#[derive(Debug, Clone, Copy, PartialEq, Eq)]
157pub struct PhysicalFrame {
158 number: FrameNumber,
159}
160
161impl PhysicalFrame {
162 pub fn new(number: FrameNumber) -> Self {
163 Self { number }
164 }
165
166 pub fn number(&self) -> FrameNumber {
167 self.number
168 }
169
170 pub fn addr(&self) -> usize {
171 (self.number.0 * FRAME_SIZE as u64) as usize
172 }
173}
174
/// Module-local result type for frame-allocator operations.
pub type Result<T> = core::result::Result<T, FrameAllocatorError>;

/// Errors reported by the frame allocators.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FrameAllocatorError {
    /// No contiguous run of free frames large enough was found.
    OutOfMemory,
    /// The frame is outside the allocator's range, or a double free was detected.
    InvalidFrame,
    /// The requested size is zero or out of range for the chosen allocator.
    InvalidSize,
    /// The NUMA node index is `>= MAX_NUMA_NODES`.
    InvalidNumaNode,
    /// A new reserved region overlaps an existing one.
    ReservedMemoryConflict,
}
192
/// A half-open `[start, end)` range of frames that must never be allocated.
#[derive(Debug, Clone, Copy)]
pub struct ReservedRegion {
    /// First reserved frame (inclusive).
    pub start: FrameNumber,
    /// First frame past the reservation (exclusive).
    pub end: FrameNumber,
    /// Human-readable label for diagnostics.
    pub description: &'static str,
}

/// Snapshot of allocator counters; see `FrameAllocator::get_stats`.
#[derive(Debug)]
pub struct FrameAllocatorStats {
    pub total_frames: u64,
    pub free_frames: u64,
    // NOTE(review): bitmap_allocations/buddy_allocations are never incremented
    // anywhere in this file — confirm whether they are updated elsewhere.
    pub bitmap_allocations: u64,
    pub buddy_allocations: u64,
    pub allocation_time_ns: u64,
}
213
/// First-fit bitmap allocator for small allocations.
///
/// Bit convention: a SET bit means the frame is FREE; a cleared bit means
/// allocated. The fixed 2048-word bitmap covers at most 2048 * 64 = 131072
/// frames (512 MiB at 4 KiB/frame).
struct BitmapAllocator {
    // One bit per frame; protected by a spinlock.
    bitmap: Mutex<[u64; 2048]>,
    // First frame managed by this allocator (bit 0 of the bitmap).
    start_frame: FrameNumber,
    // Number of frames actually backed by memory (may be < bitmap capacity).
    total_frames: usize,
    // Free-frame counter, kept in sync with the bitmap under its lock.
    free_frames: AtomicUsize,
}
227
228impl BitmapAllocator {
229 const fn new(start_frame: FrameNumber, frame_count: usize) -> Self {
230 Self {
231 bitmap: Mutex::new([u64::MAX; 2048]),
232 start_frame,
233 total_frames: frame_count,
234 free_frames: AtomicUsize::new(frame_count),
235 }
236 }
237
238 fn allocate(&self, count: usize) -> Result<FrameNumber> {
240 if count == 0 || count >= BITMAP_BUDDY_THRESHOLD {
241 return Err(FrameAllocatorError::InvalidSize);
242 }
243
244 let mut bitmap = self.bitmap.lock();
245
246 let mut consecutive = 0;
248 let mut start_bit = 0;
249
250 for (word_idx, word) in bitmap.iter_mut().enumerate() {
251 if *word == 0 {
252 consecutive = 0;
253 continue;
254 }
255
256 for bit in 0..64 {
257 if *word & (1 << bit) != 0 {
258 if consecutive == 0 {
259 start_bit = word_idx * 64 + bit;
261 }
262 consecutive += 1;
263 if consecutive == count {
264 let first_frame = start_bit;
266
267 for i in 0..count {
269 let frame_bit = first_frame + i;
270 let word_idx = frame_bit / 64;
271 let bit_idx = frame_bit % 64;
272 bitmap[word_idx] &= !(1 << bit_idx);
273 }
274
275 self.free_frames.fetch_sub(count, Ordering::Release);
276
277 return Ok(FrameNumber::new(
278 self.start_frame.as_u64() + first_frame as u64,
279 ));
280 }
281 } else {
282 consecutive = 0;
283 }
284 }
285 }
286
287 Err(FrameAllocatorError::OutOfMemory)
288 }
289
290 fn mark_used(&self, frame: FrameNumber) -> Result<()> {
293 let frame_num = frame.as_u64();
294 let start = self.start_frame.as_u64();
295 if frame_num < start || frame_num >= start + self.total_frames as u64 {
296 return Ok(());
298 }
299 let offset = (frame_num - start) as usize;
300 let word_idx = offset / 64;
301 let bit_idx = offset % 64;
302
303 let mut bitmap = self.bitmap.lock();
304 if bitmap[word_idx] & (1 << bit_idx) != 0 {
305 bitmap[word_idx] &= !(1 << bit_idx);
307 self.free_frames.fetch_sub(1, Ordering::Relaxed);
308 }
309 Ok(())
310 }
311
312 fn free(&self, frame: FrameNumber, count: usize) -> Result<()> {
314 let offset = (frame.as_u64() - self.start_frame.as_u64()) as usize;
315
316 if offset + count > self.total_frames {
317 return Err(FrameAllocatorError::InvalidFrame);
318 }
319
320 let mut bitmap = self.bitmap.lock();
321
322 for i in 0..count {
324 let frame_bit = offset + i;
325 let word_idx = frame_bit / 64;
326 let bit_idx = frame_bit % 64;
327
328 if bitmap[word_idx] & (1 << bit_idx) != 0 {
330 return Err(FrameAllocatorError::InvalidFrame);
331 }
332
333 bitmap[word_idx] |= 1 << bit_idx;
334 }
335
336 self.free_frames.fetch_add(count, Ordering::Release);
337 Ok(())
338 }
339
340 fn free_count(&self) -> usize {
341 self.free_frames.load(Ordering::Acquire)
342 }
343}
344
/// Buddy allocator for large (power-of-two-sized) allocations.
struct BuddyAllocator {
    // One intrusive free list per order; index = order, block size = 2^order
    // frames. 21 orders cover up to 2^20 frames (4 GiB at 4 KiB/frame).
    free_lists: [Mutex<Option<BuddyBlock>>; 21],
    // First frame managed by this allocator.
    start_frame: FrameNumber,
    // Managed frame count, rounded to a power of two by `new`.
    total_frames: usize,
    // Free-frame counter, updated in block-sized (2^order) increments.
    free_frames: AtomicUsize,
}

/// Node of a singly-linked free list of equal-sized buddy blocks.
#[derive(Debug)]
struct BuddyBlock {
    // First frame of this free block.
    frame: FrameNumber,
    #[cfg(feature = "alloc")]
    next: Option<Box<BuddyBlock>>,
    // Without `alloc` there is no heap, so the list degenerates to raw
    // pointers; the allocator is effectively inert in that configuration.
    #[cfg(not(feature = "alloc"))]
    next: Option<*mut BuddyBlock>,
}
365
impl BuddyAllocator {
    /// Creates a buddy allocator over `frame_count` frames at `start_frame`.
    ///
    /// Non-power-of-two counts are rounded DOWN to the previous power of two
    /// (`next_power_of_two() / 2`), discarding the remainder.
    fn new(start_frame: FrameNumber, frame_count: usize) -> Self {
        let total_frames = if frame_count.is_power_of_two() {
            frame_count
        } else {
            frame_count.next_power_of_two() / 2
        };

        let mut allocator = Self {
            free_lists: Default::default(),
            start_frame,
            total_frames,
            free_frames: AtomicUsize::new(total_frames),
        };

        // For a power of two, trailing_zeros == log2(total_frames).
        let max_order = total_frames.trailing_zeros() as usize;

        // Seed the largest order with one block spanning the whole region.
        // NOTE(review): without `alloc` the free lists stay empty while
        // `free_frames` still reports `total_frames` — confirm intended.
        #[cfg(feature = "alloc")]
        {
            allocator.free_lists[max_order] = Mutex::new(Some(BuddyBlock {
                frame: start_frame,
                next: None,
            }));
        }

        allocator
    }

    /// Smallest order whose block size (2^order frames) covers `count`.
    fn get_order(count: usize) -> usize {
        count.next_power_of_two().trailing_zeros() as usize
    }

    /// Allocates `count` frames, rounded up to a power-of-two block.
    ///
    /// Searches the requested order upward; a larger block found on the way
    /// is split, with each split-off buddy pushed onto the lower order's list.
    fn allocate(&self, count: usize) -> Result<FrameNumber> {
        if count == 0 {
            return Err(FrameAllocatorError::InvalidSize);
        }

        // No heap: the free lists cannot hold nodes, so nothing to allocate.
        #[cfg(not(feature = "alloc"))]
        {
            return Err(FrameAllocatorError::OutOfMemory);
        }

        #[cfg(feature = "alloc")]
        {
            let order = Self::get_order(count);
            if order >= self.free_lists.len() {
                return Err(FrameAllocatorError::InvalidSize);
            }

            for current_order in order..self.free_lists.len() {
                let mut list = self.free_lists[current_order].lock();

                if let Some(mut block) = list.take() {
                    // Pop the head; its successor (if any) becomes the head.
                    *list = block.next.take().map(|b| *b);

                    // Split down to the requested order, freeing the upper
                    // half ("buddy") at each step.
                    let mut split_order = current_order;
                    while split_order > order {
                        split_order -= 1;
                        let buddy_frame =
                            FrameNumber::new(block.frame.as_u64() + (1 << split_order));

                        // NOTE(review): the lock at `current_order` is still
                        // held while lower-order locks are taken here —
                        // confirm this nests safely with `free`'s upward walk.
                        let mut buddy_list = self.free_lists[split_order].lock();
                        let buddy_block = BuddyBlock {
                            frame: buddy_frame,
                            next: buddy_list.take().map(Box::new),
                        };
                        *buddy_list = Some(buddy_block);
                    }

                    // A whole 2^order block is consumed even if count < 2^order.
                    self.free_frames.fetch_sub(1 << order, Ordering::Release);
                    return Ok(block.frame);
                }
            }

            Err(FrameAllocatorError::OutOfMemory)
        }
    }

    /// Frees a block of `count` frames (rounded up to its order) starting at
    /// `frame`, merging with its buddy repeatedly while the buddy is free.
    ///
    /// NOTE(review): the buddy address is computed by XOR on the ABSOLUTE
    /// frame number; this is only correct when `start_frame` is aligned to
    /// the block size — confirm callers guarantee that alignment.
    fn free(&self, frame: FrameNumber, count: usize) -> Result<()> {
        // No heap means nothing was ever allocated from here.
        #[cfg(not(feature = "alloc"))]
        {
            return Err(FrameAllocatorError::InvalidFrame);
        }

        #[cfg(feature = "alloc")]
        {
            let order = Self::get_order(count);
            if order >= self.free_lists.len() {
                return Err(FrameAllocatorError::InvalidSize);
            }

            let mut current_frame = frame;
            let mut current_order = order;

            // Walk upward, coalescing with the buddy while it is found free.
            while current_order < self.free_lists.len() - 1 {
                let buddy_frame = FrameNumber::new(current_frame.as_u64() ^ (1 << current_order));

                let mut list = self.free_lists[current_order].lock();
                let mut found_buddy = false;

                if let Some(ref mut head) = *list {
                    if head.frame == buddy_frame {
                        // Buddy is the list head: unlink it.
                        *list = head.next.take().map(|b| *b);
                        found_buddy = true;
                    } else {
                        // Buddy may be mid-list: walk with a raw pointer so we
                        // can re-link across the mutable traversal.
                        let mut prev: *mut BuddyBlock = head;
                        // SAFETY: `prev` always points to a node owned by the
                        // list we hold locked; no other thread can touch it,
                        // and each node is visited at most once before either
                        // unlinking its successor or advancing past it.
                        unsafe {
                            while let Some(ref mut next_box) = (*prev).next {
                                if next_box.frame == buddy_frame {
                                    // Unlink `next_box`; its Box is dropped
                                    // when the assignment overwrites it.
                                    (*prev).next = next_box.next.take();
                                    found_buddy = true;
                                    break;
                                }
                                prev = &mut **next_box as *mut BuddyBlock;
                            }
                        }
                    }
                }

                if found_buddy {
                    // Merged block starts at the lower of the two addresses.
                    current_frame =
                        FrameNumber::new(current_frame.as_u64().min(buddy_frame.as_u64()));
                    current_order += 1;
                } else {
                    break;
                }
            }

            // Push the (possibly merged) block onto its order's free list.
            let mut list = self.free_lists[current_order].lock();
            let block = BuddyBlock {
                frame: current_frame,
                next: list.take().map(Box::new),
            };
            *list = Some(block);

            // Credit the ORIGINAL order's size; merging does not change the
            // number of frames returned.
            self.free_frames.fetch_add(1 << order, Ordering::Release);
            Ok(())
        }
    }

    /// Number of currently free frames (approximate outside the locks).
    fn free_count(&self) -> usize {
        self.free_frames.load(Ordering::Acquire)
    }
}
540
/// Hybrid physical-frame allocator: a per-NUMA-node bitmap allocator for
/// small requests plus a buddy allocator for large ones, with optional
/// reserved-region tracking when the `alloc` feature is enabled.
pub struct FrameAllocator {
    // One optional bitmap allocator per NUMA node (small allocations).
    bitmap_allocators: [Option<BitmapAllocator>; MAX_NUMA_NODES],
    // One optional buddy allocator per NUMA node (large allocations).
    buddy_allocators: [Option<BuddyAllocator>; MAX_NUMA_NODES],
    // Aggregated counters; frame totals are recomputed in `get_stats`.
    stats: Mutex<FrameAllocatorStats>,
    // Monotonic count of allocation attempts.
    allocation_count: AtomicU64,
    // Regions that must never be handed out (BIOS, firmware, ...).
    #[cfg(feature = "alloc")]
    reserved_regions: Mutex<Vec<ReservedRegion>>,
}
555
impl FrameAllocator {
    /// Creates an empty allocator with no NUMA nodes initialized.
    ///
    /// `const` so it can back the `FRAME_ALLOCATOR` static.
    pub const fn new() -> Self {
        // `[None; N]` needs `Copy`; routing through consts sidesteps that.
        const NONE_BITMAP: Option<BitmapAllocator> = None;
        const NONE_BUDDY: Option<BuddyAllocator> = None;

        Self {
            bitmap_allocators: [NONE_BITMAP; MAX_NUMA_NODES],
            buddy_allocators: [NONE_BUDDY; MAX_NUMA_NODES],
            stats: Mutex::new(FrameAllocatorStats {
                total_frames: 0,
                free_frames: 0,
                bitmap_allocations: 0,
                buddy_allocations: 0,
                allocation_time_ns: 0,
            }),
            allocation_count: AtomicU64::new(0),
            #[cfg(feature = "alloc")]
            reserved_regions: Mutex::new(Vec::new()),
        }
    }

    /// Registers a region that must never be allocated.
    ///
    /// Returns `ReservedMemoryConflict` if it overlaps an existing
    /// reservation (half-open interval overlap test).
    #[cfg(feature = "alloc")]
    pub fn add_reserved_region(&self, region: ReservedRegion) -> Result<()> {
        let mut reserved = self.reserved_regions.lock();

        for existing in reserved.iter() {
            // Two half-open ranges [a, b) and [c, d) overlap iff a < d && b > c.
            if region.start < existing.end && region.end > existing.start {
                return Err(FrameAllocatorError::ReservedMemoryConflict);
            }
        }

        reserved.push(region);
        Ok(())
    }

    /// Whether any frame in `[start, start + count)` is reserved.
    #[cfg(feature = "alloc")]
    pub fn is_reserved(&self, start: FrameNumber, count: usize) -> bool {
        let end = FrameNumber::new(start.as_u64() + count as u64);
        let reserved = self.reserved_regions.lock();

        for region in reserved.iter() {
            if start < region.end && end > region.start {
                return true;
            }
        }

        false
    }

    /// Reserves the well-known low-memory regions; errors are ignored so
    /// repeated calls (overlap conflicts) are harmless.
    #[cfg(feature = "alloc")]
    pub fn mark_standard_reserved_regions(&self) {
        // Frames 0..256 = first 1 MiB: real-mode IVT, BDA, VGA, option ROMs.
        let _ = self.add_reserved_region(ReservedRegion {
            start: FrameNumber::new(0),
            end: FrameNumber::new(256), description: "BIOS and legacy devices",
        });

    }

    /// Initializes the allocators for one NUMA node.
    ///
    /// The first `min(frame_count, 2048 * 64)` frames (the bitmap's capacity)
    /// go to a bitmap allocator; the remainder to a buddy allocator.
    /// Returns `InvalidNumaNode` for `node >= MAX_NUMA_NODES`.
    pub fn init_numa_node(
        &mut self,
        node: usize,
        start_frame: FrameNumber,
        frame_count: usize,
    ) -> Result<()> {
        #[cfg(not(target_arch = "aarch64"))]
        println!(
            "[FA] init_numa_node: node={}, start_frame={}, frame_count={}",
            node,
            start_frame.as_u64(),
            frame_count
        );

        if node >= MAX_NUMA_NODES {
            return Err(FrameAllocatorError::InvalidNumaNode);
        }

        // 2048 * 64 bits is the bitmap's fixed capacity (512 MiB of frames).
        let bitmap_frames = frame_count.min(2048 * 64);
        let buddy_frames = frame_count.saturating_sub(bitmap_frames);

        #[cfg(not(target_arch = "aarch64"))]
        println!(
            "[FA] bitmap_frames={}, buddy_frames={}",
            bitmap_frames, buddy_frames
        );

        if bitmap_frames > 0 {
            #[cfg(not(target_arch = "aarch64"))]
            println!("[FA] Creating BitmapAllocator...");
            self.bitmap_allocators[node] = Some(BitmapAllocator::new(start_frame, bitmap_frames));
            #[cfg(not(target_arch = "aarch64"))]
            println!("[FA] BitmapAllocator created");
        }

        if buddy_frames > 0 {
            #[cfg(not(target_arch = "aarch64"))]
            println!("[FA] Creating BuddyAllocator...");
            // Buddy region starts directly after the bitmap-managed frames.
            let buddy_start = FrameNumber::new(start_frame.as_u64() + bitmap_frames as u64);
            self.buddy_allocators[node] = Some(BuddyAllocator::new(buddy_start, buddy_frames));
            #[cfg(not(target_arch = "aarch64"))]
            println!("[FA] BuddyAllocator created");
        }

        // Stats are recomputed on demand in get_stats(), so no update here.
        #[cfg(not(target_arch = "aarch64"))]
        println!("[FA] Skipping stats update during init to avoid deadlock");

        Ok(())
    }

    /// Allocates `count` contiguous frames, optionally preferring a NUMA node.
    pub fn allocate_frames(&self, count: usize, numa_node: Option<usize>) -> Result<FrameNumber> {
        self.allocate_frames_in_zone(count, numa_node, None)
    }

    /// Allocates `count` contiguous frames with optional NUMA-node and
    /// memory-zone preferences.
    ///
    /// Small requests (< BITMAP_BUDDY_THRESHOLD) try the bitmap allocator
    /// first and fall back to the buddy allocator; large requests go straight
    /// to the buddy allocator. Allocation latency is accumulated in stats.
    pub fn allocate_frames_in_zone(
        &self,
        count: usize,
        numa_node: Option<usize>,
        zone: Option<MemoryZone>,
    ) -> Result<FrameNumber> {
        let start_time = crate::bench::read_timestamp();

        let result = if count < BITMAP_BUDDY_THRESHOLD {
            match self.allocate_bitmap_with_zone(count, numa_node, zone) {
                Ok(frame) => Ok(frame),
                Err(_) => {
                    // Bitmap exhausted or unavailable: fall back to buddy.
                    self.allocate_buddy_with_zone(count, numa_node, zone)
                }
            }
        } else {
            self.allocate_buddy_with_zone(count, numa_node, zone)
        };

        // NOTE(review): stats.bitmap_allocations / buddy_allocations are
        // never incremented here — confirm whether that is intentional.
        let elapsed = crate::bench::read_timestamp() - start_time;
        {
            let mut stats = self.stats.lock();
            stats.allocation_time_ns += crate::bench::cycles_to_ns(elapsed);
        }
        self.allocation_count.fetch_add(1, Ordering::Relaxed);

        result
    }

    /// Bitmap allocation with zone preference; relaxes the zone constraint
    /// (except for DMA, which is a hard requirement) if the first try fails.
    fn allocate_bitmap_with_zone(
        &self,
        count: usize,
        numa_node: Option<usize>,
        zone: Option<MemoryZone>,
    ) -> Result<FrameNumber> {
        if let Ok(frame) = self.allocate_bitmap_internal(count, numa_node, zone) {
            return Ok(frame);
        }

        if zone.is_some() {
            // DMA must not silently fall back to non-DMA memory.
            if zone == Some(MemoryZone::Dma) {
                return Err(FrameAllocatorError::OutOfMemory);
            }
            self.allocate_bitmap_internal(count, numa_node, None)
        } else {
            Err(FrameAllocatorError::OutOfMemory)
        }
    }

    /// Bitmap allocation without a zone constraint.
    fn allocate_bitmap(&self, count: usize, numa_node: Option<usize>) -> Result<FrameNumber> {
        self.allocate_bitmap_internal(count, numa_node, None)
    }

    /// Core bitmap allocation: tries the preferred NUMA node first (with
    /// zone and reservation checks, rolling back on violation), then scans
    /// every node without the zone check.
    fn allocate_bitmap_internal(
        &self,
        count: usize,
        numa_node: Option<usize>,
        zone: Option<MemoryZone>,
    ) -> Result<FrameNumber> {
        if let Some(node) = numa_node {
            if node < MAX_NUMA_NODES {
                if let Some(ref allocator) = self.bitmap_allocators[node] {
                    if let Ok(frame) = allocator.allocate(count) {
                        // Zone violation: undo the allocation and bail.
                        if let Some(z) = zone {
                            if !z.contains(frame) {
                                let _ = allocator.free(frame, count);
                                return Err(FrameAllocatorError::OutOfMemory);
                            }
                        }

                        // Reserved-region violation: undo and fall through to
                        // the any-node scan below.
                        #[cfg(feature = "alloc")]
                        if self.is_reserved(frame, count) {
                            let _ = allocator.free(frame, count);
                        } else {
                            return Ok(frame);
                        }
                        #[cfg(not(feature = "alloc"))]
                        return Ok(frame);
                    }
                }
            }
        }

        // Any-node fallback. NOTE(review): the zone constraint is not
        // re-checked on this path — confirm intended.
        for allocator in self.bitmap_allocators.iter().flatten() {
            if let Ok(frame) = allocator.allocate(count) {
                #[cfg(feature = "alloc")]
                if self.is_reserved(frame, count) {
                    let _ = allocator.free(frame, count);
                    continue;
                }
                return Ok(frame);
            }
        }

        Err(FrameAllocatorError::OutOfMemory)
    }

    /// Buddy allocation with zone preference; same relaxation policy as
    /// `allocate_bitmap_with_zone` (DMA is never relaxed).
    fn allocate_buddy_with_zone(
        &self,
        count: usize,
        numa_node: Option<usize>,
        zone: Option<MemoryZone>,
    ) -> Result<FrameNumber> {
        if let Ok(frame) = self.allocate_buddy_internal(count, numa_node, zone) {
            return Ok(frame);
        }

        if zone.is_some() {
            // DMA must not silently fall back to non-DMA memory.
            if zone == Some(MemoryZone::Dma) {
                return Err(FrameAllocatorError::OutOfMemory);
            }
            self.allocate_buddy_internal(count, numa_node, None)
        } else {
            Err(FrameAllocatorError::OutOfMemory)
        }
    }

    /// Buddy allocation without a zone constraint.
    fn allocate_buddy(&self, count: usize, numa_node: Option<usize>) -> Result<FrameNumber> {
        self.allocate_buddy_internal(count, numa_node, None)
    }

    /// Core buddy allocation; mirrors `allocate_bitmap_internal`:
    /// preferred node with zone/reservation rollback, then any-node scan.
    fn allocate_buddy_internal(
        &self,
        count: usize,
        numa_node: Option<usize>,
        zone: Option<MemoryZone>,
    ) -> Result<FrameNumber> {
        if let Some(node) = numa_node {
            if node < MAX_NUMA_NODES {
                if let Some(ref allocator) = self.buddy_allocators[node] {
                    if let Ok(frame) = allocator.allocate(count) {
                        // Zone violation: undo the allocation and bail.
                        if let Some(z) = zone {
                            if !z.contains(frame) {
                                let _ = allocator.free(frame, count);
                                return Err(FrameAllocatorError::OutOfMemory);
                            }
                        }

                        // Reserved-region violation: undo and fall through.
                        #[cfg(feature = "alloc")]
                        if self.is_reserved(frame, count) {
                            let _ = allocator.free(frame, count);
                        } else {
                            return Ok(frame);
                        }
                        #[cfg(not(feature = "alloc"))]
                        return Ok(frame);
                    }
                }
            }
        }

        // Any-node fallback (zone not re-checked; see note above).
        for allocator in self.buddy_allocators.iter().flatten() {
            if let Ok(frame) = allocator.allocate(count) {
                #[cfg(feature = "alloc")]
                if self.is_reserved(frame, count) {
                    let _ = allocator.free(frame, count);
                    continue;
                }
                return Ok(frame);
            }
        }

        Err(FrameAllocatorError::OutOfMemory)
    }

    /// Marks one frame as used in whichever bitmap allocator covers it
    /// (out-of-range allocators ignore the call; buddy regions are not
    /// touched).
    pub fn mark_frame_used(&self, frame: FrameNumber) -> Result<()> {
        for allocator in self.bitmap_allocators.iter().flatten() {
            allocator.mark_used(frame)?;
        }
        Ok(())
    }

    /// Frees `count` frames by offering them to every allocator until one
    /// accepts; `InvalidFrame` if none does.
    pub fn free_frames(&self, frame: FrameNumber, count: usize) -> Result<()> {
        for allocator in self.bitmap_allocators.iter().flatten() {
            if allocator.free(frame, count).is_ok() {
                return Ok(());
            }
        }

        for allocator in self.buddy_allocators.iter().flatten() {
            if allocator.free(frame, count).is_ok() {
                return Ok(());
            }
        }

        Err(FrameAllocatorError::InvalidFrame)
    }

    /// Returns a stats snapshot; frame totals are recomputed from the
    /// per-allocator counters, the rest copied from the stats mutex.
    pub fn get_stats(&self) -> FrameAllocatorStats {
        let mut free_frames = 0u64;
        let mut total_frames = 0u64;

        for allocator in self.bitmap_allocators.iter().flatten() {
            free_frames += allocator.free_count() as u64;
            total_frames += allocator.total_frames as u64;
        }

        for allocator in self.buddy_allocators.iter().flatten() {
            free_frames += allocator.free_count() as u64;
            total_frames += allocator.total_frames as u64;
        }

        let stats = self.stats.lock();
        FrameAllocatorStats {
            total_frames,
            free_frames,
            bitmap_allocations: stats.bitmap_allocations,
            buddy_allocations: stats.buddy_allocations,
            allocation_time_ns: stats.allocation_time_ns,
        }
    }

    /// Allocates one frame wrapped in a guard that frees it on drop.
    pub fn allocate_frame_raii(&'static self) -> Result<FrameGuard> {
        let frame_num = self.allocate_frames(1, None)?;
        let frame = PhysicalFrame::new(frame_num);
        Ok(FrameGuard::new(frame, self))
    }

    /// Allocates `count` contiguous frames wrapped in a guard that frees
    /// them on drop.
    ///
    /// NOTE(review): without the `alloc` feature the stub `Vec` discards its
    /// pushes, so the guard would hold no frames — confirm intended.
    pub fn allocate_frames_raii(&'static self, count: usize) -> Result<FramesGuard> {
        let start_frame = self.allocate_frames(count, None)?;
        let mut frames = Vec::with_capacity(count);
        for i in 0..count {
            frames.push(PhysicalFrame::new(FrameNumber(start_frame.0 + i as u64)));
        }
        Ok(FramesGuard::new(frames, self))
    }

    /// Allocates one frame from the given NUMA node, wrapped in a guard.
    pub fn allocate_frame_raii_numa(&'static self, numa_node: usize) -> Result<FrameGuard> {
        let frame_num = self.allocate_frames(1, Some(numa_node))?;
        let frame = PhysicalFrame::new(frame_num);
        Ok(FrameGuard::new(frame, self))
    }

    /// Frees a single frame, logging (not propagating) failures.
    ///
    /// # Safety
    /// The caller must guarantee the frame was allocated by this allocator
    /// and that no references to its memory remain; freeing a live or
    /// foreign frame leads to reuse of memory still in use.
    pub unsafe fn free_frame(&self, frame: PhysicalFrame) {
        if let Err(_e) = self.free_frames(frame.number(), 1) {
            #[cfg(not(target_arch = "aarch64"))]
            println!(
                "[FrameAllocator] Warning: Failed to free frame {}: {:?}",
                frame.number().0,
                _e
            );
        }
    }

    /// Frees the frame containing `frame` (address rounded down to a frame
    /// boundary), logging (not propagating) failures.
    pub fn deallocate_frame(&self, frame: PhysicalAddress) {
        let frame_num = FrameNumber::new(frame.as_u64() / FRAME_SIZE as u64);
        if let Err(_e) = self.free_frames(frame_num, 1) {
            #[cfg(not(target_arch = "aarch64"))]
            println!(
                "[FrameAllocator] Warning: Failed to deallocate frame at {:#x}: {:?}",
                frame.as_u64(),
                _e
            );
        }
    }
}
987
988impl Default for FrameAllocator {
989 fn default() -> Self {
990 Self::new()
991 }
992}
993
/// Global frame allocator, shared across the kernel behind a spinlock.
pub(crate) static FRAME_ALLOCATOR: Mutex<FrameAllocator> = Mutex::new(FrameAllocator::new());
996
/// Small per-CPU stack of cached free frames, aligned to a cache line to
/// avoid false sharing between CPUs.
#[repr(align(64))]
pub struct PerCpuPageCache {
    // LIFO stack of cached frame numbers; only frames[..count] are valid.
    frames: [u64; Self::CAPACITY],
    // Number of valid entries in `frames`.
    count: usize,
}
1017
1018impl Default for PerCpuPageCache {
1019 fn default() -> Self {
1020 Self::new()
1021 }
1022}
1023
1024impl PerCpuPageCache {
1025 const CAPACITY: usize = 64;
1027 const LOW_WATERMARK: usize = 16;
1029 const HIGH_WATERMARK: usize = 48;
1031 const BATCH_SIZE: usize = 32;
1033
1034 pub const fn new() -> Self {
1035 Self {
1036 frames: [0; Self::CAPACITY],
1037 count: 0,
1038 }
1039 }
1040
1041 #[inline]
1044 pub fn alloc_one(&mut self) -> Option<FrameNumber> {
1045 if self.count == 0 {
1046 return None;
1047 }
1048 self.count -= 1;
1049 Some(FrameNumber::new(self.frames[self.count]))
1050 }
1051
1052 #[inline]
1055 pub fn free_one(&mut self, frame: FrameNumber) -> bool {
1056 if self.count >= Self::CAPACITY {
1057 return false;
1058 }
1059 self.frames[self.count] = frame.as_u64();
1060 self.count += 1;
1061 true
1062 }
1063
1064 #[inline]
1066 pub fn needs_refill(&self) -> bool {
1067 self.count < Self::LOW_WATERMARK
1068 }
1069
1070 #[inline]
1072 pub fn needs_drain(&self) -> bool {
1073 self.count > Self::HIGH_WATERMARK
1074 }
1075
1076 pub fn batch_refill(&mut self) {
1079 let global = FRAME_ALLOCATOR.lock();
1080 let to_refill = Self::BATCH_SIZE.min(Self::CAPACITY - self.count);
1081 for _ in 0..to_refill {
1082 match global.allocate_frames(1, None) {
1083 Ok(frame) => {
1084 self.frames[self.count] = frame.as_u64();
1085 self.count += 1;
1086 }
1087 Err(_) => break,
1088 }
1089 }
1090 }
1091
1092 pub fn batch_drain(&mut self) {
1095 let global = FRAME_ALLOCATOR.lock();
1096 let to_drain = Self::BATCH_SIZE.min(self.count);
1097 for _ in 0..to_drain {
1098 if self.count == 0 {
1099 break;
1100 }
1101 self.count -= 1;
1102 let frame = FrameNumber::new(self.frames[self.count]);
1103 let _ = global.free_frames(frame, 1);
1104 }
1105 }
1106
1107 pub fn cached_count(&self) -> usize {
1109 self.count
1110 }
1111}
1112
/// One frame cache per CPU (up to 16 slots; callers clamp higher CPU ids to
/// slot 15). A single lock guards the whole array.
static PER_CPU_PAGE_CACHES: Mutex<[PerCpuPageCache; 16]> =
    Mutex::new([const { PerCpuPageCache::new() }; 16]);
1120
1121pub fn per_cpu_alloc_frame() -> Result<FrameNumber> {
1126 let cpu_id = crate::sched::smp::current_cpu_id() as usize;
1127
1128 let mut caches = PER_CPU_PAGE_CACHES.lock();
1129 let cache = &mut caches[cpu_id.min(15)];
1130
1131 if let Some(frame) = cache.alloc_one() {
1133 crate::trace!(
1134 crate::perf::trace::TraceEventType::FrameAlloc,
1135 frame.as_u64(),
1136 cpu_id as u64
1137 );
1138 return Ok(frame);
1139 }
1140
1141 cache.batch_refill();
1143
1144 if let Some(frame) = cache.alloc_one() {
1146 crate::trace!(
1147 crate::perf::trace::TraceEventType::FrameAlloc,
1148 frame.as_u64(),
1149 cpu_id as u64
1150 );
1151 return Ok(frame);
1152 }
1153
1154 let frame = FRAME_ALLOCATOR.lock().allocate_frames(1, None)?;
1156 crate::trace!(
1157 crate::perf::trace::TraceEventType::FrameAlloc,
1158 frame.as_u64(),
1159 cpu_id as u64
1160 );
1161 Ok(frame)
1162}
1163
1164pub fn per_cpu_free_frame(frame: FrameNumber) -> Result<()> {
1169 let cpu_id = crate::sched::smp::current_cpu_id() as usize;
1170 crate::trace!(
1171 crate::perf::trace::TraceEventType::FrameFree,
1172 frame.as_u64(),
1173 cpu_id as u64
1174 );
1175
1176 let mut caches = PER_CPU_PAGE_CACHES.lock();
1177 let cache = &mut caches[cpu_id.min(15)];
1178
1179 if cache.free_one(frame) {
1181 if cache.needs_drain() {
1183 cache.batch_drain();
1184 }
1185 return Ok(());
1186 }
1187
1188 cache.batch_drain();
1190 if cache.free_one(frame) {
1191 return Ok(());
1192 }
1193
1194 FRAME_ALLOCATOR.lock().free_frames(frame, 1)
1196}
1197
// Host-only unit tests (excluded from bare-metal builds).
#[cfg(all(test, not(target_os = "none")))]
mod tests {
    use super::*;

    // Exercises first-fit allocation, contiguity, free, and reuse of the
    // bitmap allocator.
    #[test]
    fn test_bitmap_allocator() {
        let allocator = BitmapAllocator::new(FrameNumber::new(0), 1000);

        // First single-frame allocation lands on frame 0 (first fit).
        let frame = allocator
            .allocate(1)
            .expect("single frame allocation from fresh allocator should succeed");
        assert_eq!(frame.as_u64(), 0);

        // Next run starts immediately after the previous allocation.
        let frame = allocator
            .allocate(10)
            .expect("10-frame contiguous allocation should succeed with 999 free frames");
        assert_eq!(frame.as_u64(), 1);

        allocator
            .free(frame, 10)
            .expect("freeing previously allocated frames should succeed");

        // First-fit reuses the freed run at the same position.
        let frame2 = allocator
            .allocate(10)
            .expect("re-allocation after free should succeed");
        assert_eq!(frame2.as_u64(), frame.as_u64());
    }

    // Exercises buddy split, sibling allocation, and merge-on-free.
    #[test]
    fn test_buddy_allocator() {
        let allocator = BuddyAllocator::new(FrameNumber::new(0), 1024);

        // Splitting the 1024-frame block yields the lower half first.
        let frame = allocator
            .allocate(512)
            .expect("512-frame allocation from 1024-frame buddy allocator should succeed");
        assert_eq!(frame.as_u64(), 0);

        // The split-off buddy (upper half) serves the second request.
        let frame2 = allocator
            .allocate(512)
            .expect("second 512-frame allocation should succeed after buddy split");
        assert_eq!(frame2.as_u64(), 512);

        allocator
            .free(frame, 512)
            .expect("freeing first buddy block should succeed");
        allocator
            .free(frame2, 512)
            .expect("freeing second buddy block should succeed and trigger merge");

        // After both halves are freed and merged, the full block is available.
        let frame3 = allocator
            .allocate(1024)
            .expect("full-size allocation should succeed after buddy merge");
        assert_eq!(frame3.as_u64(), 0);
    }
}