⚠️ VeridianOS Kernel Documentation - This is low-level `no_std` kernel code. Unsafe operations are confined to documented `unsafe` blocks; see the SAFETY comments throughout.

veridian_kernel/mm/mod.rs

//! Memory management subsystem
//!
//! This module handles physical and virtual memory management,
//! including page tables, allocators, and memory protection.

// Memory management core -- many APIs exercised at boot and during allocation
#![allow(dead_code)]

pub mod bootloader;
pub mod cache_aligned;
pub mod cache_topology;
pub mod demand_paging;
pub mod frame_allocator;
pub mod heap;
pub mod ksm;
pub mod page_fault;
pub mod page_table;
pub mod user_validation;
pub mod vas;
pub mod vmm;

#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use core::sync::atomic::{AtomicU64, Ordering};

pub(crate) use frame_allocator::FRAME_ALLOCATOR;
// Re-export commonly used types
pub use frame_allocator::{
    FrameAllocatorError, FrameNumber, PhysicalAddress, PhysicalFrame, FRAME_SIZE,
};
pub use heap::init as init_heap;
pub use user_validation::{is_user_addr_valid, translate_address as translate_user_address};
pub use vas::VirtualAddressSpace;

/// Page size constant (4 KiB)
pub const PAGE_SIZE: usize = 4096;

/// Physical memory offset: virtual = physical + PHYS_MEM_OFFSET.
///
/// On x86_64 with bootloader 0.11, physical memory is mapped at a dynamic
/// offset provided by the bootloader. On AArch64 and RISC-V, physical
/// memory is identity-mapped (offset = 0).
pub(crate) static PHYS_MEM_OFFSET: AtomicU64 = AtomicU64::new(0);

/// Set the physical memory offset (called once during early boot).
pub fn set_phys_mem_offset(offset: u64) {
    PHYS_MEM_OFFSET.store(offset, Ordering::Release);
}

/// Convert a physical address to a virtual pointer.
///
/// On x86_64, adds the bootloader's physical memory mapping offset.
/// On AArch64/RISC-V, returns the address unchanged (identity-mapped).
#[inline]
pub fn phys_to_virt_addr(phys: u64) -> u64 {
    phys + PHYS_MEM_OFFSET.load(Ordering::Acquire)
}
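
// Illustrative sketch (not part of the module API): reading the first u64 of
// a physical frame through the physical-memory map. Assumes the bootloader's
// offset has already been recorded via `set_phys_mem_offset` and that `phys`
// lies inside the mapped physical range; the function name is hypothetical.
fn example_read_phys_u64(phys: u64) -> u64 {
    let virt = phys_to_virt_addr(phys) as *const u64;
    // SAFETY: illustrative only -- the caller must guarantee `phys` is a
    // valid, mapped physical address suitably aligned for a u64 read.
    unsafe { core::ptr::read_volatile(virt) }
}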

/// Virtual memory address
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct VirtualAddress(pub u64);

impl VirtualAddress {
    pub fn new(addr: u64) -> Self {
        Self(addr)
    }

    pub fn as_u64(&self) -> u64 {
        self.0
    }

    pub fn as_usize(&self) -> usize {
        self.0 as usize
    }

    pub fn add(&self, offset: usize) -> Self {
        Self(self.0 + offset as u64)
    }
}

/// Get the kernel page table base address
pub fn get_kernel_page_table() -> usize {
    // Return the kernel page table base address; the mechanism is
    // architecture-specific.
    #[cfg(target_arch = "x86_64")]
    {
        // CR3 holds the page table base
        let cr3: u64;
        // SAFETY: Reading CR3 is a privileged, read-only operation that returns the
        // current page table root physical address. It has no side effects and is
        // always valid in ring 0 (kernel mode).
        unsafe {
            core::arch::asm!("mov {}, cr3", out(reg) cr3);
        }
        cr3 as usize
    }

    #[cfg(target_arch = "aarch64")]
    {
        // TTBR0_EL1 holds the page table base
        let ttbr0: u64;
        // SAFETY: Reading TTBR0_EL1 is a read-only system register access that
        // returns the EL0/EL1 page table base address. It has no side effects and
        // is always accessible at EL1 (kernel mode).
        unsafe {
            core::arch::asm!("mrs {}, TTBR0_EL1", out(reg) ttbr0);
        }
        ttbr0 as usize
    }

    #[cfg(target_arch = "riscv64")]
    {
        // SATP holds the page table base
        let satp: usize;
        // SAFETY: Reading the SATP CSR is a read-only operation that returns the
        // page table configuration (mode + ASID + PPN). It has no side effects and
        // is always accessible in S-mode (supervisor/kernel mode).
        unsafe {
            core::arch::asm!("csrr {}, satp", out(reg) satp);
        }
        // Extract the PPN field (bits 43:0 on RV64) and shift to a byte address.
        // Note: this 44-bit mask does not fit a 32-bit usize, so this path is
        // gated to riscv64, matching the rest of this module.
        (satp & 0xFFF_FFFF_FFFF) << 12
    }
}
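
// Illustrative sketch: how a virtual address decomposes into the four 9-bit
// table indices used by the 4-level walkers below. Purely arithmetic; the
// function name is local to this example.
fn example_page_table_indices(vaddr: u64) -> (usize, usize, usize, usize) {
    let l4 = ((vaddr >> 39) & 0x1FF) as usize; // bits 47:39 -> L4 index
    let l3 = ((vaddr >> 30) & 0x1FF) as usize; // bits 38:30 -> L3 index
    let l2 = ((vaddr >> 21) & 0x1FF) as usize; // bits 29:21 -> L2 index
    let l1 = ((vaddr >> 12) & 0x1FF) as usize; // bits 20:12 -> L1 index
    (l4, l3, l2, l1)
}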

/// Page size options
#[repr(usize)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PageSize {
    /// 4 KiB pages
    Small = 4096,
    /// 2 MiB large pages (x86_64 and AArch64)
    Large = 2 * 1024 * 1024,
    /// 1 GiB huge pages (x86_64 and AArch64)
    Huge = 1024 * 1024 * 1024,
}

/// Page flags
#[derive(Debug, Clone, Copy)]
pub struct PageFlags(u64);

impl PageFlags {
    pub const PRESENT: Self = Self(1 << 0);
    pub const WRITABLE: Self = Self(1 << 1);
    pub const USER: Self = Self(1 << 2);
    pub const WRITE_THROUGH: Self = Self(1 << 3);
    pub const NO_CACHE: Self = Self(1 << 4);
    pub const ACCESSED: Self = Self(1 << 5);
    pub const DIRTY: Self = Self(1 << 6);
    pub const HUGE: Self = Self(1 << 7);
    pub const GLOBAL: Self = Self(1 << 8);
    pub const NO_EXECUTE: Self = Self(1 << 63);

    // Executable is expressed as the absence of NO_EXECUTE, so this sets no bits.
    pub const EXECUTABLE: Self = Self(0);

    pub fn contains(&self, other: Self) -> bool {
        (self.0 & other.0) == other.0
    }
}

impl core::ops::BitOr for PageFlags {
    type Output = Self;

    fn bitor(self, rhs: Self) -> Self::Output {
        Self(self.0 | rhs.0)
    }
}

impl core::ops::BitOrAssign for PageFlags {
    fn bitor_assign(&mut self, rhs: Self) {
        self.0 |= rhs.0;
    }
}
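
// Illustrative sketch: composing flags for a writable, non-executable kernel
// data page. Note that `contains(PageFlags::EXECUTABLE)` is vacuously true
// for any flag set, since EXECUTABLE is the empty bit pattern.
fn example_data_page_flags() -> PageFlags {
    let mut flags = PageFlags::PRESENT | PageFlags::WRITABLE;
    flags |= PageFlags::NO_EXECUTE;
    debug_assert!(flags.contains(PageFlags::PRESENT | PageFlags::WRITABLE));
    flags
}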

/// Memory region from bootloader/firmware
#[derive(Debug, Clone, Copy)]
pub struct MemoryRegion {
    pub start: u64,
    pub size: u64,
    pub usable: bool,
}
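
// Illustrative sketch: a minimal hand-built memory map marking 16 MiB of RAM
// at the 16 MiB mark as usable, suitable for passing to `init` below. Real
// maps come from the bootloader/firmware; these values are hypothetical.
fn example_memory_map() -> [MemoryRegion; 1] {
    [MemoryRegion {
        start: 16 * 1024 * 1024, // 16 MiB
        size: 16 * 1024 * 1024,  // 16 MiB
        usable: true,
    }]
}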

/// Initialize the memory management subsystem
pub fn init(memory_map: &[MemoryRegion]) {
    kprintln!("[MM] Initializing memory management...");

    // Initialize the frame allocator with the available memory regions
    kprintln!("[MM] Getting frame allocator lock...");
    {
        let mut allocator = FRAME_ALLOCATOR.lock();

        kprintln!("[MM] Frame allocator locked successfully");

        let mut total_memory = 0u64;
        let mut usable_memory = 0u64;

        for (idx, region) in memory_map.iter().enumerate() {
            kprintln!(
                "[MM] Region {}: start={:#x} size={:#x} usable={}",
                idx,
                region.start,
                region.size,
                region.usable
            );

            total_memory += region.size;

            if region.usable {
                usable_memory += region.size;

                let start_frame = FrameNumber::new(region.start / FRAME_SIZE as u64);
                let frame_count = region.size as usize / FRAME_SIZE;

                // Use the region index as the NUMA node for now
                let numa_node = idx.min(7); // Max 8 NUMA nodes

                kprintln!("[MM] Initializing NUMA node {}", numa_node);

                if let Err(_e) = allocator.init_numa_node(numa_node, start_frame, frame_count) {
                    kprintln!("[MM] Warning: Failed to initialize memory region {}", idx);
                } else {
                    kprintln!("[MM] Memory region {} initialized", idx);
                }
            }
        }

        kprintln!(
            "[MM] Memory initialization complete: {} MB total, {} MB usable",
            total_memory / (1024 * 1024),
            usable_memory / (1024 * 1024)
        );
    } // Allocator lock released at end of scope
}

/// Translate a kernel virtual address to its physical address by walking
/// the boot page tables (CR3). Returns the physical address, or 0 on failure.
///
/// This is needed to find the kernel's actual physical extent, since UEFI
/// may load the kernel at any physical address.
#[cfg(target_arch = "x86_64")]
fn translate_kernel_vaddr(vaddr: u64) -> u64 {
    let cr3: u64;
    // SAFETY: Reading CR3 is a read-only privileged operation with no side effects;
    // always valid in ring 0.
    unsafe {
        core::arch::asm!("mov {}, cr3", out(reg) cr3, options(nomem, nostack));
    }
    let phys_offset = PHYS_MEM_OFFSET.load(core::sync::atomic::Ordering::Acquire);
    let l4_phys = cr3 & !0xFFF;

    // L4 index
    let l4_idx = ((vaddr >> 39) & 0x1FF) as usize;
    let l4_virt = (l4_phys + phys_offset) as *const u64;
    // SAFETY: l4_virt points into the L4 page table via the bootloader's
    // physical memory map; the index is in 0..512.
    let l4_entry = unsafe { core::ptr::read_volatile(l4_virt.add(l4_idx)) };
    if l4_entry & 1 == 0 {
        return 0;
    }

    // L3 index
    let l3_phys = l4_entry & 0x000F_FFFF_FFFF_F000;
    let l3_idx = ((vaddr >> 30) & 0x1FF) as usize;
    let l3_virt = (l3_phys + phys_offset) as *const u64;
    // SAFETY: l3_virt points into the L3 page table via the physical memory map;
    // the index is in 0..512.
    let l3_entry = unsafe { core::ptr::read_volatile(l3_virt.add(l3_idx)) };
    if l3_entry & 1 == 0 {
        return 0;
    }
    if l3_entry & (1 << 7) != 0 {
        // 1 GiB huge page
        return (l3_entry & 0x000F_FFFF_C000_0000) | (vaddr & 0x3FFF_FFFF);
    }

    // L2 index
    let l2_phys = l3_entry & 0x000F_FFFF_FFFF_F000;
    let l2_idx = ((vaddr >> 21) & 0x1FF) as usize;
    let l2_virt = (l2_phys + phys_offset) as *const u64;
    // SAFETY: l2_virt points into the L2 page table via the physical memory map;
    // the index is in 0..512.
    let l2_entry = unsafe { core::ptr::read_volatile(l2_virt.add(l2_idx)) };
    if l2_entry & 1 == 0 {
        return 0;
    }
    if l2_entry & (1 << 7) != 0 {
        // 2 MiB huge page
        return (l2_entry & 0x000F_FFFF_FFE0_0000) | (vaddr & 0x1F_FFFF);
    }

    // L1 index
    let l1_phys = l2_entry & 0x000F_FFFF_FFFF_F000;
    let l1_idx = ((vaddr >> 12) & 0x1FF) as usize;
    let l1_virt = (l1_phys + phys_offset) as *const u64;
    // SAFETY: l1_virt points into the L1 page table via the physical memory map;
    // the index is in 0..512.
    let l1_entry = unsafe { core::ptr::read_volatile(l1_virt.add(l1_idx)) };
    if l1_entry & 1 == 0 {
        return 0;
    }

    (l1_entry & 0x000F_FFFF_FFFF_F000) | (vaddr & 0xFFF)
}
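
// Illustrative sketch: sanity-checking the walker by translating a mapped
// kernel virtual address and comparing the byte at the original address with
// its direct-map alias. Returns false if the walk hit a non-present entry.
// The function name is hypothetical.
#[cfg(target_arch = "x86_64")]
fn example_translate_round_trip(vaddr: u64) -> bool {
    let paddr = translate_kernel_vaddr(vaddr);
    if paddr == 0 {
        return false; // walk failed
    }
    let alias = phys_to_virt_addr(paddr) as *const u8;
    // SAFETY: illustrative only -- `vaddr` must be a mapped kernel address,
    // and `paddr` was just obtained from a successful walk through the
    // physical memory map.
    unsafe { core::ptr::read_volatile(alias) == core::ptr::read_volatile(vaddr as *const u8) }
}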

/// Initialize with default memory map for testing
pub fn init_default() {
    kprintln!("[MM] Using default memory map for initialization");

    // Architecture-specific default memory maps
    #[cfg(target_arch = "x86_64")]
    let default_map = {
        // Determine the kernel's physical end address by translating
        // __kernel_end through the boot page tables. UEFI may load the
        // kernel at any physical address, so we cannot hard-code the
        // frame allocator start.
        extern "C" {
            static __kernel_end: u8;
        }
        // SAFETY: __kernel_end is a linker-defined symbol; we only take its address,
        // not dereference it.
        let kernel_end_virt = unsafe { &__kernel_end as *const u8 as u64 };
        let kernel_end_phys = translate_kernel_vaddr(kernel_end_virt);

        // If __kernel_end translation failed, try translating the last byte
        // of HEAP_MEMORY instead. The heap is the largest object in BSS
        // (~512MB) and its end address is a tight lower bound on the kernel's
        // physical extent. __kernel_end may fail translation if the
        // bootloader's page table walk hits an unmapped intermediate entry
        // for the very last page of the BSS.
        let kernel_end_phys = if kernel_end_phys != 0 {
            kernel_end_phys
        } else {
            let heap_end_virt = heap::heap_end_vaddr();
            let heap_end_phys = translate_kernel_vaddr(heap_end_virt);
            if heap_end_phys != 0 {
                kprintln!(
                    "[MM] __kernel_end translation failed, using heap end at phys {:#x}",
                    heap_end_phys
                );
            }
            heap_end_phys
        };

        // Round up to the next 2MB boundary, then add a 2MB safety margin
        let alloc_start = if kernel_end_phys != 0 {
            let aligned = (kernel_end_phys + 0x1FFFFF) & !0x1FFFFF; // Round up to 2MB
            let start = aligned + 0x200000; // 2MB safety margin
            kprintln!(
                "[MM] Kernel ends at phys {:#x}, allocator starts at {:#x} ({} MB)",
                kernel_end_phys,
                start,
                start / (1024 * 1024)
            );
            start
        } else {
            // Fallback: compute a safe start from HEAP_SIZE. The kernel's
            // physical footprint is dominated by the BSS (which contains
            // HEAP_MEMORY). Add 64MB for code, rodata, data, stacks, and
            // the bootloader's own allocations.
            let safe_start = (heap::HEAP_SIZE as u64 + 64 * 1024 * 1024 + 0x1FFFFF) & !0x1FFFFF;
            kprintln!(
                "[MM] WARNING: Could not find kernel physical end, using {} MB start (heap={}MB + \
                 64MB margin)",
                safe_start / (1024 * 1024),
                heap::HEAP_SIZE / (1024 * 1024)
            );
            safe_start
        };

        // Total usable RAM. MUST match the QEMU `-m` flag exactly.
        // Over-estimating causes the buddy allocator to hand out frames
        // from non-existent physical addresses, corrupting kernel heap
        // state (manifests as a null-ptr deref in linked_list_allocator
        // after ~200 fork+exec cycles exhaust the bitmap region).
        // Current QEMU config: -m 2048M.
        let ram_end: u64 = 2048 * 1024 * 1024;
        let size = ram_end.saturating_sub(alloc_start);

        [MemoryRegion {
            start: alloc_start,
            size,
            usable: true,
        }]
    };

    #[cfg(target_arch = "aarch64")]
    let default_map = [MemoryRegion {
        start: 0x48000000, // 1.125 GiB (after the kernel at 0x40080000)
        size: 134217728,   // 128MB pre-calculated
        usable: true,
    }];

    #[cfg(target_arch = "riscv64")]
    let default_map = [MemoryRegion {
        // QEMU virt machine: RAM at 0x80000000, kernel loaded at 0x80200000.
        // __kernel_end is at ~0x80D2C000 (includes BSS + 128KB stack).
        // Start frame allocation well after the kernel image to avoid
        // corrupting kernel data. 0x80E00000 provides ~1MB safety margin.
        // End of RAM at 0x88000000 (128MB), giving ~114MB for frames.
        start: 0x80E00000,
        size: 0x88000000 - 0x80E00000, // ~114MB until end of 128MB RAM
        usable: true,
    }];

    kprintln!("[MM] Calling init with default memory map");

    init(&default_map);

    kprintln!("[MM] init returned successfully");

    // Initialize the heap allocator once the frame allocator is ready
    init_heap().expect("Heap initialization failed");
}

/// Walk the boot page tables (CR3) and mark all intermediate table frames
/// as reserved in the frame allocator. This prevents the allocator from
/// handing out frames that the bootloader used for page tables, which would
/// corrupt kernel address space mappings when those frames are overwritten.
///
/// Reserves page table frames for both:
/// - Kernel-space L4 entries (256..512): kernel code, heap, stacks, MMIO
/// - Physical memory mapping L4 entry (lower half): used by phys_to_virt_addr()
///
/// Must be called AFTER init_default() (the frame allocator must be ready).
#[cfg(target_arch = "x86_64")]
pub fn reserve_boot_page_table_frames() {
    let cr3: u64;
    // SAFETY: Reading CR3 is a read-only privileged operation with no side effects;
    // always valid in ring 0.
    unsafe {
        core::arch::asm!("mov {}, cr3", out(reg) cr3, options(nomem, nostack));
    }
    let l4_phys = cr3 & !0xFFF; // Mask flags

    let phys_offset = PHYS_MEM_OFFSET.load(core::sync::atomic::Ordering::Acquire);
    let mut reserved_count = 0u32;

    // Reserve the L4 table frame itself
    let l4_frame = FrameNumber::new(l4_phys / FRAME_SIZE as u64);
    let _ = FRAME_ALLOCATOR.lock().mark_frame_used(l4_frame);
    reserved_count += 1;

    let l4_virt = (l4_phys + phys_offset) as *const u64;

    // Helper: walk one L4 entry's subtree (L3 → L2 → L1) and reserve all
    // intermediate page table frames.
    let mut reserve_l4_subtree = |l4_idx: usize| {
        // SAFETY: l4_virt points into the L4 page table via the physical memory
        // map; callers pass only valid L4 indices (0..512).
        let l4_entry = unsafe { core::ptr::read_volatile(l4_virt.add(l4_idx)) };
        if l4_entry & 1 == 0 {
            return; // Not present
        }
        let l3_phys = l4_entry & 0x000F_FFFF_FFFF_F000;
        let l3_frame = FrameNumber::new(l3_phys / FRAME_SIZE as u64);
        let _ = FRAME_ALLOCATOR.lock().mark_frame_used(l3_frame);
        reserved_count += 1;

        // Walk L3 entries
        let l3_virt = (l3_phys + phys_offset) as *const u64;
        for l3_idx in 0..512 {
            // SAFETY: l3_virt points into the L3 page table via the physical
            // memory map; the index is in 0..512.
            let l3_entry = unsafe { core::ptr::read_volatile(l3_virt.add(l3_idx)) };
            if l3_entry & 1 == 0 {
                continue;
            }
            if l3_entry & (1 << 7) != 0 {
                continue; // 1 GiB huge page, no L2 table
            }
            let l2_phys = l3_entry & 0x000F_FFFF_FFFF_F000;
            let l2_frame = FrameNumber::new(l2_phys / FRAME_SIZE as u64);
            let _ = FRAME_ALLOCATOR.lock().mark_frame_used(l2_frame);
            reserved_count += 1;

            // Walk L2 entries
            let l2_virt = (l2_phys + phys_offset) as *const u64;
            for l2_idx in 0..512 {
                // SAFETY: l2_virt points into the L2 page table via the physical
                // memory map; the index is in 0..512.
                let l2_entry = unsafe { core::ptr::read_volatile(l2_virt.add(l2_idx)) };
                if l2_entry & 1 == 0 {
                    continue;
                }
                if l2_entry & (1 << 7) != 0 {
                    continue; // 2 MiB huge page, no L1 table
                }
                let l1_phys = l2_entry & 0x000F_FFFF_FFFF_F000;
                let l1_frame = FrameNumber::new(l1_phys / FRAME_SIZE as u64);
                let _ = FRAME_ALLOCATOR.lock().mark_frame_used(l1_frame);
                reserved_count += 1;
            }
        }
    };

    // Reserve kernel-half page table frames (L4 entries 256..512)
    for l4_idx in 256..512 {
        reserve_l4_subtree(l4_idx);
    }

    // Reserve the physical memory mapping's page table frames.
    // The bootloader maps all physical memory at PHYS_MEM_OFFSET, which
    // occupies one or more L4 entries in the lower half. Without reserving
    // these, the frame allocator can hand out page table frames used by
    // the physical memory mapping, corrupting it when those frames are
    // overwritten (e.g., during fork's clone_from deep copy).
    if phys_offset != 0 {
        let phys_l4_idx = ((phys_offset >> 39) & 0x1FF) as usize;
        if phys_l4_idx < 256 {
            reserve_l4_subtree(phys_l4_idx);
        }
    }

    kprintln!(
        "[MM] Reserved {} boot page table frames from frame allocator",
        reserved_count
    );
}
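
// Illustrative sketch of the early-boot ordering these routines assume on
// x86_64: record the bootloader's physical-memory offset first, then bring
// up the frame allocator and heap, then fence off the boot page-table
// frames. `boot_info_phys_offset` stands in for wherever the bootloader
// hands over its offset; it is not a real symbol in this module.
#[cfg(target_arch = "x86_64")]
fn example_mm_boot_sequence(boot_info_phys_offset: u64) {
    set_phys_mem_offset(boot_info_phys_offset); // 1. enable phys_to_virt_addr()
    init_default();                             // 2. frame allocator + heap
    reserve_boot_page_table_frames();           // 3. must run after init_default()
}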

/// Translate a virtual address to a physical address
pub fn translate_address(
    vas: &VirtualAddressSpace,
    vaddr: VirtualAddress,
) -> Option<PhysicalAddress> {
    // Find the mapping for this virtual address
    #[cfg(feature = "alloc")]
    {
        if let Some(mapping) = vas.find_mapping(vaddr) {
            // Calculate the offset within the mapping
            let offset = vaddr.0 - mapping.start.0;
            let page_index = (offset / PAGE_SIZE as u64) as usize;

            // Check if we have physical frames allocated
            if page_index < mapping.physical_frames.len() {
                let frame = mapping.physical_frames[page_index];
                let page_offset = offset % PAGE_SIZE as u64;
                return Some(PhysicalAddress::new(frame.as_addr().as_u64() + page_offset));
            }
        }
    }

    // Without the "alloc" feature there is no mapping list to consult.
    #[cfg(not(feature = "alloc"))]
    let _ = (vas, vaddr);

    None
}

/// Free a physical frame
pub fn free_frame(frame: PhysicalAddress) {
    let frame_num = FrameNumber::new(frame.as_u64() / FRAME_SIZE as u64);
    if let Err(e) = FRAME_ALLOCATOR.lock().free_frames(frame_num, 1) {
        kprintln!(
            "[MM] Warning: Failed to free frame at {:#x}: {:?}",
            frame.as_u64(),
            e
        );
    }
}

/// Placeholder types for IPC integration
pub type PagePermissions = PageFlags;
pub type PhysicalPage = FrameNumber;

/// Allocate physical pages (requires the "alloc" feature for the returned Vec)
#[cfg(feature = "alloc")]
pub fn allocate_pages(
    count: usize,
    numa_node: Option<usize>,
) -> Result<Vec<PhysicalPage>, FrameAllocatorError> {
    let frame = FRAME_ALLOCATOR.lock().allocate_frames(count, numa_node)?;

    // Return a vector of consecutive frame numbers
    let mut pages = Vec::with_capacity(count);
    for i in 0..count {
        pages.push(FrameNumber::new(frame.as_u64() + i as u64));
    }

    Ok(pages)
}

/// Free physical pages
pub fn free_pages(pages: &[PhysicalPage]) -> Result<(), FrameAllocatorError> {
    if pages.is_empty() {
        return Ok(());
    }

    // Assume pages are contiguous for now
    let first_frame = pages[0];
    let count = pages.len();

    FRAME_ALLOCATOR.lock().free_frames(first_frame, count)
}
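
// Illustrative sketch: allocating four contiguous frames on NUMA node 0 and
// handing them back. Gated on the "alloc" feature, like `allocate_pages`
// itself; the function name is hypothetical.
#[cfg(feature = "alloc")]
fn example_alloc_and_free() -> Result<(), FrameAllocatorError> {
    let pages = allocate_pages(4, Some(0))?;
    // ... map or otherwise use the frames ...
    free_pages(&pages)
}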

/// Page cache frame counter: tracks the number of frames currently held in
/// the page cache (file-backed pages kept in memory for faster re-access).
static PAGE_CACHE_FRAMES: AtomicU64 = AtomicU64::new(0);

/// Increment the page cache frame count when pages are added to the cache.
pub fn page_cache_add(count: u64) {
    PAGE_CACHE_FRAMES.fetch_add(count, Ordering::Relaxed);
}

/// Decrement the page cache frame count when pages are evicted from the cache.
pub fn page_cache_remove(count: u64) {
    PAGE_CACHE_FRAMES.fetch_sub(count, Ordering::Relaxed);
}

/// Memory statistics structure
pub struct MemoryStats {
    pub total_frames: usize,
    pub free_frames: usize,
    pub cached_frames: usize,
}

/// Get memory statistics
pub fn get_memory_stats() -> MemoryStats {
    let allocator = FRAME_ALLOCATOR.lock();
    let stats = allocator.get_stats();

    MemoryStats {
        total_frames: stats.total_frames as usize,
        free_frames: stats.free_frames as usize,
        cached_frames: PAGE_CACHE_FRAMES.load(Ordering::Relaxed) as usize,
    }
}
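
// Illustrative sketch: deriving a used-frame count from the stats snapshot.
// The counters are sampled at one point in time and may drift immediately
// afterwards, so treat the result as approximate.
fn example_used_frames() -> usize {
    let stats = get_memory_stats();
    stats.total_frames.saturating_sub(stats.free_frames)
}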