⚠️ VeridianOS Kernel Documentation — low-level `no_std` kernel code. Treat all functions as unsafe unless explicitly marked otherwise.

veridian_kernel/mm/
demand_paging.rs

1//! Demand Paging and Copy-on-Write (COW) Manager
2//!
3//! Provides infrastructure for lazy page allocation and COW fork support.
4//!
5//! ## Demand Paging
6//! Pages can be registered as "lazy" mappings via [`register_lazy`]. When
7//! a page fault hits a lazy-mapped address, the manager allocates a physical
8//! frame and returns it (along with the appropriate flags) so the caller can
9//! install the mapping in the page table.
10//!
11//! ## Copy-on-Write
12//! [`CowTable`] tracks shared physical frames with reference counts. When a
13//! COW page is written, the fault handler calls
14//! [`DemandPagingManager::handle_cow_fault`] to allocate a private copy and
15//! decrement the shared reference count.
16
17#![allow(dead_code)]
18
19#[cfg(feature = "alloc")]
20extern crate alloc;
21
22#[cfg(feature = "alloc")]
23use alloc::collections::BTreeMap;
24#[cfg(feature = "alloc")]
25use alloc::vec;
26#[cfg(feature = "alloc")]
27use alloc::vec::Vec;
28use core::sync::atomic::{AtomicU32, Ordering};
29
30use spin::Mutex;
31
32use crate::{
33    error::KernelError,
34    mm::{FrameNumber, PageFlags, FRAME_ALLOCATOR, PAGE_SIZE},
35};
36
37// ===========================================================================
38// Lazy Mapping Types
39// ===========================================================================
40
/// How a lazy page is backed when finally faulted in.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BackingType {
    /// Anonymous memory (zero-filled on first access).
    Anonymous,
    /// File-backed memory (load from inode + offset).
    ///
    /// NOTE(review): the fault path visible in this file
    /// (`DemandPagingManager::try_demand_page`) allocates the frame but does
    /// not itself load file contents — presumably the caller does that after
    /// installing the mapping. Confirm against the VFS fault handler.
    FileBacked {
        /// Inode number of the backing file.
        inode: u64,
        /// Byte offset into the file for this mapping.
        offset: u64,
    },
}
54
/// A region of virtual address space registered for demand paging.
///
/// No physical frames are allocated when a lazy mapping is created.
/// The first access triggers a page fault, which the demand paging
/// manager resolves by allocating a frame and returning it.
#[cfg(feature = "alloc")]
pub struct LazyMapping {
    /// Start virtual address (page-aligned).
    pub start_vaddr: usize,
    /// Size in bytes (multiple of PAGE_SIZE).
    pub size: usize,
    /// Page flags to apply when the page is finally mapped.
    pub flags: PageFlags,
    /// Backing type for this mapping.
    pub backing: BackingType,
    /// Per-page tracking: `faulted_in[i]` is true once page `i` (counting
    /// pages from `start_vaddr`) has been faulted in. One entry per page,
    /// rounded up to cover a partial trailing page (see `new`).
    faulted_in: Vec<bool>,
}
73
#[cfg(feature = "alloc")]
impl LazyMapping {
    /// Create a new lazy mapping covering `size` bytes at `start_vaddr`.
    ///
    /// The page count is rounded up so a partial trailing page still gets a
    /// `faulted_in` slot.
    pub fn new(start_vaddr: usize, size: usize, flags: PageFlags, backing: BackingType) -> Self {
        let page_count = size.div_ceil(PAGE_SIZE);
        Self {
            start_vaddr,
            size,
            flags,
            backing,
            faulted_in: vec![false; page_count],
        }
    }

    /// Check whether a virtual address falls within this mapping.
    ///
    /// Written as a subtraction against `start_vaddr` rather than comparing
    /// with `start_vaddr + size`, so a mapping that ends at the very top of
    /// the address space cannot overflow the bound computation.
    pub fn contains(&self, vaddr: usize) -> bool {
        vaddr >= self.start_vaddr && vaddr - self.start_vaddr < self.size
    }

    /// Page index for a given virtual address within this mapping.
    ///
    /// Callers must ensure `self.contains(vaddr)` holds first; otherwise the
    /// subtraction underflows.
    fn page_index(&self, vaddr: usize) -> usize {
        (vaddr - self.start_vaddr) / PAGE_SIZE
    }
}
98
99// ===========================================================================
100// Copy-on-Write Table
101// ===========================================================================
102
/// A single COW-shared physical frame.
pub struct CowEntry {
    /// The shared physical frame.
    pub frame: FrameNumber,
    /// Number of address spaces sharing this frame. Atomic so it can be
    /// decremented through a shared `&CowTable` (see `CowTable::release`,
    /// which takes `&self`).
    pub ref_count: AtomicU32,
}
110
/// Table of COW-shared frames keyed by virtual page address.
#[cfg(feature = "alloc")]
pub struct CowTable {
    /// Map from (page-aligned) virtual page address to COW entry.
    /// Entries are inserted by `mark_cow`; note that `release` only
    /// decrements the count — it does not remove the entry.
    pub entries: BTreeMap<usize, CowEntry>,
}
117
#[cfg(feature = "alloc")]
impl Default for CowTable {
    /// An empty COW table, identical to [`CowTable::new`].
    fn default() -> Self {
        CowTable {
            entries: BTreeMap::new(),
        }
    }
}
124
#[cfg(feature = "alloc")]
impl CowTable {
    /// Create an empty COW table.
    pub fn new() -> Self {
        Self {
            entries: BTreeMap::new(),
        }
    }

    /// Register a frame as COW-shared with an initial `ref_count` of 2
    /// (the two address spaces sharing it after a fork). Replaces any
    /// existing entry at `vaddr`.
    pub fn mark_cow(&mut self, vaddr: usize, frame: FrameNumber) {
        self.entries.insert(
            vaddr,
            CowEntry {
                frame,
                ref_count: AtomicU32::new(2),
            },
        );
    }

    /// Decrement the ref count for a COW page; returns `true` if this was
    /// the last reference (the caller may free the frame).
    ///
    /// Uses a compare-exchange loop instead of an unconditional `fetch_sub`
    /// so the counter can never underflow: releasing an entry that is
    /// already at 0 is a no-op returning `false`, rather than wrapping the
    /// count to `u32::MAX` and corrupting COW accounting.
    pub fn release(&self, vaddr: usize) -> bool {
        let Some(entry) = self.entries.get(&vaddr) else {
            return false;
        };
        let mut current = entry.ref_count.load(Ordering::Acquire);
        loop {
            if current == 0 {
                // Already fully released; do not underflow.
                return false;
            }
            match entry.ref_count.compare_exchange_weak(
                current,
                current - 1,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                // We performed the decrement from `current`; it was the last
                // reference exactly when we moved 1 -> 0.
                Ok(_) => return current == 1,
                Err(observed) => current = observed,
            }
        }
    }

    /// Check whether an address is COW-tracked.
    pub fn is_cow(&self, vaddr: usize) -> bool {
        self.entries.contains_key(&vaddr)
    }
}
161
162// ===========================================================================
163// Demand Paging Manager
164// ===========================================================================
165
/// Manages lazy mappings and COW state.
///
/// The manager does NOT directly modify page tables. Instead, its methods
/// return allocation results (frame number + flags) that the caller uses
/// to install the actual mapping via the VAS / page table infrastructure.
#[cfg(feature = "alloc")]
pub struct DemandPagingManager {
    /// Registered lazy mappings keyed by start address. Lookup on fault is
    /// a linear scan over values (see `try_demand_page`), not a range query.
    lazy_mappings: BTreeMap<usize, LazyMapping>,
    /// COW-shared frame tracking, keyed by virtual page address.
    pub cow_table: CowTable,
}
178
#[cfg(feature = "alloc")]
impl Default for DemandPagingManager {
    /// An empty manager, identical to [`DemandPagingManager::new`].
    fn default() -> Self {
        DemandPagingManager::new()
    }
}
185
#[cfg(feature = "alloc")]
impl DemandPagingManager {
    /// Create a new demand paging manager with no mappings and an empty
    /// COW table.
    pub fn new() -> Self {
        Self {
            lazy_mappings: BTreeMap::new(),
            cow_table: CowTable::new(),
        }
    }

    /// Register a lazy mapping. No physical memory is allocated.
    ///
    /// Keyed by `start_vaddr`; registering a second mapping with the same
    /// start address replaces the first.
    pub fn register_lazy(
        &mut self,
        start_vaddr: usize,
        size: usize,
        flags: PageFlags,
        backing: BackingType,
    ) {
        let mapping = LazyMapping::new(start_vaddr, size, flags, backing);
        self.lazy_mappings.insert(start_vaddr, mapping);
    }

    /// Try to resolve a demand-page fault at `vaddr`.
    ///
    /// If the address falls within a registered lazy mapping that has not
    /// yet been faulted in, allocates a physical frame and returns
    /// `Ok((frame, flags))`. The caller is responsible for installing the
    /// mapping in the page table (and, for `FileBacked` mappings, for
    /// loading the file contents into the frame).
    ///
    /// # Errors
    /// - `UnmappedMemory` if no lazy mapping contains `vaddr`.
    /// - `InvalidAddress` if the page is out of range or already faulted in.
    /// - `OutOfMemory` if frame allocation fails.
    pub fn try_demand_page(
        &mut self,
        vaddr: usize,
    ) -> Result<(FrameNumber, PageFlags), KernelError> {
        // Linear scan: find which lazy mapping contains this address.
        let mapping = self
            .lazy_mappings
            .values_mut()
            .find(|m| m.contains(vaddr))
            .ok_or(KernelError::UnmappedMemory { addr: vaddr })?;

        let idx = mapping.page_index(vaddr);
        if idx >= mapping.faulted_in.len() {
            return Err(KernelError::InvalidAddress { addr: vaddr });
        }
        if mapping.faulted_in[idx] {
            // Already faulted in -- not a lazy fault; let the caller treat
            // it as a genuine protection fault.
            return Err(KernelError::InvalidAddress { addr: vaddr });
        }

        // Allocate a physical frame.
        let frame = FRAME_ALLOCATOR
            .lock()
            .allocate_frames(1, None)
            .map_err(|_| KernelError::OutOfMemory {
                requested: PAGE_SIZE,
                available: 0,
            })?;

        // Zero the frame for ALL backing types, not just Anonymous: a
        // freshly allocated frame may still contain stale data from its
        // previous owner. For `FileBacked` mappings the caller overwrites
        // the frame with file contents afterwards, but zeroing first
        // guarantees no stale kernel/user data leaks through short or
        // partial file reads.
        {
            let virt = crate::mm::phys_to_virt_addr(frame.as_u64() * PAGE_SIZE as u64) as *mut u8;
            // SAFETY: frame is freshly allocated within the physical memory window.
            unsafe {
                core::ptr::write_bytes(virt, 0, PAGE_SIZE);
            }
        }

        mapping.faulted_in[idx] = true;
        Ok((frame, mapping.flags))
    }

    /// Handle a COW fault at `vaddr`.
    ///
    /// Allocates a new frame, copies the contents from the old shared frame,
    /// decrements the COW ref count, and returns the new frame. The caller
    /// remaps `vaddr` to the returned frame with write permission.
    ///
    /// # Errors
    /// - `InvalidAddress` if the page is not COW-tracked.
    /// - `OutOfMemory` if frame allocation fails.
    pub fn handle_cow_fault(&self, vaddr: usize) -> Result<FrameNumber, KernelError> {
        // Round down to the page base used as the COW table key.
        let page_addr = vaddr & !(PAGE_SIZE - 1);

        let entry = self
            .cow_table
            .entries
            .get(&page_addr)
            .ok_or(KernelError::InvalidAddress { addr: vaddr })?;

        let old_frame = entry.frame;

        // Allocate a private copy.
        let new_frame = FRAME_ALLOCATOR
            .lock()
            .allocate_frames(1, None)
            .map_err(|_| KernelError::OutOfMemory {
                requested: PAGE_SIZE,
                available: 0,
            })?;

        // Copy old frame contents to new frame.
        let old_virt = crate::mm::phys_to_virt_addr(old_frame.as_u64() * PAGE_SIZE as u64);
        let new_virt = crate::mm::phys_to_virt_addr(new_frame.as_u64() * PAGE_SIZE as u64);
        // SAFETY: Both virtual addresses are within the kernel physical memory
        // window, pointing to valid 4KB frames.
        unsafe {
            core::ptr::copy_nonoverlapping(old_virt as *const u8, new_virt as *mut u8, PAGE_SIZE);
        }

        // Decrement ref count on the old shared frame.
        //
        // TODO(review): when `release` reports the last reference was
        // dropped, `old_frame` is no longer reachable from any address space
        // and should be returned to the frame allocator; as written it
        // leaks. The deallocation API is not visible from this module, so
        // flagging rather than guessing at it.
        let _last_ref = self.cow_table.release(page_addr);

        Ok(new_frame)
    }

    /// Remove a lazy mapping registered at `start_vaddr` (no-op if absent).
    pub fn unregister_lazy(&mut self, start_vaddr: usize) {
        self.lazy_mappings.remove(&start_vaddr);
    }

    /// Mark a range of pages as COW-shared.
    ///
    /// Each `(vaddr, frame)` pair is inserted with the standard initial
    /// ref count (see `CowTable::mark_cow`).
    pub fn mark_cow_range(&mut self, base: usize, pages: &[(usize, FrameNumber)]) {
        // `base` is accepted for future use (relative offsets); suppress the
        // unused warning once instead of inside the loop.
        let _ = base;
        for &(vaddr, frame) in pages {
            self.cow_table.mark_cow(vaddr, frame);
        }
    }

    /// Add a single COW entry (used by cow_fork).
    pub fn add_cow_entry(&mut self, vaddr: usize, frame: FrameNumber) {
        self.cow_table.mark_cow(vaddr, frame);
    }
}
318
319// ===========================================================================
320// Global Instance
321// ===========================================================================
322
/// Global demand paging manager. `None` until [`init`] runs (or until
/// [`with_manager_mut`] lazily creates it).
#[cfg(feature = "alloc")]
static DEMAND_PAGING: Mutex<Option<DemandPagingManager>> = Mutex::new(None);
325
/// Initialize the global demand paging manager, replacing any existing one.
#[cfg(feature = "alloc")]
pub fn init() {
    let manager = DemandPagingManager::new();
    *DEMAND_PAGING.lock() = Some(manager);
    crate::println!("[DEMAND_PAGING] Manager initialized");
}
332
/// Register a lazy mapping via the global manager.
///
/// Silently does nothing if the manager has not been initialized.
#[cfg(feature = "alloc")]
pub fn register_lazy(start_vaddr: usize, size: usize, flags: PageFlags, backing: BackingType) {
    let mut guard = DEMAND_PAGING.lock();
    if let Some(mgr) = guard.as_mut() {
        mgr.register_lazy(start_vaddr, size, flags, backing);
    }
}
340
/// Try to resolve a page fault via demand paging.
///
/// Returns `Ok((frame, flags))` if the fault was resolved, or
/// `NotInitialized` if the global manager has not been set up.
#[cfg(feature = "alloc")]
pub fn handle_page_fault(vaddr: usize) -> Result<(FrameNumber, PageFlags), KernelError> {
    // The lock guard (a match-scrutinee temporary) lives for the whole match.
    match DEMAND_PAGING.lock().as_mut() {
        Some(mgr) => mgr.try_demand_page(vaddr),
        None => Err(KernelError::NotInitialized {
            subsystem: "demand_paging",
        }),
    }
}
352
/// Access the global demand paging manager (mutable).
///
/// Lazily creates the manager on first use if [`init`] has not run yet.
#[cfg(feature = "alloc")]
pub fn with_manager_mut<R, F: FnOnce(&mut DemandPagingManager) -> R>(f: F) -> R {
    let mut guard = DEMAND_PAGING.lock();
    if guard.is_none() {
        *guard = Some(DemandPagingManager::new());
    }
    // The slot was just filled above if it was empty, so this cannot fail.
    f(guard.as_mut().expect("demand paging manager present"))
}
360
361// ===========================================================================
362// Tests
363// ===========================================================================
364
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_backing_type() {
        // Anonymous and file-backed variants must compare as distinct.
        let anon = BackingType::Anonymous;
        let file = BackingType::FileBacked {
            inode: 42,
            offset: 0,
        };
        assert_eq!(anon, BackingType::Anonymous);
        assert_ne!(anon, file);
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn test_cow_table() {
        let mut table = CowTable::new();
        table.mark_cow(0x1000, FrameNumber::new(100));

        assert!(table.is_cow(0x1000));
        assert!(!table.is_cow(0x2000));

        // mark_cow starts the count at 2, so only the second release
        // reports the last reference.
        assert!(!table.release(0x1000));
        assert!(table.release(0x1000));
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn test_lazy_mapping_contains() {
        let four_pages = PAGE_SIZE * 4;
        let mapping = LazyMapping::new(
            0x10000,
            four_pages,
            PageFlags::PRESENT | PageFlags::WRITABLE,
            BackingType::Anonymous,
        );

        // In range: first byte and the start of the last page.
        assert!(mapping.contains(0x10000));
        assert!(mapping.contains(0x10000 + PAGE_SIZE * 3));
        // Out of range: one past the end, and a distant address.
        assert!(!mapping.contains(0x10000 + four_pages));
        assert!(!mapping.contains(0x0));
    }
}
411}