1#![allow(dead_code)]
18
19#[cfg(feature = "alloc")]
20extern crate alloc;
21
22#[cfg(feature = "alloc")]
23use alloc::collections::BTreeMap;
24#[cfg(feature = "alloc")]
25use alloc::vec;
26#[cfg(feature = "alloc")]
27use alloc::vec::Vec;
28use core::sync::atomic::{AtomicU32, Ordering};
29
30use spin::Mutex;
31
32use crate::{
33 error::KernelError,
34 mm::{FrameNumber, PageFlags, FRAME_ALLOCATOR, PAGE_SIZE},
35};
36
/// How a lazily-mapped region obtains its contents on first access.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BackingType {
    /// Zero-filled memory with no backing object.
    Anonymous,
    /// Contents come from a file region (identified by inode and offset).
    FileBacked {
        /// Inode number of the backing file.
        inode: u64,
        /// Byte offset within the file where this mapping starts.
        offset: u64,
    },
}
54
/// A virtual-address range whose physical pages are allocated on first
/// access (page fault) rather than at registration time.
#[cfg(feature = "alloc")]
pub struct LazyMapping {
    /// First virtual address covered by the mapping.
    pub start_vaddr: usize,
    /// Length of the mapping in bytes (need not be page-aligned).
    pub size: usize,
    /// Page-table flags to apply when a page is faulted in.
    pub flags: PageFlags,
    /// Where page contents come from on first touch.
    pub backing: BackingType,
    /// One entry per page; `true` once that page has been faulted in.
    faulted_in: Vec<bool>,
}
73
#[cfg(feature = "alloc")]
impl LazyMapping {
    /// Creates a lazy mapping covering `size` bytes starting at `start_vaddr`.
    ///
    /// The fault bitmap is sized to `ceil(size / PAGE_SIZE)` so a partial
    /// trailing page still gets its own slot (zero pages for `size == 0`).
    pub fn new(start_vaddr: usize, size: usize, flags: PageFlags, backing: BackingType) -> Self {
        let page_count = size.div_ceil(PAGE_SIZE);
        Self {
            start_vaddr,
            size,
            flags,
            backing,
            faulted_in: vec![false; page_count],
        }
    }

    /// Returns `true` if `vaddr` falls inside the half-open range
    /// `[start_vaddr, start_vaddr + size)`.
    ///
    /// Uses checked arithmetic: with the previous unchecked `start + size`,
    /// a mapping placed near the top of the address space would wrap in
    /// release builds and spuriously match low addresses.
    pub fn contains(&self, vaddr: usize) -> bool {
        match self.start_vaddr.checked_add(self.size) {
            Some(end) => vaddr >= self.start_vaddr && vaddr < end,
            // `start + size` overflows usize: the range effectively extends
            // to the top of the address space.
            None => vaddr >= self.start_vaddr,
        }
    }

    /// Index of the page containing `vaddr`, relative to the mapping start.
    ///
    /// Caller must ensure `contains(vaddr)` holds; otherwise the
    /// subtraction underflows.
    fn page_index(&self, vaddr: usize) -> usize {
        (vaddr - self.start_vaddr) / PAGE_SIZE
    }
}
98
/// A physical frame shared copy-on-write between mappings.
pub struct CowEntry {
    /// The shared physical frame.
    pub frame: FrameNumber,
    /// Number of mappings still referencing the frame (initialized to 2 by
    /// `CowTable::mark_cow`).
    pub ref_count: AtomicU32,
}
110
/// Tracks copy-on-write pages, keyed by page-aligned virtual address.
#[cfg(feature = "alloc")]
pub struct CowTable {
    /// Page-aligned virtual address -> shared-frame entry.
    pub entries: BTreeMap<usize, CowEntry>,
}
117
#[cfg(feature = "alloc")]
impl Default for CowTable {
    /// An empty table; equivalent to [`CowTable::new`].
    fn default() -> Self {
        Self {
            entries: BTreeMap::new(),
        }
    }
}
124
#[cfg(feature = "alloc")]
impl CowTable {
    /// Creates an empty COW table.
    pub fn new() -> Self {
        Self {
            entries: BTreeMap::new(),
        }
    }

    /// Records `vaddr` as copy-on-write, backed by `frame`.
    ///
    /// The reference count starts at 2: the original owner plus the new
    /// sharer created by the operation that triggered the call. Calling
    /// this again for the same `vaddr` replaces the entry and resets the
    /// count to 2.
    pub fn mark_cow(&mut self, vaddr: usize, frame: FrameNumber) {
        self.entries.insert(
            vaddr,
            CowEntry {
                frame,
                ref_count: AtomicU32::new(2),
            },
        );
    }

    /// Drops one reference to the COW page at `vaddr`.
    ///
    /// Returns `true` exactly when this call released the last reference
    /// (count transitioned 1 -> 0); `false` for unknown addresses or pages
    /// that still have other sharers.
    pub fn release(&self, vaddr: usize) -> bool {
        let Some(entry) = self.entries.get(&vaddr) else {
            return false;
        };
        // Compare-exchange loop instead of a blind fetch_sub: fetch_sub on
        // a count that already reached 0 would wrap to u32::MAX and make
        // the entry look permanently shared.
        let mut current = entry.ref_count.load(Ordering::Acquire);
        loop {
            if current == 0 {
                // Already fully released; over-release is a no-op.
                return false;
            }
            match entry.ref_count.compare_exchange_weak(
                current,
                current - 1,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(prev) => return prev == 1,
                Err(observed) => current = observed,
            }
        }
    }

    /// Returns `true` if `vaddr` has a COW entry. Entries are never removed
    /// by [`Self::release`], so this stays `true` even at ref count 0.
    pub fn is_cow(&self, vaddr: usize) -> bool {
        self.entries.contains_key(&vaddr)
    }
}
161
/// Coordinates demand paging: lazily-committed mappings plus the
/// copy-on-write table.
#[cfg(feature = "alloc")]
pub struct DemandPagingManager {
    /// Lazy mappings keyed by their start virtual address.
    lazy_mappings: BTreeMap<usize, LazyMapping>,
    /// Shared COW state; public so callers can inspect it directly.
    pub cow_table: CowTable,
}
178
#[cfg(feature = "alloc")]
impl Default for DemandPagingManager {
    /// An empty manager; equivalent to [`DemandPagingManager::new`].
    fn default() -> Self {
        Self {
            lazy_mappings: BTreeMap::new(),
            cow_table: CowTable::new(),
        }
    }
}
185
#[cfg(feature = "alloc")]
impl DemandPagingManager {
    /// Creates a manager with no registered mappings and an empty COW table.
    pub fn new() -> Self {
        Self {
            lazy_mappings: BTreeMap::new(),
            cow_table: CowTable::new(),
        }
    }

    /// Registers `[start_vaddr, start_vaddr + size)` for demand paging.
    ///
    /// No physical memory is committed here; frames are allocated one page
    /// at a time by [`Self::try_demand_page`]. Registering a mapping whose
    /// start address equals an existing one replaces it.
    pub fn register_lazy(
        &mut self,
        start_vaddr: usize,
        size: usize,
        flags: PageFlags,
        backing: BackingType,
    ) {
        let mapping = LazyMapping::new(start_vaddr, size, flags, backing);
        self.lazy_mappings.insert(start_vaddr, mapping);
    }

    /// Pointer to a frame's contents through the kernel's physical-memory
    /// mapping window.
    fn frame_ptr(frame: FrameNumber) -> *mut u8 {
        crate::mm::phys_to_virt_addr(frame.as_u64() * PAGE_SIZE as u64) as *mut u8
    }

    /// Faults in the page containing `vaddr`.
    ///
    /// On success returns the freshly allocated frame and the flags the
    /// caller should install in the page table; the page is marked
    /// faulted-in so a second fault on it is rejected.
    ///
    /// # Errors
    /// - [`KernelError::UnmappedMemory`] if no lazy mapping covers `vaddr`.
    /// - [`KernelError::InvalidAddress`] if the page was already faulted in
    ///   or its index falls outside the fault bitmap.
    /// - [`KernelError::OutOfMemory`] if frame allocation fails.
    pub fn try_demand_page(
        &mut self,
        vaddr: usize,
    ) -> Result<(FrameNumber, PageFlags), KernelError> {
        let mapping = self
            .lazy_mappings
            .values_mut()
            .find(|m| m.contains(vaddr))
            .ok_or(KernelError::UnmappedMemory { addr: vaddr })?;

        let idx = mapping.page_index(vaddr);
        // A repeated fault on an already-committed page is not a
        // demand-paging request (likely a protection fault or stale TLB),
        // so it is rejected rather than double-allocated.
        if idx >= mapping.faulted_in.len() || mapping.faulted_in[idx] {
            return Err(KernelError::InvalidAddress { addr: vaddr });
        }

        let frame = FRAME_ALLOCATOR
            .lock()
            .allocate_frames(1, None)
            .map_err(|_| KernelError::OutOfMemory {
                requested: PAGE_SIZE,
                available: 0,
            })?;

        // Anonymous memory must be handed out zero-filled; file-backed
        // contents are presumably populated by the caller/filesystem layer
        // (no read happens here — TODO confirm against callers).
        if mapping.backing == BackingType::Anonymous {
            // SAFETY: `frame` was just allocated so no other code aliases
            // it, and the physical-memory map covers the full PAGE_SIZE.
            unsafe {
                core::ptr::write_bytes(Self::frame_ptr(frame), 0, PAGE_SIZE);
            }
        }

        mapping.faulted_in[idx] = true;
        Ok((frame, mapping.flags))
    }

    /// Resolves a write fault on a copy-on-write page.
    ///
    /// Allocates a private frame, copies the shared page's contents into
    /// it, drops this mapping's reference in the COW table, and returns
    /// the new frame for the caller to install.
    ///
    /// # Errors
    /// - [`KernelError::InvalidAddress`] if the page is not tracked as COW.
    /// - [`KernelError::OutOfMemory`] if frame allocation fails.
    pub fn handle_cow_fault(&self, vaddr: usize) -> Result<FrameNumber, KernelError> {
        // Round down to the page boundary; COW entries are keyed by
        // page-aligned addresses.
        let page_addr = vaddr & !(PAGE_SIZE - 1);

        let entry = self
            .cow_table
            .entries
            .get(&page_addr)
            .ok_or(KernelError::InvalidAddress { addr: vaddr })?;
        let old_frame = entry.frame;

        let new_frame = FRAME_ALLOCATOR
            .lock()
            .allocate_frames(1, None)
            .map_err(|_| KernelError::OutOfMemory {
                requested: PAGE_SIZE,
                available: 0,
            })?;

        // SAFETY: `new_frame` was just allocated (exclusive access),
        // `old_frame` is only read, both are covered by the
        // physical-memory map for a full page, and distinct frames cannot
        // overlap.
        unsafe {
            core::ptr::copy_nonoverlapping(
                Self::frame_ptr(old_frame) as *const u8,
                Self::frame_ptr(new_frame),
                PAGE_SIZE,
            );
        }

        // NOTE(review): when this drops the last reference, `old_frame` is
        // never returned to FRAME_ALLOCATOR and its table entry remains —
        // the shared frame is leaked. TODO: free it once a deallocation
        // path is available here.
        let _last_ref = self.cow_table.release(page_addr);

        Ok(new_frame)
    }

    /// Removes the lazy mapping that starts exactly at `start_vaddr`, if
    /// any. Frames already faulted in are not freed here.
    pub fn unregister_lazy(&mut self, start_vaddr: usize) {
        self.lazy_mappings.remove(&start_vaddr);
    }

    /// Marks every `(vaddr, frame)` pair in `pages` as copy-on-write.
    ///
    /// `_base` is unused; the parameter is retained so existing call sites
    /// stay source-compatible (previously silenced with `let _ = base;`).
    pub fn mark_cow_range(&mut self, _base: usize, pages: &[(usize, FrameNumber)]) {
        for &(vaddr, frame) in pages {
            self.cow_table.mark_cow(vaddr, frame);
        }
    }

    /// Convenience wrapper over [`CowTable::mark_cow`] for a single page.
    pub fn add_cow_entry(&mut self, vaddr: usize, frame: FrameNumber) {
        self.cow_table.mark_cow(vaddr, frame);
    }
}
318
/// Global demand-paging manager; `None` until [`init`] (or
/// [`with_manager_mut`]) installs an instance.
#[cfg(feature = "alloc")]
static DEMAND_PAGING: Mutex<Option<DemandPagingManager>> = Mutex::new(None);
325
/// Installs a fresh [`DemandPagingManager`] into the global slot and logs
/// the initialization. Replaces any previously installed manager.
#[cfg(feature = "alloc")]
pub fn init() {
    let mut slot = DEMAND_PAGING.lock();
    *slot = Some(DemandPagingManager::new());
    drop(slot);
    crate::println!("[DEMAND_PAGING] Manager initialized");
}
332
/// Registers a lazy mapping with the global manager.
///
/// Silently does nothing if the manager has not been initialized yet.
#[cfg(feature = "alloc")]
pub fn register_lazy(start_vaddr: usize, size: usize, flags: PageFlags, backing: BackingType) {
    let mut guard = DEMAND_PAGING.lock();
    if let Some(mgr) = guard.as_mut() {
        mgr.register_lazy(start_vaddr, size, flags, backing);
    }
}
340
/// Attempts to demand-page `vaddr` through the global manager.
///
/// # Errors
/// Returns [`KernelError::NotInitialized`] when [`init`] has not run;
/// otherwise propagates the result of `try_demand_page`.
#[cfg(feature = "alloc")]
pub fn handle_page_fault(vaddr: usize) -> Result<(FrameNumber, PageFlags), KernelError> {
    match DEMAND_PAGING.lock().as_mut() {
        Some(mgr) => mgr.try_demand_page(vaddr),
        None => Err(KernelError::NotInitialized {
            subsystem: "demand_paging",
        }),
    }
}
352
/// Runs `f` with exclusive access to the global manager, lazily creating
/// the manager if it has not been initialized yet.
#[cfg(feature = "alloc")]
pub fn with_manager_mut<R, F: FnOnce(&mut DemandPagingManager) -> R>(f: F) -> R {
    f(DEMAND_PAGING
        .lock()
        .get_or_insert_with(DemandPagingManager::new))
}
360
#[cfg(test)]
mod tests {
    use super::*;

    /// Derived equality distinguishes the two backing variants.
    #[test]
    fn test_backing_type() {
        let anon = BackingType::Anonymous;
        let file = BackingType::FileBacked {
            inode: 42,
            offset: 0,
        };
        assert_eq!(anon, BackingType::Anonymous);
        assert_ne!(anon, file);
    }

    /// A freshly marked page carries two references; only the second
    /// release reports that the last reference was dropped.
    #[cfg(feature = "alloc")]
    #[test]
    fn test_cow_table() {
        let mut table = CowTable::new();
        table.mark_cow(0x1000, FrameNumber::new(100));

        assert!(table.is_cow(0x1000));
        assert!(!table.is_cow(0x2000));

        let first = table.release(0x1000);
        let second = table.release(0x1000);
        assert!(!first);
        assert!(second);
    }

    /// `contains` is inclusive at the start and exclusive at the end.
    #[cfg(feature = "alloc")]
    #[test]
    fn test_lazy_mapping_contains() {
        let flags = PageFlags::PRESENT | PageFlags::WRITABLE;
        let mapping = LazyMapping::new(0x10000, PAGE_SIZE * 4, flags, BackingType::Anonymous);

        assert!(mapping.contains(0x10000));
        assert!(mapping.contains(0x10000 + PAGE_SIZE * 3));
        assert!(!mapping.contains(0x10000 + PAGE_SIZE * 4));
        assert!(!mapping.contains(0x0));
    }
}