⚠️ VeridianOS Kernel Documentation - This is low-level kernel code. All functions are unsafe unless explicitly marked otherwise. no_std

veridian_kernel/process/
memory.rs

1//! Process memory management
2//!
3//! This module handles the integration between processes and the memory
4//! management subsystem, including virtual address space management and memory
5//! mapping.
6
7#[cfg(feature = "alloc")]
8extern crate alloc;
9
10#[cfg(feature = "alloc")]
11use alloc::vec::Vec;
12
13use crate::{
14    error::KernelError,
15    mm::{PageFlags, PhysicalAddress, VirtualAddress, PAGE_SIZE},
16};
17
/// Memory layout constants for user processes
///
/// Addresses follow a lower-half layout (user space below 0x0000_8000_...);
/// presumably a 48-bit canonical split — TODO confirm per-architecture.
pub mod layout {
    /// User space start (first mappable user address; the low 64 KiB is
    /// left unmapped, which catches null-pointer dereferences)
    pub const USER_SPACE_START: usize = 0x0000_0000_0001_0000;

    /// User space end (exclusive upper bound for user mappings)
    pub const USER_SPACE_END: usize = 0x0000_7FFF_FFFF_0000;

    /// Default code segment start
    pub const CODE_START: usize = 0x0000_0000_0040_0000;

    /// Default data segment start
    pub const DATA_START: usize = 0x0000_0000_0080_0000;

    /// Default heap start
    pub const HEAP_START: usize = 0x0000_0000_1000_0000;

    /// Maximum heap size (8GB) -- supports rustc self-compilation (4-8GB peak)
    pub const MAX_HEAP_SIZE: usize = 8 * 1024 * 1024 * 1024;

    /// Stack end address (grows down from here)
    pub const STACK_END: usize = 0x0000_7FFF_0000_0000;

    /// Default stack size (8MB)
    pub const DEFAULT_STACK_SIZE: usize = 8 * 1024 * 1024;
}
44
/// Process memory region types
///
/// Classifies each [`MemoryRegion`] in a process address space; the type is
/// informational here — access permissions are carried separately in
/// `MemoryRegion::flags`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MemoryRegionType {
    /// Code segment (executable)
    Code,
    /// Data segment (read/write)
    Data,
    /// Read-only data
    Rodata,
    /// Stack region
    Stack,
    /// Heap region
    Heap,
    /// Memory-mapped file
    MappedFile,
    /// Shared memory
    Shared,
    /// Device memory
    Device,
}
65
/// Memory region in a process's address space
///
/// Represents a half-open virtual address interval `[start, end)` together
/// with its type, page-level permissions, and optional backing state.
#[derive(Debug)]
pub struct MemoryRegion {
    /// Starting virtual address
    pub start: VirtualAddress,
    /// Ending virtual address (exclusive)
    pub end: VirtualAddress,
    /// Region type
    pub region_type: MemoryRegionType,
    /// Access permissions
    pub flags: PageFlags,
    /// Physical pages backing this region (if any); `None` until frames are
    /// actually allocated/mapped. Only available with the `alloc` feature.
    #[cfg(feature = "alloc")]
    pub physical_pages: Option<Vec<PhysicalAddress>>,
    /// File mapping info (if mapped file)
    pub file_mapping: Option<FileMapping>,
}
83
/// File mapping information
///
/// Attached to a [`MemoryRegion`] of type `MappedFile`.
#[derive(Debug)]
pub struct FileMapping {
    /// File descriptor
    pub fd: u32,
    /// Offset in file (byte offset where the mapping begins)
    pub offset: u64,
    /// Mapping flags — presumably MAP_* values from `mmap::flags`; verify
    /// against the code that constructs these
    pub flags: u32,
}
94
95impl MemoryRegion {
96    /// Create a new memory region
97    pub fn new(
98        start: VirtualAddress,
99        size: usize,
100        region_type: MemoryRegionType,
101        flags: PageFlags,
102    ) -> Self {
103        Self {
104            start,
105            end: VirtualAddress::new(start.as_u64() + size as u64),
106            region_type,
107            flags,
108            physical_pages: None,
109            file_mapping: None,
110        }
111    }
112
113    /// Get region size
114    pub fn size(&self) -> usize {
115        self.end.as_usize() - self.start.as_usize()
116    }
117
118    /// Check if address is within this region
119    pub fn contains(&self, addr: VirtualAddress) -> bool {
120        addr >= self.start && addr < self.end
121    }
122
123    /// Check if region overlaps with another
124    pub fn overlaps(&self, other: &MemoryRegion) -> bool {
125        self.start < other.end && other.start < self.end
126    }
127}
128
129/// Process memory operations
/// Process memory operations
///
/// Implemented by a process's virtual address space manager; all operations
/// act on the implementing process's own address space.
pub trait ProcessMemory {
    /// Allocate memory in process address space
    ///
    /// Returns the virtual address of the newly allocated range on success.
    fn allocate(&mut self, size: usize, flags: PageFlags) -> Result<VirtualAddress, KernelError>;

    /// Free memory in process address space
    ///
    /// `addr`/`size` should describe a range previously returned by
    /// [`ProcessMemory::allocate`] — TODO confirm partial frees are allowed.
    fn free(&mut self, addr: VirtualAddress, size: usize) -> Result<(), KernelError>;

    /// Map physical memory into process address space
    ///
    /// Maps `size` bytes starting at physical address `phys` to virtual
    /// address `virt` with the given page flags.
    fn map_physical(
        &mut self,
        phys: PhysicalAddress,
        virt: VirtualAddress,
        size: usize,
        flags: PageFlags,
    ) -> Result<(), KernelError>;

    /// Unmap memory from process address space
    fn unmap(&mut self, virt: VirtualAddress, size: usize) -> Result<(), KernelError>;

    /// Change memory protection
    ///
    /// Replaces the page flags for the `size`-byte range starting at `addr`.
    fn protect(
        &mut self,
        addr: VirtualAddress,
        size: usize,
        flags: PageFlags,
    ) -> Result<(), KernelError>;

    /// Grow the heap
    ///
    /// Returns the new heap break on success.
    fn grow_heap(&mut self, increment: usize) -> Result<VirtualAddress, KernelError>;

    /// Grow the stack
    fn grow_stack(&mut self, increment: usize) -> Result<(), KernelError>;
}
163
164/// Stack management for threads
/// Stack management for threads
///
/// Describes a downward-growing stack occupying `[bottom, top]`, with a
/// guard region of `guard_size` bytes at the bottom (lowest addresses).
pub struct ThreadStack {
    /// Stack bottom (lowest address)
    pub bottom: VirtualAddress,
    /// Stack top (highest address)
    pub top: VirtualAddress,
    /// Current stack pointer
    pub sp: VirtualAddress,
    /// Guard page size
    pub guard_size: usize,
}
175
176impl ThreadStack {
177    /// Create a new thread stack
178    pub fn new(size: usize) -> Result<Self, KernelError> {
179        if size < PAGE_SIZE * 2 {
180            return Err(KernelError::InvalidArgument {
181                name: "stack size",
182                value: "stack too small (minimum 2 pages)",
183            });
184        }
185
186        // Allocate virtual address range for stack
187        let bottom = VirtualAddress::new((layout::STACK_END - size) as u64);
188        let top = VirtualAddress::new(layout::STACK_END as u64);
189
190        Ok(Self {
191            bottom,
192            top,
193            sp: top,
194            guard_size: PAGE_SIZE,
195        })
196    }
197
198    /// Get usable stack size (excluding guard page)
199    pub fn usable_size(&self) -> usize {
200        self.top.as_usize() - self.bottom.as_usize() - self.guard_size
201    }
202
203    /// Check if address is within stack
204    pub fn contains(&self, addr: VirtualAddress) -> bool {
205        addr >= self.bottom && addr <= self.top
206    }
207
208    /// Check if address is in guard page
209    pub fn in_guard_page(&self, addr: VirtualAddress) -> bool {
210        addr >= self.bottom
211            && addr < VirtualAddress::new(self.bottom.as_u64() + self.guard_size as u64)
212    }
213}
214
/// Heap management for processes
///
/// Tracks the logical heap break only; page-level backing is handled by the
/// VAS layer (see the note in `set_brk`).
pub struct ProcessHeap {
    /// Current heap break
    pub brk: VirtualAddress,
    /// Heap start
    pub start: VirtualAddress,
    /// Maximum heap size
    pub max_size: usize,
}
224
225impl Default for ProcessHeap {
226    fn default() -> Self {
227        Self {
228            brk: VirtualAddress::new(layout::HEAP_START as u64),
229            start: VirtualAddress::new(layout::HEAP_START as u64),
230            max_size: layout::MAX_HEAP_SIZE,
231        }
232    }
233}
234
235impl ProcessHeap {
236    /// Create a new process heap
237    pub fn new() -> Self {
238        Self::default()
239    }
240
241    /// Get current heap size
242    pub fn size(&self) -> usize {
243        self.brk.as_usize() - self.start.as_usize()
244    }
245
246    /// Set heap break (brk syscall)
247    pub fn set_brk(&mut self, new_brk: VirtualAddress) -> Result<VirtualAddress, KernelError> {
248        let new_size = new_brk.as_usize() - self.start.as_usize();
249
250        if new_size > self.max_size {
251            return Err(KernelError::ResourceExhausted {
252                resource: "heap size limit",
253            });
254        }
255
256        if new_brk < self.start {
257            return Err(KernelError::InvalidArgument {
258                name: "heap break",
259                value: "below heap start",
260            });
261        }
262
263        // Note: Actual page allocation for heap expansion is handled by
264        // VAS::brk() + brk_extend_heap() in mm/vas.rs, which is invoked by
265        // sys_brk().  This ProcessHeap struct tracks the logical break only.
266        self.brk = new_brk;
267        Ok(self.brk)
268    }
269
270    /// Grow heap by increment
271    pub fn grow(&mut self, increment: usize) -> Result<VirtualAddress, KernelError> {
272        let new_brk = VirtualAddress::new((self.brk.as_usize() + increment) as u64);
273        self.set_brk(new_brk)
274    }
275}
276
277/// Memory mapping operations
/// Memory mapping operations
pub mod mmap {
    use super::*;

    /// Memory mapping flags (values match the traditional POSIX mmap ABI)
    pub mod flags {
        /// Pages may be executed
        pub const PROT_EXEC: u32 = 0x4;
        /// Pages may be written
        pub const PROT_WRITE: u32 = 0x2;
        /// Pages may be read
        pub const PROT_READ: u32 = 0x1;
        /// Pages may not be accessed
        pub const PROT_NONE: u32 = 0x0;

        /// Share changes
        pub const MAP_SHARED: u32 = 0x01;
        /// Changes are private
        pub const MAP_PRIVATE: u32 = 0x02;
        /// Place mapping at exact address
        pub const MAP_FIXED: u32 = 0x10;
        /// Anonymous mapping (no file)
        pub const MAP_ANONYMOUS: u32 = 0x20;
    }

    /// Convert mmap protection flags to page flags
    ///
    /// Every mapping gets PRESENT | USER; WRITABLE is added for PROT_WRITE,
    /// and NO_EXECUTE whenever PROT_EXEC is absent.
    ///
    /// NOTE(review): PROT_READ is never inspected, and PROT_NONE still
    /// produces a PRESENT | USER mapping — i.e. "no access" pages remain
    /// readable. Confirm whether callers rely on this before tightening.
    pub fn prot_to_page_flags(prot: u32) -> PageFlags {
        let mut flags = PageFlags::PRESENT | PageFlags::USER;

        if prot & flags::PROT_WRITE != 0 {
            flags |= PageFlags::WRITABLE;
        }

        if prot & flags::PROT_EXEC == 0 {
            flags |= PageFlags::NO_EXECUTE;
        }

        flags
    }
}
317
/// Copy-on-write (COW) support
///
/// Tracks how many address spaces share a read-only view of one physical
/// page; the count is atomic so sharers on different CPUs can update it.
pub struct CowMapping {
    /// Original physical page
    pub original_page: PhysicalAddress,
    /// Reference count (number of current sharers)
    pub ref_count: core::sync::atomic::AtomicUsize,
}
325
326impl CowMapping {
327    /// Create a new COW mapping
328    pub fn new(page: PhysicalAddress) -> Self {
329        Self {
330            original_page: page,
331            ref_count: core::sync::atomic::AtomicUsize::new(1),
332        }
333    }
334
335    /// Increment reference count
336    pub fn inc_ref(&self) {
337        self.ref_count
338            .fetch_add(1, core::sync::atomic::Ordering::Relaxed);
339    }
340
341    /// Decrement reference count
342    pub fn dec_ref(&self) -> usize {
343        self.ref_count
344            .fetch_sub(1, core::sync::atomic::Ordering::Relaxed)
345            - 1
346    }
347
348    /// Get reference count
349    pub fn ref_count(&self) -> usize {
350        self.ref_count.load(core::sync::atomic::Ordering::Relaxed)
351    }
352}