//! Process memory management: address-space layout, memory regions, stacks,
//! heaps, `mmap`-style flags, and copy-on-write bookkeeping.

#[cfg(feature = "alloc")]
extern crate alloc;

#[cfg(feature = "alloc")]
use alloc::vec::Vec;

use crate::{
    error::KernelError,
    mm::{PageFlags, PhysicalAddress, VirtualAddress, PAGE_SIZE},
};
/// Virtual address space layout constants for user processes.
pub mod layout {
    /// Lowest mappable user-space virtual address.
    pub const USER_SPACE_START: usize = 0x0000_0000_0001_0000;

    /// End of the user-space virtual address range.
    pub const USER_SPACE_END: usize = 0x0000_7FFF_FFFF_0000;

    /// Default base address for executable code.
    pub const CODE_START: usize = 0x0000_0000_0040_0000;

    /// Default base address for the data segment.
    pub const DATA_START: usize = 0x0000_0000_0080_0000;

    /// Start address of the process heap.
    pub const HEAP_START: usize = 0x0000_0000_1000_0000;

    /// Maximum heap size (8 GiB).
    pub const MAX_HEAP_SIZE: usize = 8 * 1024 * 1024 * 1024;

    /// Top of the stack region; stacks grow downward from this address.
    pub const STACK_END: usize = 0x0000_7FFF_0000_0000;

    /// Default stack size (8 MiB).
    pub const DEFAULT_STACK_SIZE: usize = 8 * 1024 * 1024;
}

/// The kind of memory a [`MemoryRegion`] backs.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MemoryRegionType {
    /// Executable program code.
    Code,
    /// Writable program data.
    Data,
    /// Read-only data.
    Rodata,
    /// Thread or process stack.
    Stack,
    /// Process heap.
    Heap,
    /// Memory-mapped file.
    MappedFile,
    /// Shared memory.
    Shared,
    /// Device (MMIO) memory.
    Device,
}

/// A contiguous range of virtual memory with uniform type and permissions.
#[derive(Debug)]
pub struct MemoryRegion {
    /// First virtual address in the region (inclusive).
    pub start: VirtualAddress,
    /// End of the region (exclusive).
    pub end: VirtualAddress,
    /// What the region is used for.
    pub region_type: MemoryRegionType,
    /// Page permissions applied to the region.
    pub flags: PageFlags,
    /// Physical pages backing the region, once allocated.
    #[cfg(feature = "alloc")]
    pub physical_pages: Option<Vec<PhysicalAddress>>,
    /// Backing file, if this region is a file mapping.
    pub file_mapping: Option<FileMapping>,
}

/// Describes the file backing a [`MemoryRegionType::MappedFile`] region.
#[derive(Debug)]
pub struct FileMapping {
    /// File descriptor of the backing file.
    pub fd: u32,
    /// Byte offset into the file where the mapping starts.
    pub offset: u64,
    /// Mapping flags (see [`mmap::flags`]).
    pub flags: u32,
}

impl MemoryRegion {
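    /// Creates a region covering `size` bytes starting at `start`.
    ///
    /// A minimal usage sketch (marked `ignore`, since it depends on the
    /// surrounding crate's `VirtualAddress`, `PageFlags`, and `PAGE_SIZE`):
    ///
    /// ```ignore
    /// let code = MemoryRegion::new(
    ///     VirtualAddress::new(layout::CODE_START as u64),
    ///     4 * PAGE_SIZE,
    ///     MemoryRegionType::Code,
    ///     PageFlags::PRESENT | PageFlags::USER,
    /// );
    /// assert_eq!(code.size(), 4 * PAGE_SIZE);
    /// assert!(code.contains(VirtualAddress::new(layout::CODE_START as u64)));
    /// ```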
    pub fn new(
        start: VirtualAddress,
        size: usize,
        region_type: MemoryRegionType,
        flags: PageFlags,
    ) -> Self {
        Self {
            start,
            end: VirtualAddress::new(start.as_u64() + size as u64),
            region_type,
            flags,
            // This field only exists when the `alloc` feature is enabled.
            #[cfg(feature = "alloc")]
            physical_pages: None,
            file_mapping: None,
        }
    }

    /// Size of the region in bytes.
    pub fn size(&self) -> usize {
        self.end.as_usize() - self.start.as_usize()
    }

    /// Returns `true` if `addr` lies inside the region.
    pub fn contains(&self, addr: VirtualAddress) -> bool {
        addr >= self.start && addr < self.end
    }

    /// Returns `true` if the two regions share any addresses.
    pub fn overlaps(&self, other: &MemoryRegion) -> bool {
        self.start < other.end && other.start < self.end
    }
}

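/// Operations a process address space must support.
///
/// A sketch of how a caller generic over this trait might use it (marked
/// `ignore`; any concrete implementor is assumed to live elsewhere):
///
/// ```ignore
/// fn load_segment<M: ProcessMemory>(
///     mem: &mut M,
///     size: usize,
/// ) -> Result<VirtualAddress, KernelError> {
///     mem.allocate(size, PageFlags::PRESENT | PageFlags::USER | PageFlags::WRITABLE)
/// }
/// ```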
pub trait ProcessMemory {
    /// Allocates `size` bytes of virtual memory and returns its base address.
    fn allocate(&mut self, size: usize, flags: PageFlags) -> Result<VirtualAddress, KernelError>;

    /// Releases a previously allocated range.
    fn free(&mut self, addr: VirtualAddress, size: usize) -> Result<(), KernelError>;

    /// Maps `size` bytes of physical memory at a fixed virtual address.
    fn map_physical(
        &mut self,
        phys: PhysicalAddress,
        virt: VirtualAddress,
        size: usize,
        flags: PageFlags,
    ) -> Result<(), KernelError>;

    /// Removes an existing mapping.
    fn unmap(&mut self, virt: VirtualAddress, size: usize) -> Result<(), KernelError>;

    /// Changes the protection flags of an existing mapping.
    fn protect(
        &mut self,
        addr: VirtualAddress,
        size: usize,
        flags: PageFlags,
    ) -> Result<(), KernelError>;

    /// Grows the heap by `increment` bytes and returns the new break.
    fn grow_heap(&mut self, increment: usize) -> Result<VirtualAddress, KernelError>;

    /// Grows the stack by `increment` bytes.
    fn grow_stack(&mut self, increment: usize) -> Result<(), KernelError>;
}

/// A thread's stack region; the stack grows downward from `top` toward `bottom`.
pub struct ThreadStack {
    /// Lowest address of the stack region (start of the guard area).
    pub bottom: VirtualAddress,
    /// Highest address of the stack region (initial stack pointer).
    pub top: VirtualAddress,
    /// Current stack pointer.
    pub sp: VirtualAddress,
    /// Size in bytes of the guard area at the bottom of the stack.
    pub guard_size: usize,
}

impl ThreadStack {
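    /// Reserves a `size`-byte stack ending at [`layout::STACK_END`], with the
    /// lowest page reserved as a guard page.
    ///
    /// Illustrative sketch (marked `ignore`; it assumes `KernelError: Debug`):
    ///
    /// ```ignore
    /// let stack = ThreadStack::new(layout::DEFAULT_STACK_SIZE).expect("valid size");
    /// assert_eq!(stack.usable_size(), layout::DEFAULT_STACK_SIZE - PAGE_SIZE);
    /// assert!(stack.in_guard_page(stack.bottom));
    /// assert!(!stack.in_guard_page(stack.sp));
    /// ```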
    pub fn new(size: usize) -> Result<Self, KernelError> {
        // Require at least one usable page plus one guard page.
        if size < PAGE_SIZE * 2 {
            return Err(KernelError::InvalidArgument {
                name: "stack size",
                value: "stack too small (minimum 2 pages)",
            });
        }

        // The stack occupies the `size` bytes directly below `STACK_END`.
        let bottom = VirtualAddress::new((layout::STACK_END - size) as u64);
        let top = VirtualAddress::new(layout::STACK_END as u64);

        Ok(Self {
            bottom,
            top,
            sp: top,
            guard_size: PAGE_SIZE,
        })
    }

    /// Usable stack space in bytes, excluding the guard area.
    pub fn usable_size(&self) -> usize {
        self.top.as_usize() - self.bottom.as_usize() - self.guard_size
    }

    /// Returns `true` if `addr` lies within the stack region.
    pub fn contains(&self, addr: VirtualAddress) -> bool {
        addr >= self.bottom && addr <= self.top
    }

    /// Returns `true` if `addr` falls in the guard area at the bottom of the stack.
    pub fn in_guard_page(&self, addr: VirtualAddress) -> bool {
        addr >= self.bottom
            && addr < VirtualAddress::new(self.bottom.as_u64() + self.guard_size as u64)
    }
}

/// Process heap state managed through the program break (`brk`).
pub struct ProcessHeap {
    /// Current program break (end of the heap).
    pub brk: VirtualAddress,
    /// Start address of the heap.
    pub start: VirtualAddress,
    /// Maximum heap size in bytes.
    pub max_size: usize,
}

impl Default for ProcessHeap {
    fn default() -> Self {
        Self {
            brk: VirtualAddress::new(layout::HEAP_START as u64),
            start: VirtualAddress::new(layout::HEAP_START as u64),
            max_size: layout::MAX_HEAP_SIZE,
        }
    }
}

impl ProcessHeap {
    /// Creates an empty heap starting at [`layout::HEAP_START`].
    pub fn new() -> Self {
        Self::default()
    }

    /// Current heap size in bytes.
    pub fn size(&self) -> usize {
        self.brk.as_usize() - self.start.as_usize()
    }

    /// Moves the program break to `new_brk`, validating the request.
    pub fn set_brk(&mut self, new_brk: VirtualAddress) -> Result<VirtualAddress, KernelError> {
        // Reject breaks below the heap start first, so the size computation
        // below cannot underflow.
        if new_brk < self.start {
            return Err(KernelError::InvalidArgument {
                name: "heap break",
                value: "below heap start",
            });
        }

        let new_size = new_brk.as_usize() - self.start.as_usize();

        if new_size > self.max_size {
            return Err(KernelError::ResourceExhausted {
                resource: "heap size limit",
            });
        }

        self.brk = new_brk;
        Ok(self.brk)
    }

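    /// Grows the heap by `increment` bytes by advancing the program break.
    ///
    /// Illustrative sketch (marked `ignore`; it assumes `KernelError: Debug`):
    ///
    /// ```ignore
    /// let mut heap = ProcessHeap::new();
    /// heap.grow(2 * PAGE_SIZE).expect("within the heap limit");
    /// assert_eq!(heap.size(), 2 * PAGE_SIZE);
    /// ```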
    pub fn grow(&mut self, increment: usize) -> Result<VirtualAddress, KernelError> {
        let new_brk = VirtualAddress::new((self.brk.as_usize() + increment) as u64);
        self.set_brk(new_brk)
    }
}

/// POSIX-style memory protection and mapping flags, plus conversion helpers.
pub mod mmap {
    use super::*;

    /// Flag constants matching the conventional POSIX values.
    pub mod flags {
        /// Pages may be executed.
        pub const PROT_EXEC: u32 = 0x4;
        /// Pages may be written.
        pub const PROT_WRITE: u32 = 0x2;
        /// Pages may be read.
        pub const PROT_READ: u32 = 0x1;
        /// Pages may not be accessed.
        pub const PROT_NONE: u32 = 0x0;

        /// Updates are visible to other mappings of the same object.
        pub const MAP_SHARED: u32 = 0x01;
        /// Updates stay private to this mapping (copy-on-write).
        pub const MAP_PRIVATE: u32 = 0x02;
        /// Place the mapping exactly at the requested address.
        pub const MAP_FIXED: u32 = 0x10;
        /// The mapping is anonymous, not backed by a file.
        pub const MAP_ANONYMOUS: u32 = 0x20;
    }

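    /// Translates `PROT_*` bits into page-table flags for a user mapping;
    /// pages requested without `PROT_EXEC` are marked no-execute.
    ///
    /// Illustrative sketch (marked `ignore`; it assumes `PageFlags` is a
    /// bitflags-style type with a `contains` method):
    ///
    /// ```ignore
    /// let rw = prot_to_page_flags(flags::PROT_READ | flags::PROT_WRITE);
    /// assert!(rw.contains(PageFlags::WRITABLE));
    /// assert!(rw.contains(PageFlags::NO_EXECUTE));
    /// ```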
    pub fn prot_to_page_flags(prot: u32) -> PageFlags {
        let mut flags = PageFlags::PRESENT | PageFlags::USER;

        if prot & flags::PROT_WRITE != 0 {
            flags |= PageFlags::WRITABLE;
        }

        if prot & flags::PROT_EXEC == 0 {
            flags |= PageFlags::NO_EXECUTE;
        }

        flags
    }
}

/// A physical page shared copy-on-write between address spaces.
pub struct CowMapping {
    /// The shared page; writers must copy it before modifying.
    pub original_page: PhysicalAddress,
    /// Number of address spaces currently referencing the page.
    pub ref_count: core::sync::atomic::AtomicUsize,
}

impl CowMapping {
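    /// Wraps a freshly shared page with an initial reference count of one.
    ///
    /// A sketch of the copy-on-write bookkeeping (marked `ignore`;
    /// `PhysicalAddress::new` is assumed to exist analogously to
    /// `VirtualAddress::new`):
    ///
    /// ```ignore
    /// let cow = CowMapping::new(PhysicalAddress::new(0x10_0000));
    /// cow.inc_ref(); // a second address space now shares the page
    /// assert_eq!(cow.ref_count(), 2);
    /// if cow.dec_ref() == 0 {
    ///     // Last reference dropped: the page may be freed or reused exclusively.
    /// }
    /// ```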
    pub fn new(page: PhysicalAddress) -> Self {
        Self {
            original_page: page,
            ref_count: core::sync::atomic::AtomicUsize::new(1),
        }
    }

    /// Increments the reference count.
    pub fn inc_ref(&self) {
        self.ref_count
            .fetch_add(1, core::sync::atomic::Ordering::Relaxed);
    }

    /// Decrements the reference count and returns the new count.
    pub fn dec_ref(&self) -> usize {
        // Acquire/release ordering so a caller that observes a count of zero
        // also observes all prior writes to the page before freeing it.
        self.ref_count
            .fetch_sub(1, core::sync::atomic::Ordering::AcqRel)
            - 1
    }

    /// Current reference count.
    pub fn ref_count(&self) -> usize {
        self.ref_count.load(core::sync::atomic::Ordering::Relaxed)
    }
}