1#![allow(dead_code)]
8
9#[cfg(feature = "alloc")]
10extern crate alloc;
11
12#[cfg(feature = "alloc")]
13use alloc::collections::BTreeMap;
14#[cfg(feature = "alloc")]
15use alloc::vec::Vec;
16use core::sync::atomic::{AtomicU32, AtomicU64, Ordering};
17
18use spin::Mutex;
19
20use super::{error::Result, IpcError};
21use crate::{
22 mm::{PageSize, PhysicalAddress, VirtualAddress},
23 process::ProcessId,
24};
25
/// Monotonic source of `SharedRegion` ids. Starts at 1 — presumably so 0 can
/// act as an invalid/unassigned id; confirm against consumers of region ids.
static REGION_COUNTER: AtomicU64 = AtomicU64::new(1);
28
/// Access permissions for a shared-memory mapping, encoded as bit flags:
/// bit 0 = read, bit 1 = write, bit 2 = execute.
///
/// Note that `Write` is `0b011`, i.e. write access deliberately implies read
/// access (see `Permission::can_read`, and the unit tests asserting
/// `Permission::Write.can_read()`).
#[repr(u32)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Permission {
    /// Read-only (bit 0).
    Read = 0b001,
    /// Read + write (bits 0 and 1); also exposed as `Permission::READ_WRITE`.
    Write = 0b011,
    /// Execute-only (bit 2).
    Execute = 0b100,
    /// Read + execute (bits 0 and 2).
    ReadExecute = 0b101,
    /// Read + write + execute (all three bits).
    ReadWriteExecute = 0b111,
}
44
/// Backwards-compatible alias for callers that use the plural spelling.
pub type Permissions = Permission;
47
/// How access to a shared region is handed to another process.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TransferMode {
    /// Ownership is transferred; the sender relinquishes the region.
    Move,
    /// Sender and receiver both retain access to the region.
    Share,
    /// Copy-on-write: the receiver is granted read-only access
    /// (see `SharedRegion::create_capability`, which issues READ only).
    CopyOnWrite,
}
58
59impl Permission {
60 pub const READ_WRITE: Self = Self::Write;
62
63 pub fn can_read(self) -> bool {
65 (self as u32) & 0b001 != 0
66 }
67
68 pub fn can_write(self) -> bool {
70 (self as u32) & 0b010 != 0
71 }
72
73 pub fn can_execute(self) -> bool {
75 (self as u32) & 0b100 != 0
76 }
77}
78
/// Cache behaviour requested for a shared region's mappings.
///
/// NOTE(review): how these values map onto page-table attribute bits is not
/// visible in this file — verify against the arch/paging code.
#[repr(u32)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CachePolicy {
    /// Normal write-back caching (the default used by `SharedRegion::new`).
    WriteBack = 0,
    /// Write-through caching.
    WriteThrough = 1,
    /// No caching.
    Uncached = 2,
    /// Write-combining.
    WriteCombining = 3,
}
92
/// A physically-backed block of memory that can be mapped into multiple
/// process address spaces.
#[derive(Debug)]
pub struct SharedRegion {
    // Globally unique id, allocated from `REGION_COUNTER`.
    id: u64,
    // Base physical address of the backing frames.
    physical_base: PhysicalAddress,
    // Size in bytes, rounded up to a whole number of small pages.
    size: usize,
    // Process that created the region (or acquired it via
    // `transfer_ownership`).
    owner: ProcessId,
    // Per-process mapping bookkeeping, keyed by process id.
    mappings: Mutex<BTreeMap<ProcessId, RegionMapping>>,
    // Reference count: 1 at creation, +1 per map/share, -1 per unmap.
    ref_count: AtomicU32,
    // Cache policy requested at creation.
    cache_policy: CachePolicy,
    // Preferred NUMA node for the backing frames, if any was requested.
    numa_node: Option<u32>,
}
113
/// Per-process record of where and how a `SharedRegion` is mapped.
#[derive(Debug, Clone)]
struct RegionMapping {
    // Virtual base address in the process' address space
    // (0 is used as a placeholder by `SharedMemoryManager::share_with`).
    virtual_base: VirtualAddress,
    // Access rights granted for this particular mapping.
    permissions: Permission,
    // Cleared (not removed) by `SharedRegion::unmap`.
    active: bool,
}
124
impl SharedRegion {
    /// Creates a region with the default policy (write-back cache, no NUMA
    /// preference).
    ///
    /// `_permissions` is accepted for API compatibility but is currently
    /// unused — access rights are granted per-mapping in [`Self::map`].
    pub fn new(owner: ProcessId, size: usize, _permissions: Permission) -> Result<Self> {
        Self::new_with_policy(owner, size, CachePolicy::WriteBack, None)
    }

    /// Allocates physical frames and builds the region.
    ///
    /// `size` is rounded up to a whole number of small pages. When
    /// `numa_node` is given, the frame allocator is asked to place the
    /// frames on that node.
    ///
    /// # Errors
    /// `IpcError::OutOfMemory` if the frame allocator cannot satisfy the
    /// request.
    pub fn new_with_policy(
        owner: ProcessId,
        size: usize,
        cache_policy: CachePolicy,
        numa_node: Option<u32>,
    ) -> Result<Self> {
        let page_size = PageSize::Small as usize;
        // Round up to whole pages so num_frames divides exactly.
        let size = size.div_ceil(page_size) * page_size;
        let num_frames = size / page_size;

        let frame = crate::mm::FRAME_ALLOCATOR
            .lock()
            .allocate_frames(num_frames, numa_node.map(|n| n as usize))
            .map_err(|_| IpcError::OutOfMemory)?;

        // Convert frame number to a byte address (frames are page-sized).
        let physical_base = PhysicalAddress::new(frame.as_u64() * page_size as u64);

        Ok(Self {
            id: REGION_COUNTER.fetch_add(1, Ordering::Relaxed),
            physical_base,
            size,
            owner,
            mappings: Mutex::new(BTreeMap::new()),
            // Creation itself holds one reference; `map` adds more.
            ref_count: AtomicU32::new(1),
            cache_policy,
            numa_node,
        })
    }

    /// Globally unique region id.
    pub fn id(&self) -> u64 {
        self.id
    }

    /// Region size in bytes (page-rounded).
    pub fn size(&self) -> usize {
        self.size
    }

    /// Base physical address of the backing frames.
    pub fn physical_base(&self) -> PhysicalAddress {
        self.physical_base
    }

    /// Records a mapping of this region into `process`' address space at
    /// `virtual_base` and flushes the TLB for the affected pages.
    ///
    /// When a current process exists, only the region's owner or the target
    /// process itself is allowed to establish the mapping.
    ///
    /// NOTE(review): no page-table entries are written here — this function
    /// only performs bookkeeping plus TLB invalidation. The actual mapping
    /// is presumably done by the paging subsystem; confirm the call order.
    ///
    /// # Errors
    /// * `IpcError::PermissionDenied` — caller is neither owner nor target.
    /// * `IpcError::InvalidMemoryRegion` — `process` is already mapped.
    pub fn map(
        &self,
        process: ProcessId,
        virtual_base: VirtualAddress,
        permissions: Permission,
    ) -> Result<()> {
        if let Some(current_process) = crate::process::current_process() {
            if current_process.pid != self.owner && current_process.pid != process {
                return Err(IpcError::PermissionDenied);
            }
        }

        let mut mappings = self.mappings.lock();
        if mappings.contains_key(&process) {
            return Err(IpcError::InvalidMemoryRegion);
        }

        // Invalidate any stale TLB entries covering the target range.
        let num_pages = self.size / (PageSize::Small as usize);
        for i in 0..num_pages {
            let page_addr = virtual_base.as_u64() + (i as u64) * (PageSize::Small as u64);
            crate::arch::tlb_flush_address(page_addr);
        }

        mappings.insert(
            process,
            RegionMapping {
                virtual_base,
                permissions,
                active: true,
            },
        );

        self.ref_count.fetch_add(1, Ordering::Relaxed);
        Ok(())
    }

    /// Deactivates `process`' mapping and flushes the TLB for its pages.
    ///
    /// The mapping entry is kept but marked inactive, so a second `unmap`
    /// for the same process fails.
    ///
    /// # Errors
    /// `IpcError::InvalidMemoryRegion` if the process has no active mapping.
    pub fn unmap(&self, process: ProcessId) -> Result<()> {
        let mut mappings = self.mappings.lock();

        if let Some(mapping) = mappings.get_mut(&process) {
            if !mapping.active {
                return Err(IpcError::InvalidMemoryRegion);
            }

            // Flush every page of the (now stale) mapping.
            let num_pages = self.size / (PageSize::Small as usize);
            for i in 0..num_pages {
                let page_addr =
                    mapping.virtual_base.as_u64() + (i as u64) * (PageSize::Small as u64);
                crate::arch::tlb_flush_address(page_addr);
            }

            mapping.active = false;
            self.ref_count.fetch_sub(1, Ordering::Relaxed);
            Ok(())
        } else {
            Err(IpcError::InvalidMemoryRegion)
        }
    }

    /// Reassigns region ownership to `new_owner`.
    ///
    /// # Errors
    /// `IpcError::ProcessNotFound` if `new_owner` does not exist.
    pub fn transfer_ownership(&mut self, new_owner: ProcessId) -> Result<()> {
        if crate::process::find_process(new_owner).is_none() {
            return Err(IpcError::ProcessNotFound);
        }
        self.owner = new_owner;
        Ok(())
    }

    /// Virtual base of `process`' mapping, if one is currently active.
    pub fn get_mapping(&self, process: ProcessId) -> Option<VirtualAddress> {
        self.mappings
            .lock()
            .get(&process)
            .filter(|m| m.active)
            .map(|m| m.virtual_base)
    }

    /// Builds a capability token granting `target_process` access according
    /// to `mode`, returning the raw `u64` token encoding.
    ///
    /// NOTE(review): several points worth confirming with the capability
    /// subsystem:
    /// * the capability id is `region id XOR target pid`, which can collide
    ///   across different (region, process) pairs;
    /// * the constructed `Capability` (`_cap`) is built but never registered
    ///   or used;
    /// * `Move` and `Share` produce identical token flags, although `Move`
    ///   is given the extra GRANT permission above — likely one of the two
    ///   mappings is incomplete.
    pub fn create_capability(&self, target_process: ProcessId, mode: TransferMode) -> u64 {
        use crate::cap::{
            token::{CapabilityFlags, CapabilityToken},
            types::{Capability, CapabilityId, CapabilityPermissions, CapabilityType},
        };

        // Capability-side permissions per transfer mode.
        let perms = match mode {
            TransferMode::Move => {
                CapabilityPermissions::READ
                    | CapabilityPermissions::WRITE
                    | CapabilityPermissions::GRANT
            }
            TransferMode::Share => CapabilityPermissions::READ | CapabilityPermissions::WRITE,
            TransferMode::CopyOnWrite => CapabilityPermissions::READ,
        };

        let cap_id = CapabilityId(self.id ^ target_process.0);

        let _cap = Capability::new(
            cap_id,
            CapabilityType::Memory,
            perms,
            self.physical_base.as_u64(),
        );

        // Token-side flag bits per transfer mode.
        let flags = match mode {
            TransferMode::Move => CapabilityFlags::Read as u8 | CapabilityFlags::Write as u8,
            TransferMode::Share => CapabilityFlags::Read as u8 | CapabilityFlags::Write as u8,
            TransferMode::CopyOnWrite => CapabilityFlags::Read as u8,
        };

        let token = CapabilityToken::new(cap_id.0, 0, CapabilityType::Memory as u8, flags);

        token.to_u64()
    }

    /// NUMA node of the backing memory; 0 when no preference was recorded
    /// (so node 0 is indistinguishable from "no preference" here).
    pub fn numa_node(&self) -> usize {
        self.numa_node.unwrap_or(0) as usize
    }

    /// Convenience constructor pinning the region to `numa_node`.
    /// `_permissions` is unused (see [`Self::new`]).
    pub fn new_numa(
        owner: ProcessId,
        size: usize,
        _permissions: Permission,
        numa_node: usize,
    ) -> Result<Self> {
        Self::new_with_policy(owner, size, CachePolicy::WriteBack, Some(numa_node as u32))
    }
}
333
334pub use super::message::MemoryRegion;
336
337impl MemoryRegion {
338 pub fn from_shared(region: &SharedRegion, vaddr: VirtualAddress) -> Self {
340 Self {
341 base_addr: vaddr.as_u64(),
342 size: region.size as u64,
343 permissions: Permission::Read as u32, cache_policy: region.cache_policy as u32,
345 }
346 }
347}
348
/// Registry of all shared regions plus per-NUMA-node usage accounting.
pub struct SharedMemoryManager {
    // All live regions, keyed by region id.
    regions: Mutex<BTreeMap<u64, SharedRegion>>,
    // Bytes attributed to each NUMA node (index = node id).
    numa_stats: Vec<AtomicU64>,
}
356
357impl SharedMemoryManager {
358 pub fn new(numa_nodes: usize) -> Self {
360 let mut numa_stats = Vec::with_capacity(numa_nodes);
361 for _ in 0..numa_nodes {
362 numa_stats.push(AtomicU64::new(0));
363 }
364
365 Self {
366 regions: Mutex::new(BTreeMap::new()),
367 numa_stats,
368 }
369 }
370
371 pub fn create_region(
373 &self,
374 owner: ProcessId,
375 size: usize,
376 cache_policy: CachePolicy,
377 numa_node: Option<u32>,
378 ) -> Result<u64> {
379 let region = SharedRegion::new_with_policy(owner, size, cache_policy, numa_node)?;
380 let id = region.id();
381
382 if let Some(node) = numa_node {
384 if (node as usize) < self.numa_stats.len() {
385 self.numa_stats[node as usize].fetch_add(size as u64, Ordering::Relaxed);
386 }
387 }
388
389 self.regions.lock().insert(id, region);
390 Ok(id)
391 }
392
393 pub fn get_region(&self, id: u64) -> Option<u64> {
395 self.regions.lock().get(&id).map(|r| r.id)
396 }
397
398 pub fn remove_region(&self, id: u64) -> Result<()> {
400 let mut regions = self.regions.lock();
401 if let Some(region) = regions.remove(&id) {
402 if region.ref_count.load(Ordering::Relaxed) > 0 {
404 regions.insert(id, region);
406 return Err(IpcError::ResourceBusy);
407 }
408
409 if let Some(node) = region.numa_node {
411 if (node as usize) < self.numa_stats.len() {
412 self.numa_stats[node as usize].fetch_sub(region.size as u64, Ordering::Relaxed);
413 }
414 }
415
416 let page_size = PageSize::Small as usize;
418 let num_frames = region.size / page_size;
419 let frame_number =
420 crate::mm::FrameNumber::new(region.physical_base.as_u64() / page_size as u64);
421 if let Err(_e) = crate::mm::FRAME_ALLOCATOR
422 .lock()
423 .free_frames(frame_number, num_frames)
424 {
425 crate::kprintln!(
426 "[IPC] Warning: Failed to free physical frames for shared memory region"
427 );
428 }
429
430 Ok(())
431 } else {
432 Err(IpcError::InvalidMemoryRegion)
433 }
434 }
435
436 pub fn share_with(&self, region_id: u64, target: ProcessId) -> Result<()> {
441 let regions = self.regions.lock();
442 let region = regions
443 .get(®ion_id)
444 .ok_or(IpcError::InvalidMemoryRegion)?;
445
446 let mut mappings = region.mappings.lock();
447 if mappings.contains_key(&target) {
448 return Ok(());
450 }
451
452 mappings.insert(
453 target,
454 RegionMapping {
455 virtual_base: VirtualAddress::new(0), permissions: Permission::Write, active: false, },
459 );
460 region.ref_count.fetch_add(1, Ordering::Relaxed);
461 Ok(())
462 }
463
464 pub fn numa_usage(&self, node: u32) -> Option<u64> {
466 self.numa_stats
467 .get(node as usize)
468 .map(|stat| stat.load(Ordering::Relaxed))
469 }
470}
471
472pub fn zero_copy_transfer(
479 region_id: u64,
480 from_process: ProcessId,
481 to_process: ProcessId,
482 manager: &SharedMemoryManager,
483) -> Result<()> {
484 let regions = manager.regions.lock();
486 let region = regions.get(®ion_id).ok_or(IpcError::EndpointNotFound)?;
487
488 if region.owner != from_process {
490 return Err(IpcError::PermissionDenied);
491 }
492
493 let _to_proc =
496 crate::process::table::get_process(to_process).ok_or(IpcError::ProcessNotFound)?;
497
498 drop(regions);
504 manager.share_with(region_id, to_process)?;
505
506 Ok(())
507}
508
#[cfg(all(test, not(target_os = "none")))]
mod tests {
    use super::*;
    use crate::process::ProcessId;

    /// Pure bit-flag checks on `Permission`; runs on the host.
    #[test]
    fn test_permission_flags() {
        assert!(Permission::Read.can_read());
        assert!(!Permission::Read.can_write());
        assert!(!Permission::Read.can_execute());

        // `Write` (0b011) deliberately implies read access.
        assert!(Permission::Write.can_read());
        assert!(Permission::Write.can_write());
        assert!(!Permission::Write.can_execute());

        assert!(Permission::ReadWriteExecute.can_read());
        assert!(Permission::ReadWriteExecute.can_write());
        assert!(Permission::ReadWriteExecute.can_execute());
    }

    // NOTE(review): the inner `#[cfg(target_os = "none")]` contradicts the
    // module's `not(target_os = "none")` gate, so the two tests below are
    // never compiled anywhere. Presumably they require the kernel frame
    // allocator and are parked until an in-kernel test harness exists —
    // confirm this is intentional.
    #[cfg(target_os = "none")]
    #[test]
    fn test_shared_region_creation() {
        let region =
            SharedRegion::new_with_policy(ProcessId(1), 4096, CachePolicy::WriteBack, None)
                .unwrap();
        assert_eq!(region.size(), 4096);
        assert_eq!(region.owner, ProcessId(1));
    }

    #[cfg(target_os = "none")]
    #[test]
    fn test_memory_manager() {
        let manager = SharedMemoryManager::new(4);
        let id = manager
            .create_region(ProcessId(1), 8192, CachePolicy::WriteBack, Some(0))
            .unwrap();

        assert!(manager.get_region(id).is_some());
        assert_eq!(manager.numa_usage(0), Some(8192));
    }
}