⚠️ VeridianOS Kernel Documentation — low-level `no_std` kernel code; treat all functions as unsafe unless explicitly marked otherwise.

veridian_kernel/ipc/
shared_memory.rs

1//! Zero-copy shared memory IPC implementation
2//!
3//! Provides high-performance shared memory regions for large data transfers
4//! between processes without copying.
5
6// Shared memory IPC -- used for zero-copy large transfers
7#![allow(dead_code)]
8
9#[cfg(feature = "alloc")]
10extern crate alloc;
11
12#[cfg(feature = "alloc")]
13use alloc::collections::BTreeMap;
14#[cfg(feature = "alloc")]
15use alloc::vec::Vec;
16use core::sync::atomic::{AtomicU32, AtomicU64, Ordering};
17
18use spin::Mutex;
19
20use super::{error::Result, IpcError};
21use crate::{
22    mm::{PageSize, PhysicalAddress, VirtualAddress},
23    process::ProcessId,
24};
25
26/// Shared memory region ID generator
27static REGION_COUNTER: AtomicU64 = AtomicU64::new(1);
28
/// Access permissions for a shared memory mapping.
///
/// Encoded as a 3-bit mask (bit 0 = read, bit 1 = write, bit 2 = execute).
/// `Write` deliberately carries the read bit as well, since a writable
/// mapping is always readable.
#[repr(u32)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Permission {
    /// Read-only mapping.
    Read = 0b001,
    /// Writable mapping; read is implied (mask 0b011).
    Write = 0b011,
    /// Execute-only mapping.
    Execute = 0b100,
    /// Readable and executable (e.g. code pages).
    ReadExecute = 0b101,
    /// Fully open mapping: read, write, and execute.
    ReadWriteExecute = 0b111,
}
44
/// Alias for [`Permission`] so callers and tests can use the plural
/// spelling interchangeably with the enum name.
pub type Permissions = Permission;
47
/// How a shared region is handed to the receiving process.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TransferMode {
    /// Ownership moves to the receiver; the sender relinquishes the region.
    Move,
    /// Sender and receiver both keep access to the region.
    Share,
    /// Receiver gets a copy-on-write view of the region.
    CopyOnWrite,
}
58
59impl Permission {
60    /// Constant for read-write permissions
61    pub const READ_WRITE: Self = Self::Write;
62
63    /// Check if permission allows reading
64    pub fn can_read(self) -> bool {
65        (self as u32) & 0b001 != 0
66    }
67
68    /// Check if permission allows writing
69    pub fn can_write(self) -> bool {
70        (self as u32) & 0b010 != 0
71    }
72
73    /// Check if permission allows execution
74    pub fn can_execute(self) -> bool {
75        (self as u32) & 0b100 != 0
76    }
77}
78
/// Caching behaviour applied to a shared region's mappings.
#[repr(u32)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CachePolicy {
    /// Normal write-back caching; the default for RAM-backed regions.
    WriteBack = 0,
    /// Write-through: stores reach memory immediately, loads are cached.
    WriteThrough = 1,
    /// No caching at all; required for most device (MMIO) memory.
    Uncached = 2,
    /// Write-combining: buffered streaming stores, e.g. framebuffers.
    WriteCombining = 3,
}
92
/// Shared memory region descriptor.
///
/// One `SharedRegion` describes a physically contiguous allocation that can
/// be mapped into several processes at once. Per-process mapping state lives
/// in `mappings`; `ref_count` counts the creation reference plus one
/// reference per recorded mapping/share grant.
#[derive(Debug)]
pub struct SharedRegion {
    /// Unique region ID (allocated from `REGION_COUNTER`)
    id: u64,
    /// Physical memory backing this region (base of the contiguous frames)
    physical_base: PhysicalAddress,
    /// Size of the region in bytes (rounded up to a page multiple at creation)
    size: usize,
    /// Owner process
    owner: ProcessId,
    /// Processes with access to this region
    mappings: Mutex<BTreeMap<ProcessId, RegionMapping>>,
    /// Reference count (starts at 1 for the creation reference)
    ref_count: AtomicU32,
    /// Cache policy
    cache_policy: CachePolicy,
    /// NUMA node preference
    numa_node: Option<u32>,
}
113
/// Per-process mapping of a shared region.
#[derive(Debug, Clone)]
struct RegionMapping {
    /// Virtual address in the process (0 while a share grant is still pending)
    virtual_base: VirtualAddress,
    /// Permissions for this mapping
    permissions: Permission,
    /// Whether this mapping is active (false for pending grants and after unmap)
    active: bool,
}
124
impl SharedRegion {
    /// Create a new shared memory region (convenience wrapper).
    ///
    /// Returns an error if physical memory cannot be allocated for the region.
    ///
    /// NOTE(review): `_permissions` is currently ignored — per-process
    /// permissions are chosen at `map()` time instead; confirm this is
    /// intentional.
    pub fn new(owner: ProcessId, size: usize, _permissions: Permission) -> Result<Self> {
        Self::new_with_policy(owner, size, CachePolicy::WriteBack, None)
    }

    /// Create a new shared memory region backed by real physical frames.
    ///
    /// Allocates contiguous physical frames from the global frame allocator
    /// to back the shared region. Returns `IpcError::OutOfMemory` if the
    /// allocation fails.
    pub fn new_with_policy(
        owner: ProcessId,
        size: usize,
        cache_policy: CachePolicy,
        numa_node: Option<u32>,
    ) -> Result<Self> {
        // Round size up to page boundary
        let page_size = PageSize::Small as usize;
        let size = size.div_ceil(page_size) * page_size;
        let num_frames = size / page_size;

        // Allocate physical frames from the global frame allocator
        let frame = crate::mm::FRAME_ALLOCATOR
            .lock()
            .allocate_frames(num_frames, numa_node.map(|n| n as usize))
            .map_err(|_| IpcError::OutOfMemory)?;

        // Frame number -> physical byte address. Assumes frame numbering is
        // zero-based from physical address 0 — TODO confirm against mm.
        let physical_base = PhysicalAddress::new(frame.as_u64() * page_size as u64);

        Ok(Self {
            // Relaxed is sufficient: only uniqueness is needed, not ordering.
            id: REGION_COUNTER.fetch_add(1, Ordering::Relaxed),
            physical_base,
            size,
            owner,
            mappings: Mutex::new(BTreeMap::new()),
            // Creation reference; map()/share_with() add one per grant.
            ref_count: AtomicU32::new(1),
            cache_policy,
            numa_node,
        })
    }

    /// Get region ID
    pub fn id(&self) -> u64 {
        self.id
    }

    /// Get region size in bytes (always a multiple of the small page size)
    pub fn size(&self) -> usize {
        self.size
    }

    /// Get the physical base address of the backing memory
    pub fn physical_base(&self) -> PhysicalAddress {
        self.physical_base
    }

    /// Map region into a process address space.
    ///
    /// Records the mapping, flushes the TLB for the affected virtual range,
    /// and bumps the reference count. Fails with `PermissionDenied` when the
    /// caller is neither the owner nor the target process, and with
    /// `InvalidMemoryRegion` when the target already has a mapping entry.
    pub fn map(
        &self,
        process: ProcessId,
        virtual_base: VirtualAddress,
        permissions: Permission,
    ) -> Result<()> {
        // Verify the calling process has capability to map this region
        if let Some(current_process) = crate::process::current_process() {
            // Only owner or processes with proper capability can map
            if current_process.pid != self.owner && current_process.pid != process {
                // Would need to check for a memory capability here
                // For now, only allow owner to map
                return Err(IpcError::PermissionDenied);
            }
        }

        // Check if process already has a mapping.
        // NOTE(review): unmap() leaves an inactive entry behind, so a process
        // that unmapped can never re-map this region — confirm intended.
        let mut mappings = self.mappings.lock();
        if mappings.contains_key(&process) {
            return Err(IpcError::InvalidMemoryRegion);
        }

        // Flush TLB for all pages in the mapped range so the CPU picks up
        // the new mapping immediately.
        // NOTE(review): no page-table entries are installed here — PTE
        // insertion presumably happens in sys_ipc_map_memory; verify, since
        // flushing before any insert only drops stale translations.
        let num_pages = self.size / (PageSize::Small as usize);
        for i in 0..num_pages {
            let page_addr = virtual_base.as_u64() + (i as u64) * (PageSize::Small as u64);
            crate::arch::tlb_flush_address(page_addr);
        }

        mappings.insert(
            process,
            RegionMapping {
                virtual_base,
                permissions,
                active: true,
            },
        );

        // One reference per recorded mapping (paired with unmap's decrement).
        self.ref_count.fetch_add(1, Ordering::Relaxed);
        Ok(())
    }

    /// Unmap region from a process and flush the TLB for the affected range.
    ///
    /// The mapping entry is retained with `active = false` rather than being
    /// removed. Returns `InvalidMemoryRegion` if no active mapping exists.
    pub fn unmap(&self, process: ProcessId) -> Result<()> {
        let mut mappings = self.mappings.lock();

        if let Some(mapping) = mappings.get_mut(&process) {
            if !mapping.active {
                return Err(IpcError::InvalidMemoryRegion);
            }

            // Flush TLB for every page in the unmapped range so stale
            // translations are invalidated.
            let num_pages = self.size / (PageSize::Small as usize);
            for i in 0..num_pages {
                let page_addr =
                    mapping.virtual_base.as_u64() + (i as u64) * (PageSize::Small as u64);
                crate::arch::tlb_flush_address(page_addr);
            }

            mapping.active = false;
            self.ref_count.fetch_sub(1, Ordering::Relaxed);
            Ok(())
        } else {
            Err(IpcError::InvalidMemoryRegion)
        }
    }

    /// Transfer ownership of region to another process.
    ///
    /// Validates that the target process exists before transferring.
    /// Existing per-process mappings are left untouched.
    pub fn transfer_ownership(&mut self, new_owner: ProcessId) -> Result<()> {
        // Validate new owner exists
        if crate::process::find_process(new_owner).is_none() {
            return Err(IpcError::ProcessNotFound);
        }
        self.owner = new_owner;
        Ok(())
    }

    /// Get virtual address for a specific process (active mappings only)
    pub fn get_mapping(&self, process: ProcessId) -> Option<VirtualAddress> {
        self.mappings
            .lock()
            .get(&process)
            .filter(|m| m.active)
            .map(|m| m.virtual_base)
    }

    /// Create a capability for this shared region.
    ///
    /// Returns the capability encoded as a packed `u64` token.
    pub fn create_capability(&self, target_process: ProcessId, mode: TransferMode) -> u64 {
        use crate::cap::{
            token::{CapabilityFlags, CapabilityToken},
            types::{Capability, CapabilityId, CapabilityPermissions, CapabilityType},
        };

        // Determine permissions based on transfer mode
        let perms = match mode {
            TransferMode::Move => {
                CapabilityPermissions::READ
                    | CapabilityPermissions::WRITE
                    | CapabilityPermissions::GRANT
            }
            TransferMode::Share => CapabilityPermissions::READ | CapabilityPermissions::WRITE,
            TransferMode::CopyOnWrite => CapabilityPermissions::READ,
        };

        // Create capability ID based on region ID and target process.
        // NOTE(review): XOR of two ids is not collision-free across
        // (region, process) pairs — confirm uniqueness is not required.
        let cap_id = CapabilityId(self.id ^ target_process.0);

        // Create capability for shared memory region.
        // NOTE(review): `_cap` is built but dropped immediately — it is
        // never registered or returned; verify whether it should be
        // inserted into a capability table.
        let _cap = Capability::new(
            cap_id,
            CapabilityType::Memory,
            perms,
            self.physical_base.as_u64(),
        );

        // Create token with appropriate flags.
        // NOTE(review): Move and Share currently yield identical token
        // flags; only `perms` above distinguishes them (GRANT) — confirm
        // intended.
        let flags = match mode {
            TransferMode::Move => CapabilityFlags::Read as u8 | CapabilityFlags::Write as u8,
            TransferMode::Share => CapabilityFlags::Read as u8 | CapabilityFlags::Write as u8,
            TransferMode::CopyOnWrite => CapabilityFlags::Read as u8,
        };

        let token = CapabilityToken::new(cap_id.0, 0, CapabilityType::Memory as u8, flags);

        token.to_u64()
    }

    /// Get the NUMA node for this region (node 0 when no preference was set)
    pub fn numa_node(&self) -> usize {
        self.numa_node.unwrap_or(0) as usize
    }

    /// Create a new shared memory region with specific NUMA node.
    ///
    /// Returns an error if physical memory cannot be allocated for the region.
    /// `_permissions` is ignored, matching `new()`.
    pub fn new_numa(
        owner: ProcessId,
        size: usize,
        _permissions: Permission,
        numa_node: usize,
    ) -> Result<Self> {
        Self::new_with_policy(owner, size, CachePolicy::WriteBack, Some(numa_node as u32))
    }
}
333
334// MemoryRegion is defined in ipc::message -- re-use it here.
335pub use super::message::MemoryRegion;
336
337impl MemoryRegion {
338    /// Create from a SharedRegion
339    pub fn from_shared(region: &SharedRegion, vaddr: VirtualAddress) -> Self {
340        Self {
341            base_addr: vaddr.as_u64(),
342            size: region.size as u64,
343            permissions: Permission::Read as u32, // Default to read-only
344            cache_policy: region.cache_policy as u32,
345        }
346    }
347}
348
/// Shared memory manager.
///
/// System-wide registry of shared regions, plus one atomic byte counter per
/// NUMA node tracking how much shared memory is allocated on each node.
pub struct SharedMemoryManager {
    /// All shared regions in the system, keyed by region ID
    regions: Mutex<BTreeMap<u64, SharedRegion>>,
    /// NUMA node memory tracking (bytes allocated per node)
    numa_stats: Vec<AtomicU64>,
}
356
357impl SharedMemoryManager {
358    /// Create a new shared memory manager
359    pub fn new(numa_nodes: usize) -> Self {
360        let mut numa_stats = Vec::with_capacity(numa_nodes);
361        for _ in 0..numa_nodes {
362            numa_stats.push(AtomicU64::new(0));
363        }
364
365        Self {
366            regions: Mutex::new(BTreeMap::new()),
367            numa_stats,
368        }
369    }
370
371    /// Create a new shared memory region
372    pub fn create_region(
373        &self,
374        owner: ProcessId,
375        size: usize,
376        cache_policy: CachePolicy,
377        numa_node: Option<u32>,
378    ) -> Result<u64> {
379        let region = SharedRegion::new_with_policy(owner, size, cache_policy, numa_node)?;
380        let id = region.id();
381
382        // Track NUMA allocation
383        if let Some(node) = numa_node {
384            if (node as usize) < self.numa_stats.len() {
385                self.numa_stats[node as usize].fetch_add(size as u64, Ordering::Relaxed);
386            }
387        }
388
389        self.regions.lock().insert(id, region);
390        Ok(id)
391    }
392
393    /// Get a shared region by ID
394    pub fn get_region(&self, id: u64) -> Option<u64> {
395        self.regions.lock().get(&id).map(|r| r.id)
396    }
397
398    /// Remove a shared region
399    pub fn remove_region(&self, id: u64) -> Result<()> {
400        let mut regions = self.regions.lock();
401        if let Some(region) = regions.remove(&id) {
402            // Check reference count
403            if region.ref_count.load(Ordering::Relaxed) > 0 {
404                // Still in use, put it back
405                regions.insert(id, region);
406                return Err(IpcError::ResourceBusy);
407            }
408
409            // Update NUMA stats
410            if let Some(node) = region.numa_node {
411                if (node as usize) < self.numa_stats.len() {
412                    self.numa_stats[node as usize].fetch_sub(region.size as u64, Ordering::Relaxed);
413                }
414            }
415
416            // Free physical frames backing this region
417            let page_size = PageSize::Small as usize;
418            let num_frames = region.size / page_size;
419            let frame_number =
420                crate::mm::FrameNumber::new(region.physical_base.as_u64() / page_size as u64);
421            if let Err(_e) = crate::mm::FRAME_ALLOCATOR
422                .lock()
423                .free_frames(frame_number, num_frames)
424            {
425                crate::kprintln!(
426                    "[IPC] Warning: Failed to free physical frames for shared memory region"
427                );
428            }
429
430            Ok(())
431        } else {
432            Err(IpcError::InvalidMemoryRegion)
433        }
434    }
435
436    /// Grant a process access to a shared region.
437    ///
438    /// Records a pending mapping for the target process.  The actual page-
439    /// table insertion happens when the process calls `sys_ipc_map_memory`.
440    pub fn share_with(&self, region_id: u64, target: ProcessId) -> Result<()> {
441        let regions = self.regions.lock();
442        let region = regions
443            .get(&region_id)
444            .ok_or(IpcError::InvalidMemoryRegion)?;
445
446        let mut mappings = region.mappings.lock();
447        if mappings.contains_key(&target) {
448            // Already shared with this process
449            return Ok(());
450        }
451
452        mappings.insert(
453            target,
454            RegionMapping {
455                virtual_base: VirtualAddress::new(0), // Assigned on map
456                permissions: Permission::Write,       // Write implies read (0b011)
457                active: false,                        // Not yet mapped in page tables
458            },
459        );
460        region.ref_count.fetch_add(1, Ordering::Relaxed);
461        Ok(())
462    }
463
464    /// Get NUMA memory usage statistics
465    pub fn numa_usage(&self, node: u32) -> Option<u64> {
466        self.numa_stats
467            .get(node as usize)
468            .map(|stat| stat.load(Ordering::Relaxed))
469    }
470}
471
472/// Zero-copy message transfer using shared memory.
473///
474/// Validates that the source process owns the region and the destination
475/// process has appropriate permissions, then remaps the physical pages
476/// into the destination's address space.  The source mapping is left
477/// intact (read-only downgrade could be added for move semantics).
478pub fn zero_copy_transfer(
479    region_id: u64,
480    from_process: ProcessId,
481    to_process: ProcessId,
482    manager: &SharedMemoryManager,
483) -> Result<()> {
484    // Look up the region in the manager
485    let regions = manager.regions.lock();
486    let region = regions.get(&region_id).ok_or(IpcError::EndpointNotFound)?;
487
488    // Validate that the source process owns the region
489    if region.owner != from_process {
490        return Err(IpcError::PermissionDenied);
491    }
492
493    // Validate that the destination process is a valid participant
494    // (either already mapped or has a pending grant)
495    let _to_proc =
496        crate::process::table::get_process(to_process).ok_or(IpcError::ProcessNotFound)?;
497
498    // Record the mapping for the destination process.
499    // The actual page-table remapping is performed lazily on first access
500    // via the page fault handler (demand-paging), or eagerly when the
501    // destination calls sys_ipc_map_memory.  Here we simply mark the
502    // region as shared with the target.
503    drop(regions);
504    manager.share_with(region_id, to_process)?;
505
506    Ok(())
507}
508
#[cfg(all(test, not(target_os = "none")))]
mod tests {
    use super::*;
    use crate::process::ProcessId;

    /// Permission bitmask semantics: write implies read; execute is
    /// independent of read/write.
    #[test]
    fn test_permission_flags() {
        assert!(Permission::Read.can_read());
        assert!(!Permission::Read.can_write());
        assert!(!Permission::Read.can_execute());

        assert!(Permission::Write.can_read());
        assert!(Permission::Write.can_write());
        assert!(!Permission::Write.can_execute());

        assert!(Permission::ReadWriteExecute.can_read());
        assert!(Permission::ReadWriteExecute.can_write());
        assert!(Permission::ReadWriteExecute.can_execute());
    }

    // These tests require the global FRAME_ALLOCATOR to be initialized with
    // physical memory, which is only available on bare-metal targets.
    //
    // NOTE(review): the enclosing module is gated on
    // `not(target_os = "none")` while the two tests below require
    // `target_os = "none"` — the combined cfg is unsatisfiable, so these
    // tests never compile or run. Move them to a bare-metal test harness or
    // relax one of the gates.
    #[cfg(target_os = "none")]
    #[test]
    fn test_shared_region_creation() {
        let region =
            SharedRegion::new_with_policy(ProcessId(1), 4096, CachePolicy::WriteBack, None)
                .unwrap();
        assert_eq!(region.size(), 4096);
        assert_eq!(region.owner, ProcessId(1));
    }

    #[cfg(target_os = "none")]
    #[test]
    fn test_memory_manager() {
        let manager = SharedMemoryManager::new(4);
        let id = manager
            .create_region(ProcessId(1), 8192, CachePolicy::WriteBack, Some(0))
            .unwrap();

        assert!(manager.get_region(id).is_some());
        assert_eq!(manager.numa_usage(0), Some(8192));
    }
}