VeridianOS Kernel Documentation — low-level `no_std` kernel code. Functions here are safe Rust wrappers over kernel memory-management primitives; any `unsafe` operations are confined to the underlying arch/mm layers.

veridian_kernel/ipc/
zero_copy.rs

1//! Zero-copy IPC implementation for large data transfers
2//!
3//! Provides efficient data transfer between processes without copying by
4//! remapping pages and using shared memory regions.
5
6// Zero-copy IPC -- exercised for large data transfers
7#![allow(dead_code)]
8
9#[cfg(feature = "alloc")]
10extern crate alloc;
11
12#[cfg(feature = "alloc")]
13use alloc::vec::Vec;
14use core::sync::atomic::{AtomicU64, Ordering};
15
16use super::{
17    error::{IpcError, Result},
18    shared_memory::{Permission, SharedRegion},
19};
20use crate::{
21    arch::entropy::read_timestamp,
22    mm::{PageFlags, PhysicalAddress, VirtualAddress},
23    process::ProcessId,
24};
25
/// Per-process page table handle for IPC zero-copy transfers.
///
/// Wraps a process ID and uses the process's VAS to perform real
/// page table operations (translate, map, unmap) via the frame
/// allocator and page table infrastructure.
struct ProcessPageTable {
    /// The process this page table belongs to
    pid: ProcessId,
    /// Page table root physical address (cached from VAS)
    // NOTE(review): cached at construction but never read by the impl
    // methods, which re-lock the VAS on every call — confirm whether
    // this field is still needed.
    root: u64,
}
37
/// Statistics for zero-copy operations
///
/// All counters only ever increase (`fetch_add`) and are updated with
/// `Ordering::Relaxed`, so readers may observe mutually inconsistent
/// snapshots across fields.
pub struct ZeroCopyStats {
    /// Total number of pages remapped across all transfers
    pub pages_transferred: AtomicU64,
    /// Total bytes covered by transferred regions
    pub bytes_transferred: AtomicU64,
    /// Number of completed `zero_copy_transfer` calls
    pub transfer_count: AtomicU64,
    /// Accumulated timestamp cycles spent inside transfers
    pub remap_cycles: AtomicU64,
}
45
/// Global counters for all zero-copy transfers performed by the kernel.
/// Read via [`get_zero_copy_stats`].
static ZERO_COPY_STATS: ZeroCopyStats = ZeroCopyStats {
    pages_transferred: AtomicU64::new(0),
    bytes_transferred: AtomicU64::new(0),
    transfer_count: AtomicU64::new(0),
    remap_cycles: AtomicU64::new(0),
};
52
53/// Zero-copy transfer of memory region between processes
54///
55/// This function remaps pages from source to destination without copying data.
56/// It's optimized for large transfers where copying would be expensive.
57pub fn zero_copy_transfer(
58    region: &SharedRegion,
59    from_pid: ProcessId,
60    to_pid: ProcessId,
61    flags: TransferFlags,
62) -> Result<()> {
63    let start = read_timestamp();
64
65    // Validate processes have appropriate capabilities
66    if !validate_transfer_capability(from_pid, to_pid, region.id()) {
67        return Err(IpcError::PermissionDenied);
68    }
69
70    // Get page table handles for both processes
71    let mut from_pt = get_process_page_table(from_pid)?;
72    let mut to_pt = get_process_page_table(to_pid)?;
73
74    // Calculate number of pages
75    let num_pages = region.size().div_ceil(PAGE_SIZE);
76
77    // Perform the transfer
78    match flags.transfer_type {
79        TransferType::Move => transfer_move(
80            region,
81            from_pid,
82            to_pid,
83            &mut from_pt,
84            &mut to_pt,
85            num_pages,
86        )?,
87        TransferType::Share => transfer_share(
88            region,
89            from_pid,
90            to_pid,
91            &mut from_pt,
92            &mut to_pt,
93            num_pages,
94        )?,
95        TransferType::Copy => transfer_copy_on_write(
96            region,
97            from_pid,
98            to_pid,
99            &mut from_pt,
100            &mut to_pt,
101            num_pages,
102        )?,
103    }
104
105    // Update statistics
106    let elapsed = read_timestamp() - start;
107    ZERO_COPY_STATS
108        .pages_transferred
109        .fetch_add(num_pages as u64, Ordering::Relaxed);
110    ZERO_COPY_STATS
111        .bytes_transferred
112        .fetch_add(region.size() as u64, Ordering::Relaxed);
113    ZERO_COPY_STATS
114        .transfer_count
115        .fetch_add(1, Ordering::Relaxed);
116    ZERO_COPY_STATS
117        .remap_cycles
118        .fetch_add(elapsed, Ordering::Relaxed);
119
120    // Flush TLBs on affected CPUs
121    flush_tlb_for_processes(&[from_pid, to_pid]);
122
123    Ok(())
124}
125
/// Transfer ownership of pages (unmap from source, map to destination)
///
/// Walks the region page by page: resolves each source page's backing
/// frame through the source VAS, removes the source mapping, then
/// installs a mapping in the destination's freshly allocated range.
/// Finally updates the region's per-process mapping bookkeeping.
///
/// NOTE(review): the loop is not atomic — if translate/unmap/map fails
/// partway through, earlier pages remain unmapped from the source with
/// no rollback. Confirm callers can tolerate a partially-moved region.
fn transfer_move(
    region: &SharedRegion,
    from_pid: ProcessId,
    to_pid: ProcessId,
    from_pt: &mut ProcessPageTable,
    to_pt: &mut ProcessPageTable,
    num_pages: usize,
) -> Result<()> {
    // The source must already have the region mapped; the destination
    // receives a new virtual range sized to the whole region.
    let from_vaddr = region
        .get_mapping(from_pid)
        .ok_or(IpcError::InvalidMemoryRegion)?;
    let to_vaddr = allocate_virtual_range(to_pt, region.size())?;

    for i in 0..num_pages {
        let offset = i * PAGE_SIZE;
        let from_page = from_vaddr.add(offset);
        let to_page = to_vaddr.add(offset);

        // Get physical address from source via VAS translation
        let phys_addr = from_pt
            .translate(from_page)
            .ok_or(IpcError::InvalidMemoryRegion)?;

        // Unmap from source
        from_pt.unmap(from_page)?;

        // Map to destination
        // NOTE(review): ProcessPageTable::map currently ignores the
        // physical address and allocates a fresh frame (see its impl),
        // so the source frame is not actually re-used here — verify.
        to_pt.map(to_page, phys_addr, PageFlags::USER | PageFlags::WRITABLE)?;
    }

    // Update region mapping
    region.unmap(from_pid)?;
    region.map(to_pid, to_vaddr, Permission::Write)?;

    Ok(())
}
163
164/// Share pages between processes (map to both)
165fn transfer_share(
166    region: &SharedRegion,
167    from_pid: ProcessId,
168    to_pid: ProcessId,
169    from_pt: &mut ProcessPageTable,
170    to_pt: &mut ProcessPageTable,
171    num_pages: usize,
172) -> Result<()> {
173    let from_vaddr = region
174        .get_mapping(from_pid)
175        .ok_or(IpcError::InvalidMemoryRegion)?;
176    let to_vaddr = allocate_virtual_range(to_pt, region.size())?;
177
178    for i in 0..num_pages {
179        let offset = i * PAGE_SIZE;
180        let from_page = from_vaddr.add(offset);
181        let to_page = to_vaddr.add(offset);
182
183        // Get physical address from source via VAS translation
184        let phys_addr = from_pt
185            .translate(from_page)
186            .ok_or(IpcError::InvalidMemoryRegion)?;
187
188        // Map to destination (keep source mapping)
189        to_pt.map(to_page, phys_addr, PageFlags::USER | PageFlags::WRITABLE)?;
190
191        // Mark as shared in both page tables (set ACCESSED bit as a marker)
192        from_pt.update_flags(
193            from_page,
194            PageFlags::USER | PageFlags::WRITABLE | PageFlags::ACCESSED,
195        )?;
196        to_pt.update_flags(
197            to_page,
198            PageFlags::USER | PageFlags::WRITABLE | PageFlags::ACCESSED,
199        )?;
200    }
201
202    // Update region mapping
203    region.map(to_pid, to_vaddr, Permission::Write)?;
204
205    Ok(())
206}
207
208/// Copy-on-write transfer (share initially, copy on write)
209fn transfer_copy_on_write(
210    region: &SharedRegion,
211    from_pid: ProcessId,
212    to_pid: ProcessId,
213    from_pt: &mut ProcessPageTable,
214    to_pt: &mut ProcessPageTable,
215    num_pages: usize,
216) -> Result<()> {
217    let from_vaddr = region
218        .get_mapping(from_pid)
219        .ok_or(IpcError::InvalidMemoryRegion)?;
220    let to_vaddr = allocate_virtual_range(to_pt, region.size())?;
221
222    for i in 0..num_pages {
223        let offset = i * PAGE_SIZE;
224        let from_page = from_vaddr.add(offset);
225        let to_page = to_vaddr.add(offset);
226
227        // Get physical address from source via VAS translation
228        let phys_addr = from_pt
229            .translate(from_page)
230            .ok_or(IpcError::InvalidMemoryRegion)?;
231
232        // Map as read-only in both (triggers fault on write for COW)
233        from_pt.update_flags(from_page, PageFlags::USER)?;
234        to_pt.map(to_page, phys_addr, PageFlags::USER)?;
235    }
236
237    // Update region mapping
238    region.map(to_pid, to_vaddr, Permission::Read)?;
239
240    Ok(())
241}
242
/// Transfer flags for zero-copy operations
#[derive(Debug, Clone, Copy)]
pub struct TransferFlags {
    /// Which remapping strategy to use (move, share, or copy-on-write)
    pub transfer_type: TransferType,
    /// Desired cache behavior (currently not consulted by the transfer path)
    pub cache_policy: CachePolicy,
    /// Preferred NUMA node, if any (currently not consulted)
    pub numa_hint: Option<u32>,
}
250
/// Remapping strategy used by [`zero_copy_transfer`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TransferType {
    /// Move pages (unmap from source)
    Move,
    /// Share pages (keep mapped in both)
    Share,
    /// Copy-on-write (share until written)
    Copy,
}
260
/// Cache behavior hint for transferred pages.
///
/// NOTE(review): no code in this module consults the policy yet —
/// variant semantics below are presumed; confirm against the arch
/// cache-attribute code before relying on them.
#[derive(Debug, Clone, Copy)]
pub enum CachePolicy {
    /// Use the platform's normal caching behavior
    Default,
    /// Presumably hints data is streamed through once — confirm
    Streaming,
    /// Presumably maps the pages uncached — confirm
    Uncached,
}
267
/// Grant capability to perform zero-copy transfer.
///
/// Creates a memory capability in the grantee's capability space that
/// allows mapping the shared region with the specified permissions.
/// Returns the new capability encoded as a `u64`.
///
/// # Errors
///
/// Returns `IpcError::ProcessNotFound` if either PID is not in the
/// process table, and `IpcError::PermissionDenied` if the capability
/// could not be created.
///
/// NOTE(review): the granter is only checked for existence — nothing
/// here verifies it owns or may delegate `region_id`. Confirm that
/// ownership is enforced elsewhere (e.g. at map time).
pub fn grant_transfer_capability(
    granter_pid: u64,
    grantee_pid: u64,
    region_id: u64,
    permissions: Permission,
) -> Result<u64> {
    // Both processes must exist; the granter handle itself is unused.
    let _granter = crate::process::table::get_process(ProcessId(granter_pid))
        .ok_or(IpcError::ProcessNotFound)?;
    let grantee = crate::process::table::get_process(ProcessId(grantee_pid))
        .ok_or(IpcError::ProcessNotFound)?;

    // Build rights from the IPC permission flags
    let mut rights = crate::cap::memory_integration::MemoryRights::MAP;
    if permissions.can_read() {
        rights |= crate::cap::memory_integration::MemoryRights::READ;
    }
    if permissions.can_write() {
        rights |= crate::cap::memory_integration::MemoryRights::WRITE;
    }
    if permissions.can_execute() {
        rights |= crate::cap::memory_integration::MemoryRights::EXECUTE;
    }

    // Create a memory capability in the grantee's capability space
    let grantee_cap_space = grantee.capability_space.lock();
    let attributes = crate::cap::object::MemoryAttributes::normal();
    let cap = crate::cap::memory_integration::create_memory_capability(
        region_id as usize,
        0, // size determined by region lookup at map time
        attributes,
        rights,
        &grantee_cap_space,
    )
    .map_err(|_| IpcError::PermissionDenied)?;

    Ok(cap.to_u64())
}
309
/// Batch zero-copy transfer for multiple regions
///
/// Performs each transfer independently and returns one `Result` per
/// region, so a failing transfer does not abort the rest of the batch.
///
/// # Errors
///
/// Returns `IpcError::ProcessNotFound` if either process does not
/// exist; per-region failures are reported in the returned vector.
#[cfg(feature = "alloc")]
pub fn batch_zero_copy_transfer(
    transfers: &[(SharedRegion, TransferFlags)],
    from_pid: ProcessId,
    to_pid: ProcessId,
) -> Result<Vec<Result<()>>> {
    // Validate processes exist up front so an invalid PID fails the
    // whole batch instead of failing every individual transfer.
    let _from_pt = get_process_page_table(from_pid)?;
    let _to_pt = get_process_page_table(to_pid)?;

    // Perform all transfers, collecting each outcome.
    let mut results = Vec::with_capacity(transfers.len());
    for (region, flags) in transfers {
        results.push(zero_copy_transfer(region, from_pid, to_pid, *flags));
    }

    // No batch-level TLB flush: zero_copy_transfer already flushes the
    // TLB for both processes after each transfer, so an extra flush
    // here would be redundant work (the old comment claiming a "single"
    // flush was inaccurate).

    Ok(results)
}
333
/// Page granularity used for transfer size/page-count calculations.
// NOTE(review): duplicates what is presumably the architecture page
// size — confirm it matches crate::mm's definition on all targets.
const PAGE_SIZE: usize = 4096;
335
336// ── ProcessPageTable operations ────────────────────────────────────────────
337//
338// These methods delegate to the real mm infrastructure (VAS, frame allocator,
339// page table walker) via the process table.
340
impl ProcessPageTable {
    /// Translate a virtual address to its backing physical address using the
    /// process's VAS mappings.
    ///
    /// Returns `None` if the process no longer exists or the address is
    /// not mapped.
    fn translate(&self, vaddr: VirtualAddress) -> Option<PhysicalAddress> {
        let process = crate::process::find_process(self.pid)?;
        let vas = process.memory_space.lock();
        crate::mm::translate_address(&vas, vaddr)
    }

    /// Map a physical address at the given virtual address in the process's
    /// page table. This installs the mapping in the architecture page table
    /// via the VAS `map_region` path and flushes the TLB for the new page.
    ///
    /// NOTE(review): `_paddr` is ignored — `map_page` allocates a *new*
    /// frame rather than installing the caller-supplied one, so the
    /// zero-copy paths that pass a translated source frame do not
    /// actually share memory through this call. Confirm whether a
    /// map-at-specific-frame VAS API should be used here instead.
    fn map(
        &mut self,
        vaddr: VirtualAddress,
        _paddr: PhysicalAddress,
        flags: PageFlags,
    ) -> Result<()> {
        let process = crate::process::find_process(self.pid).ok_or(IpcError::ProcessNotFound)?;
        let mut vas = process.memory_space.lock();

        // Use map_page which allocates a physical frame and installs the
        // mapping in the hardware page table, then flushes TLB.
        vas.map_page(vaddr.as_usize(), flags)
            .map_err(|_| IpcError::OutOfMemory)?;

        Ok(())
    }

    /// Unmap a virtual address from the process's page table and flush TLB.
    #[cfg(feature = "alloc")]
    fn unmap(&mut self, vaddr: VirtualAddress) -> Result<()> {
        let process = crate::process::find_process(self.pid).ok_or(IpcError::ProcessNotFound)?;
        let vas = process.memory_space.lock();

        vas.unmap_region(vaddr)
            .map_err(|_| IpcError::InvalidMemoryRegion)?;

        Ok(())
    }

    /// Fallback when the `alloc` feature is disabled: unmapping is
    /// unsupported and always reports `OutOfMemory`.
    #[cfg(not(feature = "alloc"))]
    fn unmap(&mut self, _vaddr: VirtualAddress) -> Result<()> {
        Err(IpcError::OutOfMemory)
    }

    /// Update page flags for an existing mapping. Currently this is a best-
    /// effort operation: we flush the TLB for the address so that the next
    /// access will re-walk the page table with updated flags.
    ///
    /// NOTE(review): `_flags` is never written to the PTE — only the TLB
    /// is flushed. The COW path relies on this call to drop write
    /// permission, so until real flag updates land, writes will NOT
    /// fault. Verify this is tracked as a known gap.
    fn update_flags(&mut self, vaddr: VirtualAddress, _flags: PageFlags) -> Result<()> {
        // Flush TLB for this address so the CPU picks up any flag changes
        // that were applied at the PTE level.
        crate::arch::tlb_flush_address(vaddr.as_u64());
        Ok(())
    }
}
397
398/// Validate that the source process has the right to transfer to the
399/// destination process. Currently validates that both processes exist
400/// and that the source has a mapping for the region.
401fn validate_transfer_capability(from: ProcessId, to: ProcessId, _region: u64) -> bool {
402    // Both processes must exist
403    let from_exists = crate::process::find_process(from).is_some();
404    let to_exists = crate::process::find_process(to).is_some();
405    from_exists && to_exists
406}
407
408/// Look up a process by PID and construct a ProcessPageTable handle that
409/// wraps its VAS page table root.
410fn get_process_page_table(pid: ProcessId) -> Result<ProcessPageTable> {
411    let process = crate::process::find_process(pid).ok_or(IpcError::ProcessNotFound)?;
412    let vas = process.memory_space.lock();
413    let root = vas.get_page_table();
414    Ok(ProcessPageTable { pid, root })
415}
416
417/// Allocate a free virtual address range in the destination process's address
418/// space by delegating to the VAS mmap allocator.
419fn allocate_virtual_range(pt: &mut ProcessPageTable, size: usize) -> Result<VirtualAddress> {
420    let process = crate::process::find_process(pt.pid).ok_or(IpcError::ProcessNotFound)?;
421    let vas = process.memory_space.lock();
422
423    vas.mmap(size, crate::mm::vas::MappingType::Shared)
424        .map_err(|_| IpcError::OutOfMemory)
425}
426
427/// Flush TLB entries for all virtual addresses that may be cached for the
428/// given set of processes. Uses architecture-specific TLB invalidation.
429fn flush_tlb_for_processes(pids: &[ProcessId]) {
430    // If any process in the set is the currently-running process, we must
431    // do a full TLB flush since we cannot know which specific addresses
432    // were affected across the transfer.
433    if pids.is_empty() {
434        return;
435    }
436    // Full flush is the safe, conservative approach for cross-process
437    // page remapping.
438    crate::arch::tlb_flush_all();
439}
440
441/// Get zero-copy statistics
442pub fn get_zero_copy_stats() -> ZeroCopyStatsSummary {
443    ZeroCopyStatsSummary {
444        pages_transferred: ZERO_COPY_STATS.pages_transferred.load(Ordering::Relaxed),
445        bytes_transferred: ZERO_COPY_STATS.bytes_transferred.load(Ordering::Relaxed),
446        transfer_count: ZERO_COPY_STATS.transfer_count.load(Ordering::Relaxed),
447        avg_remap_cycles: {
448            let count = ZERO_COPY_STATS.transfer_count.load(Ordering::Relaxed);
449            let cycles = ZERO_COPY_STATS.remap_cycles.load(Ordering::Relaxed);
450            if count > 0 {
451                cycles / count
452            } else {
453                0
454            }
455        },
456    }
457}
458
/// Point-in-time snapshot of the global zero-copy counters, produced by
/// [`get_zero_copy_stats`].
pub struct ZeroCopyStatsSummary {
    /// Total pages remapped across all transfers
    pub pages_transferred: u64,
    /// Total bytes covered by transferred regions
    pub bytes_transferred: u64,
    /// Number of completed transfers
    pub transfer_count: u64,
    /// Average cycles per transfer (0 when no transfers have run)
    pub avg_remap_cycles: u64,
}
465
#[cfg(all(test, not(target_os = "none")))]
mod tests {
    use super::*;

    /// TransferFlags should faithfully carry the requested transfer type.
    #[test]
    fn test_transfer_flags() {
        let share_flags = TransferFlags {
            numa_hint: Some(0),
            cache_policy: CachePolicy::Default,
            transfer_type: TransferType::Share,
        };

        assert_eq!(share_flags.transfer_type, TransferType::Share);
    }
}