1#![allow(dead_code)]
8
9#[cfg(feature = "alloc")]
10extern crate alloc;
11
12#[cfg(feature = "alloc")]
13use alloc::vec::Vec;
14use core::sync::atomic::{AtomicU64, Ordering};
15
16use super::{
17 error::{IpcError, Result},
18 shared_memory::{Permission, SharedRegion},
19};
20use crate::{
21 arch::entropy::read_timestamp,
22 mm::{PageFlags, PhysicalAddress, VirtualAddress},
23 process::ProcessId,
24};
25
/// Lightweight handle onto a process's page-table state.
///
/// Holds only the owning process id plus the page-table root; every real
/// translate/map/unmap operation re-resolves the process and delegates to
/// its `memory_space` (see the `impl` block below).
struct ProcessPageTable {
    // Process that owns this page table.
    pid: ProcessId,
    // Root of the top-level page table as reported by the process's VAS.
    // NOTE(review): currently stored but never read back — confirm it is needed.
    root: u64,
}
37
/// Global, lock-free counters for zero-copy transfer activity.
pub struct ZeroCopyStats {
    // Total pages remapped across all completed transfers.
    pub pages_transferred: AtomicU64,
    // Total bytes covered by the transferred regions.
    pub bytes_transferred: AtomicU64,
    // Number of completed `zero_copy_transfer` calls.
    pub transfer_count: AtomicU64,
    // Accumulated timestamp deltas spent remapping (units of `read_timestamp`).
    pub remap_cycles: AtomicU64,
}
45
/// Process-wide statistics instance. All counters are updated with relaxed
/// ordering: they are independent and only read for reporting, so no
/// inter-counter consistency is required.
static ZERO_COPY_STATS: ZeroCopyStats = ZeroCopyStats {
    pages_transferred: AtomicU64::new(0),
    bytes_transferred: AtomicU64::new(0),
    transfer_count: AtomicU64::new(0),
    remap_cycles: AtomicU64::new(0),
};
52
53pub fn zero_copy_transfer(
58 region: &SharedRegion,
59 from_pid: ProcessId,
60 to_pid: ProcessId,
61 flags: TransferFlags,
62) -> Result<()> {
63 let start = read_timestamp();
64
65 if !validate_transfer_capability(from_pid, to_pid, region.id()) {
67 return Err(IpcError::PermissionDenied);
68 }
69
70 let mut from_pt = get_process_page_table(from_pid)?;
72 let mut to_pt = get_process_page_table(to_pid)?;
73
74 let num_pages = region.size().div_ceil(PAGE_SIZE);
76
77 match flags.transfer_type {
79 TransferType::Move => transfer_move(
80 region,
81 from_pid,
82 to_pid,
83 &mut from_pt,
84 &mut to_pt,
85 num_pages,
86 )?,
87 TransferType::Share => transfer_share(
88 region,
89 from_pid,
90 to_pid,
91 &mut from_pt,
92 &mut to_pt,
93 num_pages,
94 )?,
95 TransferType::Copy => transfer_copy_on_write(
96 region,
97 from_pid,
98 to_pid,
99 &mut from_pt,
100 &mut to_pt,
101 num_pages,
102 )?,
103 }
104
105 let elapsed = read_timestamp() - start;
107 ZERO_COPY_STATS
108 .pages_transferred
109 .fetch_add(num_pages as u64, Ordering::Relaxed);
110 ZERO_COPY_STATS
111 .bytes_transferred
112 .fetch_add(region.size() as u64, Ordering::Relaxed);
113 ZERO_COPY_STATS
114 .transfer_count
115 .fetch_add(1, Ordering::Relaxed);
116 ZERO_COPY_STATS
117 .remap_cycles
118 .fetch_add(elapsed, Ordering::Relaxed);
119
120 flush_tlb_for_processes(&[from_pid, to_pid]);
122
123 Ok(())
124}
125
126fn transfer_move(
128 region: &SharedRegion,
129 from_pid: ProcessId,
130 to_pid: ProcessId,
131 from_pt: &mut ProcessPageTable,
132 to_pt: &mut ProcessPageTable,
133 num_pages: usize,
134) -> Result<()> {
135 let from_vaddr = region
136 .get_mapping(from_pid)
137 .ok_or(IpcError::InvalidMemoryRegion)?;
138 let to_vaddr = allocate_virtual_range(to_pt, region.size())?;
139
140 for i in 0..num_pages {
141 let offset = i * PAGE_SIZE;
142 let from_page = from_vaddr.add(offset);
143 let to_page = to_vaddr.add(offset);
144
145 let phys_addr = from_pt
147 .translate(from_page)
148 .ok_or(IpcError::InvalidMemoryRegion)?;
149
150 from_pt.unmap(from_page)?;
152
153 to_pt.map(to_page, phys_addr, PageFlags::USER | PageFlags::WRITABLE)?;
155 }
156
157 region.unmap(from_pid)?;
159 region.map(to_pid, to_vaddr, Permission::Write)?;
160
161 Ok(())
162}
163
164fn transfer_share(
166 region: &SharedRegion,
167 from_pid: ProcessId,
168 to_pid: ProcessId,
169 from_pt: &mut ProcessPageTable,
170 to_pt: &mut ProcessPageTable,
171 num_pages: usize,
172) -> Result<()> {
173 let from_vaddr = region
174 .get_mapping(from_pid)
175 .ok_or(IpcError::InvalidMemoryRegion)?;
176 let to_vaddr = allocate_virtual_range(to_pt, region.size())?;
177
178 for i in 0..num_pages {
179 let offset = i * PAGE_SIZE;
180 let from_page = from_vaddr.add(offset);
181 let to_page = to_vaddr.add(offset);
182
183 let phys_addr = from_pt
185 .translate(from_page)
186 .ok_or(IpcError::InvalidMemoryRegion)?;
187
188 to_pt.map(to_page, phys_addr, PageFlags::USER | PageFlags::WRITABLE)?;
190
191 from_pt.update_flags(
193 from_page,
194 PageFlags::USER | PageFlags::WRITABLE | PageFlags::ACCESSED,
195 )?;
196 to_pt.update_flags(
197 to_page,
198 PageFlags::USER | PageFlags::WRITABLE | PageFlags::ACCESSED,
199 )?;
200 }
201
202 region.map(to_pid, to_vaddr, Permission::Write)?;
204
205 Ok(())
206}
207
208fn transfer_copy_on_write(
210 region: &SharedRegion,
211 from_pid: ProcessId,
212 to_pid: ProcessId,
213 from_pt: &mut ProcessPageTable,
214 to_pt: &mut ProcessPageTable,
215 num_pages: usize,
216) -> Result<()> {
217 let from_vaddr = region
218 .get_mapping(from_pid)
219 .ok_or(IpcError::InvalidMemoryRegion)?;
220 let to_vaddr = allocate_virtual_range(to_pt, region.size())?;
221
222 for i in 0..num_pages {
223 let offset = i * PAGE_SIZE;
224 let from_page = from_vaddr.add(offset);
225 let to_page = to_vaddr.add(offset);
226
227 let phys_addr = from_pt
229 .translate(from_page)
230 .ok_or(IpcError::InvalidMemoryRegion)?;
231
232 from_pt.update_flags(from_page, PageFlags::USER)?;
234 to_pt.map(to_page, phys_addr, PageFlags::USER)?;
235 }
236
237 region.map(to_pid, to_vaddr, Permission::Read)?;
239
240 Ok(())
241}
242
/// Per-call options for [`zero_copy_transfer`].
#[derive(Debug, Clone, Copy)]
pub struct TransferFlags {
    // Which remapping strategy to use (move / share / copy-on-write).
    pub transfer_type: TransferType,
    // Desired caching behavior. NOTE(review): not yet consulted by any code
    // in this module — confirm whether it is honored elsewhere.
    pub cache_policy: CachePolicy,
    // Preferred NUMA node, if any. NOTE(review): also currently unused here.
    pub numa_hint: Option<u32>,
}
250
/// Remapping strategy for a zero-copy transfer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TransferType {
    // Pages are unmapped from the sender and mapped into the receiver.
    Move,
    // Pages stay mapped in the sender and are aliased into the receiver.
    Share,
    // Both sides get read-only aliases; a write fault performs the copy.
    Copy,
}
260
/// Cache behavior hint for transferred mappings.
#[derive(Debug, Clone, Copy)]
pub enum CachePolicy {
    // Normal cached memory.
    Default,
    // Hint that data is streamed once (e.g. non-temporal access).
    Streaming,
    // Mapping should bypass the cache entirely.
    Uncached,
}
267
268pub fn grant_transfer_capability(
273 granter_pid: u64,
274 grantee_pid: u64,
275 region_id: u64,
276 permissions: Permission,
277) -> Result<u64> {
278 let _granter = crate::process::table::get_process(ProcessId(granter_pid))
279 .ok_or(IpcError::ProcessNotFound)?;
280 let grantee = crate::process::table::get_process(ProcessId(grantee_pid))
281 .ok_or(IpcError::ProcessNotFound)?;
282
283 let mut rights = crate::cap::memory_integration::MemoryRights::MAP;
285 if permissions.can_read() {
286 rights |= crate::cap::memory_integration::MemoryRights::READ;
287 }
288 if permissions.can_write() {
289 rights |= crate::cap::memory_integration::MemoryRights::WRITE;
290 }
291 if permissions.can_execute() {
292 rights |= crate::cap::memory_integration::MemoryRights::EXECUTE;
293 }
294
295 let grantee_cap_space = grantee.capability_space.lock();
297 let attributes = crate::cap::object::MemoryAttributes::normal();
298 let cap = crate::cap::memory_integration::create_memory_capability(
299 region_id as usize,
300 0, attributes,
302 rights,
303 &grantee_cap_space,
304 )
305 .map_err(|_| IpcError::PermissionDenied)?;
306
307 Ok(cap.to_u64())
308}
309
/// Run several zero-copy transfers between the same pair of processes.
///
/// Each transfer's outcome is reported individually; a failing transfer
/// does not abort the batch. Returns `Err` only when either process is
/// missing up front.
#[cfg(feature = "alloc")]
pub fn batch_zero_copy_transfer(
    transfers: &[(SharedRegion, TransferFlags)],
    from_pid: ProcessId,
    to_pid: ProcessId,
) -> Result<Vec<Result<()>>> {
    let mut results = Vec::with_capacity(transfers.len());

    // Fail fast if either endpoint no longer exists.
    let _from_pt = get_process_page_table(from_pid)?;
    let _to_pt = get_process_page_table(to_pid)?;

    results.extend(
        transfers
            .iter()
            .map(|(region, flags)| zero_copy_transfer(region, from_pid, to_pid, *flags)),
    );

    // Final flush covering the whole batch.
    flush_tlb_for_processes(&[from_pid, to_pid]);

    Ok(results)
}
333
334const PAGE_SIZE: usize = 4096;
335
impl ProcessPageTable {
    /// Resolve a virtual address in this process to its physical address.
    /// Returns `None` if the process no longer exists or the address is
    /// not mapped.
    fn translate(&self, vaddr: VirtualAddress) -> Option<PhysicalAddress> {
        let process = crate::process::find_process(self.pid)?;
        let vas = process.memory_space.lock();
        crate::mm::translate_address(&vas, vaddr)
    }

    /// Map `vaddr` in this process's address space with `flags`.
    ///
    /// NOTE(review): `_paddr` is ignored — `vas.map_page` receives only the
    /// virtual address and flags, so the new mapping is NOT guaranteed to be
    /// backed by the caller-supplied frame. The transfer helpers above rely
    /// on the frame being preserved for true zero-copy semantics; confirm
    /// `map_page`'s behavior or plumb the physical address through.
    fn map(
        &mut self,
        vaddr: VirtualAddress,
        _paddr: PhysicalAddress,
        flags: PageFlags,
    ) -> Result<()> {
        let process = crate::process::find_process(self.pid).ok_or(IpcError::ProcessNotFound)?;
        let mut vas = process.memory_space.lock();

        vas.map_page(vaddr.as_usize(), flags)
            .map_err(|_| IpcError::OutOfMemory)?;

        Ok(())
    }

    /// Unmap the region containing `vaddr` from this process's address
    /// space. Region bookkeeping requires the `alloc` feature.
    #[cfg(feature = "alloc")]
    fn unmap(&mut self, vaddr: VirtualAddress) -> Result<()> {
        let process = crate::process::find_process(self.pid).ok_or(IpcError::ProcessNotFound)?;
        let vas = process.memory_space.lock();

        vas.unmap_region(vaddr)
            .map_err(|_| IpcError::InvalidMemoryRegion)?;

        Ok(())
    }

    /// Without `alloc` there is no region bookkeeping, so unmapping always
    /// fails. NOTE(review): `OutOfMemory` reads oddly as "unsupported" —
    /// confirm the intended error variant.
    #[cfg(not(feature = "alloc"))]
    fn unmap(&mut self, _vaddr: VirtualAddress) -> Result<()> {
        Err(IpcError::OutOfMemory)
    }

    /// NOTE(review): stub — `_flags` is never written to the page-table
    /// entry; only the TLB entry for `vaddr` is flushed. Callers that rely
    /// on permission changes (e.g. the COW read-only downgrade in
    /// `transfer_copy_on_write`) will not actually get them.
    fn update_flags(&mut self, vaddr: VirtualAddress, _flags: PageFlags) -> Result<()> {
        crate::arch::tlb_flush_address(vaddr.as_u64());
        Ok(())
    }
}
397
398fn validate_transfer_capability(from: ProcessId, to: ProcessId, _region: u64) -> bool {
402 let from_exists = crate::process::find_process(from).is_some();
404 let to_exists = crate::process::find_process(to).is_some();
405 from_exists && to_exists
406}
407
408fn get_process_page_table(pid: ProcessId) -> Result<ProcessPageTable> {
411 let process = crate::process::find_process(pid).ok_or(IpcError::ProcessNotFound)?;
412 let vas = process.memory_space.lock();
413 let root = vas.get_page_table();
414 Ok(ProcessPageTable { pid, root })
415}
416
417fn allocate_virtual_range(pt: &mut ProcessPageTable, size: usize) -> Result<VirtualAddress> {
420 let process = crate::process::find_process(pt.pid).ok_or(IpcError::ProcessNotFound)?;
421 let vas = process.memory_space.lock();
422
423 vas.mmap(size, crate::mm::vas::MappingType::Shared)
424 .map_err(|_| IpcError::OutOfMemory)
425}
426
427fn flush_tlb_for_processes(pids: &[ProcessId]) {
430 if pids.is_empty() {
434 return;
435 }
436 crate::arch::tlb_flush_all();
439}
440
441pub fn get_zero_copy_stats() -> ZeroCopyStatsSummary {
443 ZeroCopyStatsSummary {
444 pages_transferred: ZERO_COPY_STATS.pages_transferred.load(Ordering::Relaxed),
445 bytes_transferred: ZERO_COPY_STATS.bytes_transferred.load(Ordering::Relaxed),
446 transfer_count: ZERO_COPY_STATS.transfer_count.load(Ordering::Relaxed),
447 avg_remap_cycles: {
448 let count = ZERO_COPY_STATS.transfer_count.load(Ordering::Relaxed);
449 let cycles = ZERO_COPY_STATS.remap_cycles.load(Ordering::Relaxed);
450 if count > 0 {
451 cycles / count
452 } else {
453 0
454 }
455 },
456 }
457}
458
/// Plain (non-atomic) snapshot of [`ZeroCopyStats`], produced by
/// `get_zero_copy_stats` for reporting.
pub struct ZeroCopyStatsSummary {
    // Total pages remapped across all transfers so far.
    pub pages_transferred: u64,
    // Total bytes covered by those transfers.
    pub bytes_transferred: u64,
    // Number of completed transfers.
    pub transfer_count: u64,
    // Mean remap time per transfer (0 when no transfers have run).
    pub avg_remap_cycles: u64,
}
465
#[cfg(all(test, not(target_os = "none")))]
mod tests {
    use super::*;

    /// `TransferFlags` should faithfully hold the transfer type it was
    /// constructed with.
    #[test]
    fn test_transfer_flags() {
        let share_flags = TransferFlags {
            transfer_type: TransferType::Share,
            cache_policy: CachePolicy::Default,
            numa_hint: Some(0),
        };

        assert_eq!(share_flags.transfer_type, TransferType::Share);
    }
}