⚠️ VeridianOS Kernel Documentation - This is low-level kernel code built for a `no_std` environment. Functions are safe unless explicitly marked `unsafe`.

veridian_kernel/ipc/
sync.rs

1//! Synchronous IPC implementation
2//!
3//! Provides blocking send/receive operations with direct handoff between
4//! processes.
5
6// Synchronous IPC -- exercised via syscall IPC paths
7#![allow(dead_code)]
8
9#[cfg(feature = "alloc")]
10extern crate alloc;
11
12use core::sync::atomic::{AtomicU64, Ordering};
13
14use super::{
15    error::{IpcError, Result},
16    fast_path::{fast_receive, fast_send},
17    message::Message,
18};
19use crate::{
20    arch::entropy::read_timestamp,
21    process::{ProcessId, ProcessState},
22    sched::{current_process, find_process},
23};
24
/// Statistics for synchronous IPC
///
/// All counters are lock-free `AtomicU64`s updated with `Relaxed` ordering:
/// they are monitoring data only and are never used for synchronization.
pub struct SyncIpcStats {
    /// Total `sync_send` invocations
    pub send_count: AtomicU64,
    /// Total `sync_receive` invocations
    pub receive_count: AtomicU64,
    /// Operations completed via the register-based fast path
    pub fast_path_count: AtomicU64,
    /// Operations that fell back to the queued slow path
    pub slow_path_count: AtomicU64,
    /// Running average latency in timestamp cycles (see `update_latency_stats`)
    pub avg_latency_cycles: AtomicU64,
}
33
/// Global zero-initialized statistics instance shared by all callers.
static SYNC_STATS: SyncIpcStats = SyncIpcStats {
    send_count: AtomicU64::new(0),
    receive_count: AtomicU64::new(0),
    fast_path_count: AtomicU64::new(0),
    slow_path_count: AtomicU64::new(0),
    avg_latency_cycles: AtomicU64::new(0),
};
41
/// Simple send message function for tests
///
/// Thin test-only alias for [`sync_send`]; compiled only under `cfg(test)`.
#[cfg(test)]
pub fn send_message(msg: Message, target_endpoint: u64) -> Result<()> {
    sync_send(msg, target_endpoint)
}
47
48/// Synchronous message send
49///
50/// Blocks until message is delivered to receiver.
51pub fn sync_send(msg: Message, target_endpoint: u64) -> Result<()> {
52    let start = read_timestamp();
53    SYNC_STATS.send_count.fetch_add(1, Ordering::Relaxed);
54
55    match msg {
56        Message::Small(small_msg) => {
57            // Try fast path first
58            match fast_send(&small_msg, target_endpoint) {
59                Ok(()) => {
60                    SYNC_STATS.fast_path_count.fetch_add(1, Ordering::Relaxed);
61                    update_latency_stats(start);
62                    Ok(())
63                }
64                Err(IpcError::WouldBlock) => {
65                    // Fall back to slow path
66                    SYNC_STATS.slow_path_count.fetch_add(1, Ordering::Relaxed);
67                    sync_send_slow_path(Message::Small(small_msg), target_endpoint)?;
68                    update_latency_stats(start);
69                    Ok(())
70                }
71                Err(e) => Err(e),
72            }
73        }
74        Message::Large(large_msg) => {
75            // Large messages always use slow path
76            SYNC_STATS.slow_path_count.fetch_add(1, Ordering::Relaxed);
77            sync_send_slow_path(Message::Large(large_msg), target_endpoint)?;
78            update_latency_stats(start);
79            Ok(())
80        }
81    }
82}
83
84/// Synchronous message receive
85///
86/// Blocks until a message is available.
87pub fn sync_receive(endpoint: u64) -> Result<Message> {
88    let start = read_timestamp();
89    SYNC_STATS.receive_count.fetch_add(1, Ordering::Relaxed);
90
91    // Try fast path for small messages
92    match fast_receive(endpoint, None) {
93        Ok(small_msg) => {
94            SYNC_STATS.fast_path_count.fetch_add(1, Ordering::Relaxed);
95            update_latency_stats(start);
96            Ok(Message::Small(small_msg))
97        }
98        Err(IpcError::WouldBlock) => {
99            // Fall back to slow path
100            SYNC_STATS.slow_path_count.fetch_add(1, Ordering::Relaxed);
101            let msg = sync_receive_slow_path(endpoint)?;
102            update_latency_stats(start);
103            Ok(msg)
104        }
105        Err(e) => Err(e),
106    }
107}
108
/// Call operation (send and wait for reply)
///
/// Sends `request` to `target`, marks the current process blocked, then
/// waits for the reply on an endpoint keyed by our own process ID — the
/// convention `sync_reply` relies on when addressing the caller.
pub fn sync_call(request: Message, target: u64) -> Result<Message> {
    // Send request
    sync_send(request, target)?;

    // Mark ourselves as waiting for reply
    // NOTE(review): the state is assigned directly rather than through the
    // scheduler's blocking API, and nothing here visibly resets it if
    // sync_receive completes via the fast path without a wake-up from
    // sync_reply — confirm the scheduler clears Blocked on resume.
    let current = current_process();
    current.state = ProcessState::Blocked;

    // Wait for reply using process ID as endpoint
    sync_receive(current.pid.0)
}
121
122/// Reply to a previous call
123pub fn sync_reply(reply: Message, caller: u64) -> Result<()> {
124    // Find caller process
125    let caller_process = find_process(ProcessId(caller)).ok_or(IpcError::ProcessNotFound)?;
126
127    // Verify caller is waiting for reply
128    if caller_process.state != ProcessState::Blocked {
129        return Err(IpcError::InvalidMessage);
130    }
131
132    // Send reply directly
133    sync_send(reply, caller)?;
134
135    // Wake the caller process after reply is sent
136    crate::sched::ipc_blocking::wake_up_process(ProcessId(caller));
137    Ok(())
138}
139
/// Slow path for synchronous send
///
/// Used when the fast path reports `WouldBlock` and for all large messages.
/// Validates the sender's capability, then hands the message to the queued
/// message-passing subsystem, blocking and retrying a bounded number of
/// times while the target endpoint's queue is full.
fn sync_send_slow_path(msg: Message, target_endpoint: u64) -> Result<()> {
    // Validate send capability
    validate_send_capability(&msg, target_endpoint)?;

    // Use message passing subsystem with retry-on-full blocking
    #[cfg(feature = "alloc")]
    {
        // NOTE(review): `msg` is passed by value on every iteration, so this
        // only compiles if `Message` is `Copy` — confirm against message.rs.
        const MAX_RETRIES: u32 = 3;
        for _attempt in 0..MAX_RETRIES {
            match crate::ipc::message_passing::send_to_endpoint(msg, target_endpoint) {
                Ok(()) => {
                    // Wake any processes waiting on this endpoint
                    crate::sched::ipc_blocking::wake_up_endpoint_waiters(target_endpoint);
                    return Ok(());
                }
                Err(IpcError::ChannelFull) => {
                    // Block until space available, then retry the send.
                    crate::sched::ipc_blocking::block_on_ipc(target_endpoint);
                }
                Err(e) => return Err(e),
            }
        }
        // Queue remained full across all retries; report it to the caller.
        Err(IpcError::ChannelFull)
    }
    #[cfg(not(feature = "alloc"))]
    {
        // Without an allocator there is no queued subsystem to fall back on.
        Err(IpcError::OutOfMemory)
    }
}
170
/// Slow path for synchronous receive
///
/// Delegates to the queued message-passing subsystem with blocking enabled
/// (the `true` argument). Requires the `alloc` feature; without an
/// allocator there is no queue to receive from.
fn sync_receive_slow_path(endpoint: u64) -> Result<Message> {
    // Use message passing subsystem with blocking
    #[cfg(feature = "alloc")]
    {
        crate::ipc::message_passing::receive_from_endpoint(endpoint, true)
    }
    #[cfg(not(feature = "alloc"))]
    {
        Err(IpcError::OutOfMemory)
    }
}
183
184/// Validate send capability
185fn validate_send_capability(msg: &Message, endpoint_id: u64) -> Result<()> {
186    let cap_id = msg.capability();
187
188    // Get current process's capability space
189    let current_process = crate::process::current_process().ok_or(IpcError::ProcessNotFound)?;
190    let cap_space = current_process.capability_space.lock();
191
192    // Convert capability ID to token
193    let cap_token = crate::cap::CapabilityToken::from_u64(cap_id);
194
195    // Check if the capability grants send permission for this endpoint
196    // Note: This checks the capability exists, is valid, and has SEND rights
197    crate::cap::ipc_integration::check_send_permission(cap_token, &cap_space).map_err(
198        |e| match e {
199            IpcError::InvalidCapability => IpcError::InvalidCapability,
200            IpcError::PermissionDenied => IpcError::PermissionDenied,
201            _ => IpcError::InvalidCapability,
202        },
203    )?;
204
205    // Verify the capability is associated with the target endpoint.
206    // Look up the full capability entry to check the ObjectRef.
207    // Non-endpoint capabilities are also valid for general IPC
208    // (e.g., process capabilities can send on any endpoint they
209    // have SEND rights for).
210    #[cfg(feature = "alloc")]
211    {
212        if let Some((crate::cap::object::ObjectRef::Endpoint { endpoint }, _rights)) =
213            cap_space.lookup_entry(cap_token)
214        {
215            if endpoint.id() != endpoint_id {
216                return Err(IpcError::InvalidCapability);
217            }
218        }
219    }
220    #[cfg(not(feature = "alloc"))]
221    {
222        let _ = endpoint_id;
223    }
224
225    Ok(())
226}
227
228/// Update latency statistics
229fn update_latency_stats(start_cycles: u64) {
230    let elapsed = read_timestamp() - start_cycles;
231    let count = SYNC_STATS.send_count.load(Ordering::Relaxed)
232        + SYNC_STATS.receive_count.load(Ordering::Relaxed);
233    let current_avg = SYNC_STATS.avg_latency_cycles.load(Ordering::Relaxed);
234
235    // Calculate new average
236    let new_avg = if count > 1 {
237        (current_avg * (count - 1) + elapsed) / count
238    } else {
239        elapsed
240    };
241
242    SYNC_STATS
243        .avg_latency_cycles
244        .store(new_avg, Ordering::Relaxed);
245
246    // Also record in global performance stats
247    let is_fast_path = SYNC_STATS.fast_path_count.load(Ordering::Relaxed)
248        > SYNC_STATS.slow_path_count.load(Ordering::Relaxed);
249    crate::ipc::perf::IPC_PERF_STATS.record_operation(elapsed, is_fast_path);
250}
251
252/// Get synchronous IPC statistics
253pub fn get_sync_stats() -> SyncStatsSummary {
254    SyncStatsSummary {
255        send_count: SYNC_STATS.send_count.load(Ordering::Relaxed),
256        receive_count: SYNC_STATS.receive_count.load(Ordering::Relaxed),
257        fast_path_count: SYNC_STATS.fast_path_count.load(Ordering::Relaxed),
258        slow_path_count: SYNC_STATS.slow_path_count.load(Ordering::Relaxed),
259        avg_latency_cycles: SYNC_STATS.avg_latency_cycles.load(Ordering::Relaxed),
260        fast_path_percentage: {
261            let fast = SYNC_STATS.fast_path_count.load(Ordering::Relaxed);
262            let total = fast + SYNC_STATS.slow_path_count.load(Ordering::Relaxed);
263            if total > 0 {
264                (fast * 100) / total
265            } else {
266                0
267            }
268        },
269    }
270}
271
/// Point-in-time snapshot of the synchronous IPC counters.
///
/// Plain-data view returned by `get_sync_stats`; all fields are plain
/// `u64`s, so the type derives the standard value-type traits for easy
/// logging, copying, and zero-default construction.
#[derive(Debug, Clone, Copy, Default)]
pub struct SyncStatsSummary {
    /// Total send operations recorded
    pub send_count: u64,
    /// Total receive operations recorded
    pub receive_count: u64,
    /// Operations completed on the fast path
    pub fast_path_count: u64,
    /// Operations that used the slow path
    pub slow_path_count: u64,
    /// Running average latency in cycles
    pub avg_latency_cycles: u64,
    /// Integer percentage (0-100) of operations on the fast path
    pub fast_path_percentage: u64,
}
280
// Host-only unit tests: excluded when building for the bare-metal target.
#[cfg(all(test, not(target_os = "none")))]
mod tests {
    use super::*;

    /// A fresh statics image must report all-zero stats, and the
    /// percentage helper must not divide by zero when no ops have run.
    #[test]
    fn test_sync_stats() {
        let stats = get_sync_stats();
        assert_eq!(stats.send_count, 0);
        assert_eq!(stats.receive_count, 0);
        assert_eq!(stats.fast_path_percentage, 0);
    }
}