veridian_kernel/sched/ipc_blocking.rs
//! IPC blocking and waking operations
//!
//! Manages task blocking and waking for IPC endpoints, including per-endpoint
//! wait queues. Tasks blocked on IPC are tracked here and woken when messages
//! arrive or endpoints become available.
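//!
//! A typical round trip through this module (sketch; the syscall layer and
//! message queue are elided):
//!
//! ```text
//! receiver: block_on_ipc(ep)             -> state = Blocked, parked in wait queue
//! sender:   wake_up_endpoint_waiters(ep) -> each waiter -> wake_up_process(pid)
//! waker:    state = Ready, task rescheduled on a cache-friendly CPU
//! ```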

use core::sync::atomic::Ordering;

use super::{metrics, scheduler, smp, task::Task};
use crate::process::{ProcessId, ProcessState};

/// Yield CPU to scheduler
pub fn yield_cpu() {
    super::SCHEDULER.lock().schedule();
}

/// Block the current process on an IPC endpoint.
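///
/// A minimal sketch of a call site in a blocking receive path; the
/// surrounding handler, `try_dequeue`, and `Message` are illustrative
/// names, not this kernel's actual API:
///
/// ```ignore
/// fn sys_ipc_recv(endpoint: u64) -> Option<Message> {
///     if let Some(msg) = try_dequeue(endpoint) {
///         return Some(msg); // fast path: a message is already queued
///     }
///     // Park until a sender calls wake_up_endpoint_waiters(endpoint).
///     block_on_ipc(endpoint);
///     try_dequeue(endpoint)
/// }
/// ```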
pub fn block_on_ipc(endpoint: u64) {
    let scheduler = scheduler::current_scheduler();
    let mut sched = scheduler.lock();

    if let Some(current_task) = sched.current() {
        // SAFETY: `current_task` is a valid NonNull<Task> from the scheduler.
        // We hold the scheduler lock, so no other code can concurrently modify
        // this task. We update the task's state and blocked_on fields, and
        // optionally update the linked thread's state.
        unsafe {
            let task_mut = current_task.as_ptr();
            (*task_mut).state = ProcessState::Blocked;
            (*task_mut).blocked_on = Some(endpoint);

            // Update thread state if linked
            if let Some(thread_ptr) = (*task_mut).thread_ref {
                // SAFETY: thread_ptr was set during task creation from a valid
                // Thread reference and remains valid for the task's lifetime.
                thread_ptr
                    .as_ref()
                    .set_state(crate::process::thread::ThreadState::Blocked);
            }
        }

        // Add task to wait queue for this endpoint
        add_to_wait_queue(current_task, endpoint);

        // Record IPC block metric
        metrics::SCHEDULER_METRICS.record_ipc_block();

        // Force a reschedule
        sched.schedule();
    }
}

/// Block a process (for signal handling such as SIGSTOP).
///
/// Sets the process and all of its thread states to `Blocked` and triggers a
/// reschedule.
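///
/// A sketch of the intended call site in a signal-delivery path
/// (`deliver_signal` and `Signal` are illustrative names):
///
/// ```ignore
/// fn deliver_signal(pid: ProcessId, sig: Signal) {
///     match sig {
///         Signal::Stop => block_process(pid),   // SIGSTOP: park every thread
///         Signal::Cont => wake_up_process(pid), // SIGCONT: make it runnable again
///         _ => { /* ... */ }
///     }
/// }
/// ```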
pub fn block_process(pid: ProcessId) {
    // Fast path: check whether the target is the task currently running on
    // this CPU before falling back to the process table.
    #[cfg(feature = "alloc")]
    {
        let scheduler = scheduler::current_scheduler();
        let sched = scheduler.lock();

        if let Some(current_task) = sched.current() {
            // SAFETY: `current_task` is a valid NonNull<Task> from the
            // scheduler. We hold the scheduler lock ensuring exclusive access
            // to the task. We read pid for comparison and potentially update
            // state and thread_ref fields.
            unsafe {
                if (*current_task.as_ptr()).pid == pid {
                    // This is the current task - block it
                    let task_mut = current_task.as_ptr();
                    (*task_mut).state = ProcessState::Blocked;

                    // Update thread state if linked
                    if let Some(thread_ptr) = (*task_mut).thread_ref {
                        // SAFETY: thread_ptr is valid for the task's lifetime.
                        thread_ptr
                            .as_ref()
                            .set_state(crate::process::thread::ThreadState::Blocked);
                    }

                    drop(sched);
                    // Force a reschedule
                    super::SCHEDULER.lock().schedule();
                    return;
                }
            }
        }
        drop(sched);

        // Slow path: look up the process in the process table and block all
        // of its threads.
        if let Some(process) = crate::process::table::get_process_mut(pid) {
            // Update process state
            process
                .state
                .store(ProcessState::Blocked as u32, Ordering::Release);

            // Block all threads in the process
            let threads = process.threads.lock();
            for (_tid, thread) in threads.iter() {
                thread.set_state(crate::process::thread::ThreadState::Blocked);

                // If thread has a task, update task state too
                if let Some(task_ptr) = thread.get_task_ptr() {
                    // SAFETY: task_ptr was set via Thread::set_task_ptr during
                    // task creation and points to a valid heap-allocated Task.
                    // We hold the process threads lock for synchronization.
                    unsafe {
                        (*task_ptr.as_ptr()).state = ProcessState::Blocked;
                    }
                }
            }

            kprintln!("[SCHED] Blocked process and all its threads");
        }
    }

    #[cfg(not(feature = "alloc"))]
    {
        let _ = pid;
    }
}

/// Wake up a process blocked on IPC and reschedule it on a suitable CPU.
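///
/// The target CPU prefers the task's last-run CPU for cache locality when it
/// is still online; otherwise the least-loaded CPU is used, restricted to the
/// task's affinity mask when one is set and no last-run CPU is recorded.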
pub fn wake_up_process(pid: ProcessId) {
    // First check if task is in any wait queue
    if let Some(task_ptr) = remove_from_wait_queue(pid) {
        // SAFETY: `task_ptr` was stored in the wait queue during
        // `block_on_ipc` when it was a valid NonNull<Task>. Tasks in wait
        // queues are not deallocated until explicitly removed and cleaned up.
        // We hold no other locks that could cause a deadlock.
        unsafe {
            let task_mut = task_ptr.as_ptr();
            let previous_state = (*task_mut).state;
            (*task_mut).state = ProcessState::Ready;
            (*task_mut).blocked_on = None;

            // Update thread state if linked
            if let Some(thread_ptr) = (*task_mut).thread_ref {
                // SAFETY: thread_ptr is valid for the task's lifetime.
                thread_ptr
                    .as_ref()
                    .set_state(crate::process::thread::ThreadState::Ready);
            }

            // Record IPC wakeup metric if it was blocked on IPC
            if previous_state == ProcessState::Blocked {
                metrics::SCHEDULER_METRICS.record_ipc_wakeup();
            }

            // Find the best CPU to schedule on, preferring last-run CPU
            // for cache locality (avoids cold-cache penalty on migration).
            let last_cpu = (*task_mut).current_cpu;
            let target_cpu = if let Some(last) = last_cpu {
                // Prefer last-run CPU if it's online (cache warm)
                if let Some(cpu_data) = smp::per_cpu(last) {
                    if cpu_data.cpu_info.is_online() {
                        last
                    } else {
                        smp::find_least_loaded_cpu()
                    }
                } else {
                    smp::find_least_loaded_cpu()
                }
            } else if (*task_mut).cpu_affinity.mask() != 0 {
                // Has affinity -- find least loaded CPU matching mask
                smp::find_least_loaded_cpu_with_affinity((*task_mut).cpu_affinity.mask())
            } else {
                // No preference -- use least loaded CPU
                smp::find_least_loaded_cpu()
            };

            // Schedule on target CPU
            scheduler::schedule_on_cpu(target_cpu, task_ptr);
            return;
        }
    }

    // If not in wait queue, search all CPU ready queues
    for cpu_id in 0..smp::MAX_CPUS as u8 {
        if let Some(cpu_data) = smp::per_cpu(cpu_id) {
            if cpu_data.cpu_info.is_online() {
                // Per-CPU ready queues are in percpu_queue module; for
                // wake-up we still check via the global scheduler since
                // tasks may be current on any CPU.
                let sched = super::SCHEDULER.lock();

                // Search through the scheduler's tasks
                if let Some(current) = sched.current() {
                    // SAFETY: `current` is a valid NonNull<Task> from the
                    // scheduler. We hold the scheduler lock so the task won't
                    // be modified concurrently. We only read/write task fields.
                    unsafe {
                        if (*current.as_ptr()).pid == pid {
                            // Found it as current task - just update state
                            (*current.as_ptr()).state = ProcessState::Ready;
                            if let Some(thread_ptr) = (*current.as_ptr()).thread_ref {
                                thread_ptr
                                    .as_ref()
                                    .set_state(crate::process::thread::ThreadState::Ready);
                            }
                            return;
                        }
                    }
                }
            }
        }
    }

    // If still not found, fall back to the process table and wake the
    // process's main thread.
    #[cfg(feature = "alloc")]
    {
        if let Some(process) = crate::process::table::get_process_mut(pid) {
            // Update process state
            process
                .state
                .store(ProcessState::Ready as u32, Ordering::Release);

            // Find main thread and wake it
            if let Some(main_tid) = process.get_main_thread_id() {
                // Update thread state through process
                let threads = process.threads.lock();
                if let Some(thread) = threads.get(&main_tid) {
                    thread.set_state(crate::process::thread::ThreadState::Ready);

                    // Try to schedule the thread if it has a task
                    if let Some(task_ptr) = thread.get_task_ptr() {
                        let target_cpu = smp::find_least_loaded_cpu();
                        scheduler::schedule_on_cpu(target_cpu, task_ptr);
                    }
                }
            }
        }
    }
}

/// Wake up all processes blocked on a specific endpoint.
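///
/// A sketch of a send path that completes the rendezvous; `enqueue_message`,
/// `Message`, and the handler shape are illustrative:
///
/// ```ignore
/// fn sys_ipc_send(endpoint: u64, msg: Message) {
///     enqueue_message(endpoint, msg);
///     // Let every task parked on this endpoint retry its receive.
///     wake_up_endpoint_waiters(endpoint);
/// }
/// ```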
pub fn wake_up_endpoint_waiters(endpoint: u64) {
    #[cfg(feature = "alloc")]
    {
        let waiters = get_endpoint_waiters(endpoint);
        for task_ptr in waiters {
            // SAFETY: task_ptr was retrieved from the wait queue where it was
            // stored as a valid NonNull<Task>. We only read the pid field to
            // pass to wake_up_process.
            unsafe {
                let task = task_ptr.as_ref();
                wake_up_process(task.pid);
            }
        }
    }
    #[cfg(not(feature = "alloc"))]
    {
        // Without alloc, we can't maintain wait queues
        let _ = endpoint;
    }
}

// ---------------------------------------------------------------------------
// Wait queue management
// ---------------------------------------------------------------------------

#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
use alloc::collections::BTreeMap;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;

#[cfg(feature = "alloc")]
use spin::Lazy;

/// Wrapper to make NonNull<Task> Send/Sync for use in wait queues.
///
/// # Safety
///
/// Tasks stored in wait queues are only accessed while holding the wait queue
/// lock (WAIT_QUEUES Mutex). The scheduler ensures tasks are not deallocated
/// while present in any wait queue.
#[cfg(feature = "alloc")]
#[derive(Clone, Copy)]
struct WaitQueueTaskPtr(core::ptr::NonNull<Task>);

// SAFETY: WaitQueueTaskPtr wraps a NonNull<Task> that is only accessed under
// the WAIT_QUEUES mutex lock, ensuring no data races. Task memory is managed
// by the kernel allocator and outlives the wait queue entry.
#[cfg(feature = "alloc")]
unsafe impl Send for WaitQueueTaskPtr {}
// SAFETY: Same as Send -- all access is synchronized through the WAIT_QUEUES
// mutex.
#[cfg(feature = "alloc")]
unsafe impl Sync for WaitQueueTaskPtr {}
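// Note: `NonNull<T>` is neither `Send` nor `Sync` on its own, so without this
// newtype the raw task pointers could not live inside a `static` wait queue.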

/// Per-endpoint wait queues for blocked tasks, keyed by endpoint ID.
#[cfg(feature = "alloc")]
static WAIT_QUEUES: Lazy<spin::Mutex<BTreeMap<u64, Vec<WaitQueueTaskPtr>>>> =
    Lazy::new(|| spin::Mutex::new(BTreeMap::new()));

#[cfg(feature = "alloc")]
fn wait_queues() -> &'static spin::Mutex<BTreeMap<u64, Vec<WaitQueueTaskPtr>>> {
    &WAIT_QUEUES
}

/// Add task to wait queue for endpoint
#[cfg(feature = "alloc")]
pub(super) fn add_to_wait_queue(task: core::ptr::NonNull<Task>, endpoint: u64) {
    let mut queues = wait_queues().lock();
    queues
        .entry(endpoint)
        .or_default()
        .push(WaitQueueTaskPtr(task));
}

/// Remove task from wait queue by PID
#[cfg(feature = "alloc")]
pub(super) fn remove_from_wait_queue(pid: ProcessId) -> Option<core::ptr::NonNull<Task>> {
    let mut queues = wait_queues().lock();

    for (_endpoint, waiters) in queues.iter_mut() {
        if let Some(pos) = waiters.iter().position(|&WaitQueueTaskPtr(task_ptr)| {
            // SAFETY: task_ptr was inserted into the wait queue as a valid
            // NonNull<Task> during block_on_ipc. Tasks in wait queues are not
            // deallocated. We only read the pid field for comparison.
            unsafe { task_ptr.as_ref().pid == pid }
        }) {
            return Some(waiters.remove(pos).0);
        }
    }

    None
}

/// Drain and return all waiters for an endpoint
#[cfg(feature = "alloc")]
pub(super) fn get_endpoint_waiters(endpoint: u64) -> Vec<core::ptr::NonNull<Task>> {
    let mut queues = wait_queues().lock();
    queues
        .remove(&endpoint)
        .unwrap_or_default()
        .into_iter()
        .map(|WaitQueueTaskPtr(ptr)| ptr)
        .collect()
}

// Stub implementations for no_std without alloc
#[cfg(not(feature = "alloc"))]
pub(super) fn add_to_wait_queue(_task: core::ptr::NonNull<Task>, _endpoint: u64) {
    // No-op without alloc
}

#[cfg(not(feature = "alloc"))]
pub(super) fn remove_from_wait_queue(_pid: ProcessId) -> Option<core::ptr::NonNull<Task>> {
    None
}

#[cfg(not(feature = "alloc"))]
pub(super) fn get_endpoint_waiters(_endpoint: u64) -> [core::ptr::NonNull<Task>; 0] {
    []
}