veridian_kernel/process/mod.rs
1//! Process management module
2//!
3//! This module provides the core process and thread management functionality
4//! for the VeridianOS microkernel, including:
5//! - Process Control Block (PCB) management
6//! - Thread creation and management
7//! - Process lifecycle (creation, termination, state transitions)
8//! - Global process table
9//! - Memory space management
10//! - Capability integration
11
12// Process management is fully implemented but many functions are not yet
13// called from user-space syscall paths. Will be exercised once the process
14// lifecycle is driven by real user-space programs.
15
16use core::sync::atomic::{AtomicU64, Ordering};
17
18#[cfg(feature = "alloc")]
19extern crate alloc;
20
21// Import println! macro - may be no-op on some architectures
22#[allow(unused_imports)]
23use crate::println;
24
25// Re-export submodules
26pub mod creation;
27pub mod cwd;
28pub mod exit;
29pub mod fork;
30pub mod lifecycle;
31pub mod loader;
32pub mod memory;
33pub mod pcb;
34pub mod session;
35pub mod signal_delivery;
36pub mod sync;
37pub mod table;
38pub mod thread;
39pub mod wait;
40
41// Re-export common types
42pub use lifecycle::{exec_process, fork_process, wait_process as wait_for_child};
43pub use pcb::{Process, ProcessId, ProcessPriority, ProcessState};
44pub use table::get_process;
45pub use thread::{Thread, ThreadId, ThreadState};
46
47// Re-export thread context types for compatibility
48pub use crate::arch::context::{ArchThreadContext, ThreadContext};
49
/// Maximum number of concurrent processes (including zombies awaiting reaping).
///
/// This limit is enforced in fork() to prevent unbounded process table growth
/// during workloads like BusyBox native compilation (213+ sequential gcc
/// invocations). Zombie processes count against this limit until reaped by
/// their parent via waitpid().
pub const MAX_PROCESSES: usize = 1024;

/// Maximum threads per process
pub const MAX_THREADS_PER_PROCESS: usize = 256;

/// Process ID allocator.
///
/// Monotonic counter starting at 1: PID 0 is reserved for the idle task /
/// "no process" sentinel (see `BOOT_CURRENT_PID` below).
static NEXT_PID: AtomicU64 = AtomicU64::new(1);

/// Thread ID allocator.
///
/// Monotonic counter starting at 1: TID 0 is reserved as the "no boot
/// thread" sentinel used by `BOOT_CURRENT_TID`.
static NEXT_TID: AtomicU64 = AtomicU64::new(1);

/// Boot-launched process tracking.
///
/// During bootstrap, user processes are launched via
/// `enter_usermode_returnable()` without registering them in the scheduler.
/// When these processes make syscalls (e.g., fork), `current_process()` queries
/// the scheduler which only knows about the idle task (pid=0). These atomics
/// provide a fallback: the bootstrap wrapper sets them before entering user
/// mode, and `current_process()`/`current_thread()` check them when the
/// scheduler returns no valid process.
///
/// Using atomics avoids the SCHEDULER lock entirely, which is critical because
/// acquiring the lock from the bootstrap stack corrupts SSE alignment (movaps
/// GP fault).
static BOOT_CURRENT_PID: AtomicU64 = AtomicU64::new(0); // 0 = no boot process registered
static BOOT_CURRENT_TID: AtomicU64 = AtomicU64::new(0); // 0 = no boot thread registered
82
/// Register a boot-launched process as the current process.
///
/// Called from the bootstrap wrapper before entering user mode via
/// `enter_usermode_returnable()`. This allows `current_process()` and
/// `current_thread()` to return the correct process/thread during syscalls.
pub fn set_boot_current(pid: ProcessId, tid: ThreadId) {
    // Release stores pair with the Acquire loads in current_process() /
    // current_thread() so readers observe the published IDs.
    BOOT_CURRENT_PID.store(pid.0, Ordering::Release);
    BOOT_CURRENT_TID.store(tid.0, Ordering::Release);
}
92
/// Clear the boot-launched process tracking.
///
/// Called from the bootstrap wrapper after the user process exits and control
/// returns to the kernel bootstrap code.
pub fn clear_boot_current() {
    // 0 is the "no boot process/thread" sentinel checked by
    // current_process() and current_thread().
    BOOT_CURRENT_PID.store(0, Ordering::Release);
    BOOT_CURRENT_TID.store(0, Ordering::Release);
}
101
102/// Allocate a new process ID
103pub fn alloc_pid() -> ProcessId {
104 ProcessId(NEXT_PID.fetch_add(1, Ordering::Relaxed))
105}
106
107/// Allocate a new thread ID
108pub fn alloc_tid() -> ThreadId {
109 ThreadId(NEXT_TID.fetch_add(1, Ordering::Relaxed))
110}
111
/// Initialize process management subsystem without creating init process
///
/// This is used during bootstrap to initialize process structures
/// without creating the init process (which requires scheduler).
///
/// # Errors
///
/// Currently always returns `Ok(())`; the `KernelResult` return type leaves
/// room for fallible initialization steps in the future.
pub fn init_without_init_process() -> crate::error::KernelResult<()> {
    println!("[PROCESS] Initializing process management structures...");

    // Initialize process table
    table::init();

    println!("[PROCESS] Process management structures initialized");
    Ok(())
}
125
126/// Initialize process management subsystem (legacy)
127///
128/// This creates the init process, so scheduler must be initialized first.
129pub fn init() {
130 println!("[PROCESS] Initializing process management...");
131
132 // Initialize process table
133 table::init();
134
135 // Create init process (PID 1)
136 #[cfg(feature = "alloc")]
137 {
138 use alloc::string::String;
139 match lifecycle::create_process(String::from("init"), 0) {
140 Ok(_pid) => {
141 println!("[PROCESS] Created init process with PID {}", _pid.0);
142 }
143 Err(_e) => {
144 // Log the error but do not panic. The bootstrap sequence
145 // creates its own init process as a fallback, so this path
146 // is recoverable. The legacy init() path is rarely used.
147 println!("[PROCESS] WARNING: Failed to create init process: {}", _e);
148 }
149 }
150 }
151
152 println!("[PROCESS] Process management initialized");
153}
154
/// Get current process
///
/// Resolution order:
/// 1. Ask the scheduler for the current task and look up its PID in the
///    global process table.
/// 2. Fall back to the boot-launched process atomics set by
///    `set_boot_current()` (see `BOOT_CURRENT_PID`).
///
/// Returns `None` when neither source yields a registered process.
pub fn current_process() -> Option<&'static Process> {
    // Get from current CPU's scheduler. The lock guard (a temporary in the
    // if-let scrutinee) stays alive for the whole if-let body, which is what
    // makes dereferencing `task` below sound.
    if let Some(task) = crate::sched::SCHEDULER.lock().current() {
        // SAFETY: `task` is a NonNull<Task> returned by the scheduler's
        // current() method. The scheduler guarantees the pointer is valid
        // for the lifetime of the lock. We read pid to look up the process.
        unsafe {
            let task_ref = task.as_ref();
            if let Some(proc) = table::get_process(task_ref.pid) {
                return Some(proc);
            }
        }
    }

    // Fallback: check boot-launched process atomics.
    // During bootstrap, user processes run via enter_usermode_returnable()
    // without scheduler registration. The bootstrap wrapper sets these
    // atomics so syscalls (fork, wait, etc.) can find the calling process.
    let boot_pid = BOOT_CURRENT_PID.load(Ordering::Acquire);
    if boot_pid != 0 {
        return table::get_process(ProcessId(boot_pid));
    }

    None
}
181
/// Find process by ID
///
/// Thin wrapper over [`table::get_process`]; returns `None` when `pid` is
/// not present in the global process table.
pub fn find_process(pid: ProcessId) -> Option<&'static Process> {
    table::get_process(pid)
}
186
/// Get current process (alias for compatibility)
///
/// Identical to [`current_process`]; kept so callers using the older name
/// keep compiling.
pub fn get_current_process() -> Option<&'static Process> {
    current_process()
}
191
/// Get current thread
///
/// Mirrors [`current_process`]: first consults the scheduler's current task,
/// then falls back to the boot-launched process/thread atomics. Returns
/// `None` when no registered thread can be resolved.
pub fn current_thread() -> Option<&'static Thread> {
    // Get from current CPU's scheduler. The lock guard lives for the whole
    // if-let body, keeping the `task` pointer dereference below sound.
    if let Some(task) = crate::sched::SCHEDULER.lock().current() {
        // SAFETY: `task` is a NonNull<Task> returned by the scheduler's
        // current() method. The scheduler guarantees the pointer is valid
        // for the lifetime of the lock. We read pid and tid to look up
        // the thread via the process table.
        unsafe {
            let task_ref = task.as_ref();
            if let Some(process) = table::get_process(task_ref.pid) {
                if let Some(thread) = process.get_thread(task_ref.tid) {
                    return Some(thread);
                }
            }
        }
    }

    // Fallback: check boot-launched process atomics.
    let boot_pid = BOOT_CURRENT_PID.load(Ordering::Acquire);
    let boot_tid = BOOT_CURRENT_TID.load(Ordering::Acquire);
    if boot_pid != 0 {
        if let Some(process) = table::get_process(ProcessId(boot_pid)) {
            return process.get_thread(ThreadId(boot_tid));
        }
    }

    None
}
221
/// Yield current thread
///
/// Thin wrapper over the scheduler's yield; the calling thread is
/// rescheduled according to normal scheduling policy.
pub fn yield_thread() {
    crate::sched::yield_cpu();
}
226
227/// Exit current thread
228pub fn exit_thread(exit_code: i32) {
229 if let (Some(thread), Some(process)) = (current_thread(), current_process()) {
230 println!(
231 "[PROCESS] Thread {} exiting with code {}",
232 thread.tid.0, exit_code
233 );
234
235 // Mark thread as exited with state synchronization
236 thread.set_exited(exit_code);
237
238 // If detached, clean up immediately (no join will occur)
239 if thread.detached.load(core::sync::atomic::Ordering::Acquire) {
240 let _ = crate::process::exit::cleanup_thread(process, thread.tid);
241 }
242
243 // Handle CLONE_CHILD_CLEARTID: clear *clear_tid and futex wake
244 let clear_ptr = thread.clear_tid.load(core::sync::atomic::Ordering::Acquire);
245 if clear_ptr != 0 {
246 unsafe {
247 // Ignore copy_to_user errors here; best effort
248 let _ = crate::syscall::copy_to_user(clear_ptr, &0u32);
249 }
250 // Wake futex waiters on that address
251 let _ = crate::syscall::sys_futex_wake(clear_ptr, 1, 0);
252 }
253
254 // Never return - schedule another thread
255 crate::sched::exit_task(exit_code);
256 }
257}
258
259/// Terminate a specific thread
260pub fn terminate_thread(pid: ProcessId, tid: ThreadId) -> crate::error::KernelResult<()> {
261 if let Some(process) = find_process(pid) {
262 if let Some(thread) = process.get_thread(tid) {
263 println!(
264 "[PROCESS] Terminating thread {} in process {}",
265 tid.0, pid.0
266 );
267
268 // Mark thread as dead
269 thread.set_state(thread::ThreadState::Dead);
270
271 // Remove from scheduler if it has a task
272 if let Some(task_ptr) = thread.get_task_ptr() {
273 // SAFETY: task_ptr is a NonNull<Task> stored in the thread.
274 // We set the task state to Dead so the scheduler will not
275 // run this task again. The thread was found via a valid
276 // process/thread lookup above.
277 unsafe {
278 let task = task_ptr.as_ptr();
279 (*task).state = ProcessState::Dead;
280 }
281 }
282
283 Ok(())
284 } else {
285 Err(crate::error::KernelError::ThreadNotFound { tid: tid.0 })
286 }
287 } else {
288 Err(crate::error::KernelError::ProcessNotFound { pid: pid.0 })
289 }
290}
291
292/// Block current thread
293pub fn block_thread() {
294 if let Some(thread) = current_thread() {
295 // Update thread state to blocked
296 thread.set_blocked(None);
297 crate::sched::yield_cpu();
298 }
299}
300
301/// Wake up a thread
302pub fn wake_thread(tid: ThreadId) {
303 println!("[PROCESS] Waking thread {}", tid.0);
304
305 // Find thread in current process
306 if let Some(current_process) = get_current_process() {
307 let threads = current_process.threads.lock();
308 if let Some(thread) = threads.get(&tid) {
309 // Mark thread as ready
310 thread.set_ready();
311
312 // Wake up in scheduler if it has a task
313 if let Some(task_ptr) = thread.get_task_ptr() {
314 // SAFETY: task_ptr is a NonNull<Task> stored in the thread.
315 // We read the pid field to wake the process in the
316 // scheduler. The thread was found via the threads lock.
317 unsafe {
318 let task = task_ptr.as_ptr();
319 crate::sched::wake_up_process((*task).pid);
320 }
321 }
322 }
323 }
324}
325
326/// Create a new thread in the current process
327///
328/// Allocates real stack frames for the thread via the frame allocator using
329/// [`ThreadBuilder`]. If `stack_ptr` is non-zero, it overrides the user stack
330/// pointer. If `tls_ptr` is non-zero, it sets the TLS base address.
331pub fn create_thread(
332 entry_point: usize,
333 stack_ptr: usize,
334 arg: usize,
335 tls_ptr: usize,
336) -> crate::error::KernelResult<ThreadId> {
337 if let Some(process) = current_process() {
338 #[cfg(feature = "alloc")]
339 {
340 use alloc::string::String;
341
342 use thread::ThreadBuilder;
343
344 // Build thread with real stack allocation via ThreadBuilder
345 let thread = ThreadBuilder::new(process.pid, String::from("user_thread"), entry_point)
346 .user_stack_size(1024 * 1024) // 1MB user stack
347 .kernel_stack_size(64 * 1024) // 64KB kernel stack
348 .build()?;
349
350 let tid = thread.tid;
351
352 // Override the stack pointer if provided by caller
353 if stack_ptr != 0 {
354 thread.user_stack.set_sp(stack_ptr);
355 }
356
357 // Set up thread-local storage if provided
358 if tls_ptr != 0 {
359 thread.tls.lock().base = tls_ptr;
360 }
361
362 // Store argument in a register (architecture-specific)
363 // For now, we'll skip this as it requires arch-specific code
364 let _ = arg;
365
366 // Add thread to process
367 process.add_thread(thread)?;
368
369 Ok(tid)
370 }
371
372 #[cfg(not(feature = "alloc"))]
373 {
374 let _ = (entry_point, stack_ptr, arg, tls_ptr);
375 Err(crate::error::KernelError::NotImplemented {
376 feature: "create_thread (requires alloc)",
377 })
378 }
379 } else {
380 Err(crate::error::KernelError::ProcessNotFound { pid: 0 })
381 }
382}
383
384/// Set thread CPU affinity
385pub fn set_thread_affinity(tid: ThreadId, cpu_mask: u64) -> crate::error::KernelResult<()> {
386 if let Some(process) = current_process() {
387 if let Some(thread) = process.get_thread(tid) {
388 thread
389 .cpu_affinity
390 .store(cpu_mask as usize, Ordering::SeqCst);
391 Ok(())
392 } else {
393 Err(crate::error::KernelError::ThreadNotFound { tid: tid.0 })
394 }
395 } else {
396 Err(crate::error::KernelError::ProcessNotFound { pid: 0 })
397 }
398}
399
400/// Get current thread ID
401pub fn get_thread_tid() -> ThreadId {
402 if let Some(thread) = current_thread() {
403 thread.tid
404 } else {
405 // Fallback to main thread ID
406 ThreadId(0)
407 }
408}
409
/// Get a list of all process IDs
///
/// Returns `Some(pids)` with every PID currently registered in the global
/// process table, or `None` when the table is empty.
///
/// Only available with the `alloc` feature: the return type uses
/// `alloc::vec::Vec`, so the previous unconditional signature could not
/// compile in non-`alloc` builds (the inner `#[cfg]` blocks did not gate
/// the signature itself). Gating the whole function makes the feature
/// dependency explicit.
#[cfg(feature = "alloc")]
pub fn get_process_list() -> Option<alloc::vec::Vec<u64>> {
    use table::PROCESS_TABLE;

    let mut pids = alloc::vec::Vec::new();

    // Iterate through all processes
    PROCESS_TABLE.for_each(|process| {
        pids.push(process.pid.0);
    });

    // Preserve the historical contract: an empty table yields None.
    if pids.is_empty() {
        None
    } else {
        Some(pids)
    }
}
431
432// get_process is already re-exported at the top of the module