⚠️ VeridianOS Kernel Documentation - This is low-level `no_std` kernel code. All functions are considered unsafe unless explicitly marked otherwise.

veridian_kernel/sched/
process_compat.rs

1//! Process compatibility wrapper types
2//!
3//! Provides a [`TaskProcessAdapter`] struct that wraps the scheduler's `Task`
4//! representation to present a process-oriented view used by IPC, syscalls,
5//! and other kernel subsystems.
6
7use core::{
8    ptr::NonNull,
9    sync::atomic::{AtomicPtr, AtomicU64, Ordering},
10};
11
12use super::task::Task;
13use crate::process::{ProcessId, ProcessState};
14
/// Process-oriented adapter for scheduler tasks.
///
/// Bridges the scheduler's task-centric model with the rest of the kernel's
/// process-centric view. Contains a reference back to the underlying `Task`.
///
/// Previously named `Process`, renamed to `TaskProcessAdapter` to avoid
/// confusion with `process::pcb::Process` (the full process control block).
pub struct TaskProcessAdapter {
    /// Process identifier copied from the underlying task.
    pub pid: ProcessId,
    /// Process-level view of the task's state; derived via
    /// `task_state_to_process_state` (Creating -> Ready, Zombie -> Dead).
    pub state: ProcessState,
    /// Identifier of whatever this process is blocked on, if any.
    /// NOTE(review): the meaning of the u64 (endpoint id? wait channel?) is
    /// not visible from this file — confirm against `Task::blocked_on`.
    pub blocked_on: Option<u64>,
    /// Underlying task. `None` for the dummy "no current task" adapter and
    /// for adapters built from the process table by `find_process`.
    pub(super) task: Option<NonNull<Task>>,
}
29
30static NEXT_PID: AtomicU64 = AtomicU64::new(1);
31
32/// Allocate new process ID
33pub fn alloc_pid() -> ProcessId {
34    ProcessId(NEXT_PID.fetch_add(1, Ordering::Relaxed))
35}
36
// Thread-safe current process storage using atomic pointer.
// Null until the first call to `current_process` with the `alloc` feature,
// which allocates a `TaskProcessAdapter` via `Box::into_raw` and stores the
// pointer here. The allocation is deliberately leaked (never freed) and
// updated in-place on subsequent calls.
static CURRENT_PROCESS_PTR: AtomicPtr<TaskProcessAdapter> = AtomicPtr::new(core::ptr::null_mut());
39
40/// Map a task state to a process state
41fn task_state_to_process_state(state: ProcessState) -> ProcessState {
42    match state {
43        ProcessState::Creating => ProcessState::Ready,
44        ProcessState::Ready => ProcessState::Ready,
45        ProcessState::Running => ProcessState::Running,
46        ProcessState::Blocked => ProcessState::Blocked,
47        ProcessState::Sleeping => ProcessState::Sleeping,
48        ProcessState::Zombie => ProcessState::Dead,
49        ProcessState::Dead => ProcessState::Dead,
50    }
51}
52
/// Get the current process
///
/// Returns a static mutable reference to a [`TaskProcessAdapter`] wrapper
/// around the currently scheduled task. If no task is running, returns a
/// dummy adapter.
///
/// This function reuses a cached heap allocation to avoid allocating a new
/// `Box<TaskProcessAdapter>` on every call. The first call allocates;
/// subsequent calls update the existing allocation in-place.
///
/// NOTE(review): every call (with `alloc`) returns `&'static mut` to the
/// SAME cached allocation, so two outstanding return values alias mutably —
/// undefined behavior if both are used. Callers must not hold the returned
/// reference across another call to `current_process` or `find_process`.
/// Fixing this properly requires an interface change (return by value, or a
/// guard type), so it is only documented here.
pub fn current_process() -> &'static mut TaskProcessAdapter {
    // Get from per-CPU scheduler.
    //
    // NOTE(review): the temporary `SCHEDULER.lock()` guard created in the
    // `if let` scrutinee lives until the end of the whole if/else (Rust
    // temporary-scope rules), so the scheduler lock is held for the entire
    // body of this function — confirm nothing reached from here tries to
    // re-lock SCHEDULER.
    if let Some(task_ptr) = super::SCHEDULER.lock().current() {
        // SAFETY: `task_ptr` is a valid NonNull<Task> returned by the scheduler
        // which owns and manages task lifetimes. The task remains valid while it
        // is the current task. We read task fields (pid, state, blocked_on) to
        // populate a TaskProcessAdapter wrapper.
        unsafe {
            let task = task_ptr.as_ref();

            #[cfg(feature = "alloc")]
            {
                use alloc::boxed::Box;

                // Reuse the existing allocation if available, otherwise allocate once.
                // SAFETY: CURRENT_PROCESS_PTR is only modified by this function.
                // The pointer, if non-null, was created by Box::into_raw in a
                // previous call and has not been freed. We update it in-place to
                // avoid per-call allocation/deallocation churn.
                //
                // NOTE(review): the null check + store below is a load-then-store,
                // not a compare_exchange, so two racing first calls could each
                // allocate and one allocation would leak. Later calls remain
                // sound either way since neither pointer is ever freed.
                let process_ptr = CURRENT_PROCESS_PTR.load(Ordering::SeqCst);
                if !process_ptr.is_null() {
                    // Update the existing allocation in-place
                    let process = &mut *process_ptr;
                    process.pid = task.pid;
                    process.state = task_state_to_process_state(task.state);
                    process.blocked_on = task.blocked_on;
                    process.task = Some(task_ptr);
                    process
                } else {
                    // First call: allocate and leak a TaskProcessAdapter on the heap
                    let process = Box::new(TaskProcessAdapter {
                        pid: task.pid,
                        state: task_state_to_process_state(task.state),
                        blocked_on: task.blocked_on,
                        task: Some(task_ptr),
                    });
                    let new_ptr = Box::into_raw(process);
                    CURRENT_PROCESS_PTR.store(new_ptr, Ordering::SeqCst);
                    &mut *new_ptr
                }
            }

            #[cfg(not(feature = "alloc"))]
            {
                // Without alloc, fall back to static storage with interior
                // mutability. SyncCell is a minimal Sync wrapper around
                // UnsafeCell for early-boot single-threaded contexts.
                struct SyncCell(core::cell::UnsafeCell<TaskProcessAdapter>);
                unsafe impl Sync for SyncCell {}

                static CURRENT_PROCESS: SyncCell =
                    SyncCell(core::cell::UnsafeCell::new(TaskProcessAdapter {
                        pid: ProcessId(0),
                        state: ProcessState::Running,
                        blocked_on: None,
                        task: None,
                    }));

                // SAFETY: This static is only accessed from the scheduler path
                // which runs with interrupts disabled on a single CPU during
                // early boot (no alloc).
                let current_ref = &mut *CURRENT_PROCESS.0.get();
                current_ref.pid = task.pid;
                current_ref.state = task_state_to_process_state(task.state);
                current_ref.blocked_on = task.blocked_on;
                current_ref.task = Some(task_ptr);

                current_ref
            }
        }
    } else {
        // No current task, return dummy process (PID 0, Running, no task).
        // SAFETY: We reuse the cached allocation if available, or allocate once.
        // The pointer is managed through CURRENT_PROCESS_PTR and is never freed
        // during normal operation (only at kernel shutdown via Drop if applicable).
        unsafe {
            #[cfg(feature = "alloc")]
            {
                use alloc::boxed::Box;

                let process_ptr = CURRENT_PROCESS_PTR.load(Ordering::SeqCst);
                if !process_ptr.is_null() {
                    // Reuse existing allocation with dummy values
                    let process = &mut *process_ptr;
                    process.pid = ProcessId(0);
                    process.state = ProcessState::Running;
                    process.blocked_on = None;
                    process.task = None;
                    process
                } else {
                    // First call: allocate and leak a dummy TaskProcessAdapter
                    let dummy = Box::new(TaskProcessAdapter {
                        pid: ProcessId(0),
                        state: ProcessState::Running,
                        blocked_on: None,
                        task: None,
                    });
                    let new_ptr = Box::into_raw(dummy);
                    CURRENT_PROCESS_PTR.store(new_ptr, Ordering::SeqCst);
                    &mut *new_ptr
                }
            }

            #[cfg(not(feature = "alloc"))]
            {
                // Items declared inside a block are scoped to that block, so
                // SyncCell must be re-declared here; it cannot be shared with
                // the copy above without hoisting it to module scope.
                struct SyncCell(core::cell::UnsafeCell<TaskProcessAdapter>);
                unsafe impl Sync for SyncCell {}

                static DUMMY_PROCESS: SyncCell =
                    SyncCell(core::cell::UnsafeCell::new(TaskProcessAdapter {
                        pid: ProcessId(0),
                        state: ProcessState::Running,
                        blocked_on: None,
                        task: None,
                    }));
                // SAFETY: Accessed only during early boot without alloc, single-
                // threaded context.
                &mut *DUMMY_PROCESS.0.get()
            }
        }
    }
}
184
185/// Switch to another process
186pub fn switch_to_process(target: &TaskProcessAdapter) {
187    if let Some(task_ptr) = target.task {
188        let mut scheduler = super::SCHEDULER.lock();
189        scheduler.enqueue(task_ptr);
190        scheduler.schedule();
191    }
192}
193
194// Thread-safe found process storage using atomic pointer
195static FOUND_PROCESS_PTR: AtomicPtr<TaskProcessAdapter> = AtomicPtr::new(core::ptr::null_mut());
196
197/// Find process by PID
198pub fn find_process(pid: ProcessId) -> Option<&'static mut TaskProcessAdapter> {
199    // First check if it's the current process (fast path)
200    let current = current_process();
201    if current.pid == pid {
202        return Some(current);
203    }
204
205    // Otherwise, look it up in the process table
206    #[cfg(feature = "alloc")]
207    {
208        // Get the actual process from the process table
209        if let Some(process) = crate::process::table::get_process_mut(pid) {
210            use alloc::boxed::Box;
211
212            // Create a TaskProcessAdapter wrapper for the scheduler
213            let found = Box::new(TaskProcessAdapter {
214                pid: process.pid,
215                state: process.get_state(),
216                blocked_on: None, // Would need to be tracked
217                task: None,       // Would need task mapping
218            });
219
220            // SAFETY: `found` is a freshly heap-allocated Box. We leak it via
221            // `into_raw` and store the pointer atomically. The old pointer
222            // (from a previous call) is reclaimed via `Box::from_raw`. This is
223            // safe because the atomic swap provides exclusive ownership of the
224            // old allocation.
225            unsafe {
226                let found_ptr = Box::into_raw(found);
227                let old_ptr = FOUND_PROCESS_PTR.swap(found_ptr, Ordering::SeqCst);
228
229                if !old_ptr.is_null() {
230                    drop(Box::from_raw(old_ptr));
231                }
232
233                Some(&mut *found_ptr)
234            }
235        } else {
236            None
237        }
238    }
239
240    #[cfg(not(feature = "alloc"))]
241    None
242}