⚠️ VeridianOS Kernel Documentation - This is low-level `no_std` kernel code; all functions are unsafe unless explicitly marked otherwise.

veridian_kernel/sched/task_management.rs

//! Task creation, exit, and thread scheduling
//!
//! Provides functions to create scheduler tasks from process threads,
//! schedule them on appropriate CPUs, and handle task exit with deferred
//! cleanup.
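//!
//! A typical lifecycle, sketched below (illustrative only; `worker_entry` is
//! an assumed symbol, not defined in this file):
//!
//! ```ignore
//! let pid = create_task("worker", worker_entry as usize, 16 * 1024, Priority::UserNormal)?;
//! // ... later, from inside the running task:
//! exit_task(0); // marks the task Dead and defers deallocation
//! ```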

// Task lifecycle management -- exercised via process creation/exit paths

use core::{ptr::NonNull, sync::atomic::Ordering};

#[cfg(feature = "alloc")]
use super::task;
use super::{
    scheduler, smp,
    task::{CpuSet, Priority, SchedClass, Task, TaskContext},
};
use crate::{
    arch::context::ThreadContext,
    error::KernelError,
    process::{thread::ThreadState, ProcessId as ProcId, ProcessState, ThreadId as ThrId},
};

/// Create a new user task and enqueue it on the scheduler.
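///
/// # Example
///
/// A minimal usage sketch (illustrative; `init_entry` is an assumed symbol):
///
/// ```ignore
/// let pid = create_task("init", init_entry as usize, 16 * 1024, Priority::UserNormal)?;
/// ```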
#[cfg(feature = "alloc")]
pub fn create_task(
    name: &str,
    entry_point: usize,
    stack_size: usize,
    priority: Priority,
) -> Result<ProcId, KernelError> {
    extern crate alloc;
    use alloc::{boxed::Box, string::String};

    // Allocate PID and TID
    let pid = super::process_compat::alloc_pid();
    let tid = task::alloc_tid();

    // Allocate kernel stack frames (stack_size in bytes, 4 KiB pages)
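    // e.g., a 16 KiB (0x4000-byte) stack rounds up to 0x4000.div_ceil(0x1000) = 4 pages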
    let stack_pages = stack_size.div_ceil(0x1000);
    let frames = crate::mm::allocate_pages(stack_pages, None).map_err(|_| {
        KernelError::ResourceExhausted {
            resource: "stack frames",
        }
    })?;
    // Compute the first frame's physical address (frame number * page size),
    // then map it to a virtual address for the stack base
    let stack_phys = frames[0].as_u64() * 0x1000;
    let stack_base = crate::mm::phys_to_virt_addr(stack_phys) as usize;

    // Use kernel's page table for now (kernel tasks share address space)
    let page_table = crate::mm::get_kernel_page_table();

    // Create task
    let mut new_task = Box::new(Task::new(
        pid,
        tid,
        String::from(name),
        entry_point,
        stack_base + stack_size,
        page_table,
    ));

    new_task.priority = priority;

    // Leak the Box to get a stable pointer, then enqueue
    let task_ptr =
        NonNull::new(Box::leak(new_task) as *mut _).expect("Box::leak returned null (impossible)");

    // Register in global PID-to-Task registry for O(log n) IPC lookup
    scheduler::register_task(pid.0, task_ptr);

    scheduler::SCHEDULER.lock().enqueue(task_ptr);

    Ok(pid)
}
/// Exit the current task, marking it dead and deferring its deallocation.
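///
/// # Example
///
/// Illustrative use at the end of a task body (`exit_task` does not return):
///
/// ```ignore
/// // Work is done; report success and hand the CPU back to the scheduler.
/// exit_task(0);
/// ```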
#[allow(unused_variables)]
pub fn exit_task(exit_code: i32) {
    #[cfg(feature = "alloc")]
    extern crate alloc;
    #[cfg(feature = "alloc")]
    use alloc::vec::Vec;

    #[cfg(feature = "alloc")]
    use spin::Lazy;

    /// Wrapper to make NonNull<Task> Send/Sync for the cleanup queue.
    ///
    /// # Safety
    ///
    /// The cleanup queue is protected by a spin::Mutex, ensuring exclusive
    /// access. Task pointers in the queue are only deallocated after a
    /// sufficient tick delay to ensure no other CPU holds a reference.
    #[derive(Clone, Copy)]
    struct CleanupTaskPtr(core::ptr::NonNull<Task>);

    // SAFETY: CleanupTaskPtr is only accessed under the CLEANUP_QUEUE mutex.
    // Task memory outlives the queue entry due to the deferred cleanup delay.
    unsafe impl Send for CleanupTaskPtr {}
    // SAFETY: Same as Send -- all access synchronized via mutex.
    unsafe impl Sync for CleanupTaskPtr {}
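
    // A possible deferred-drain shape (hypothetical sketch; the reclaim path is
    // not implemented in this file): once `get_ticks()` passes an entry's
    // cleanup tick, the leaked Box can be reconstituted and dropped, e.g.
    //     queue.retain(|(ptr, tick)| {
    //         if crate::arch::timer::get_ticks() >= *tick {
    //             drop(unsafe { Box::from_raw(ptr.0.as_ptr()) });
    //             false
    //         } else {
    //             true
    //         }
    //     });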

    let mut sched = scheduler::SCHEDULER.lock();

    if let Some(current_task) = sched.current() {
        // SAFETY: `current_task` is a valid NonNull<Task> from the scheduler.
        // We hold the scheduler lock ensuring exclusive access. We update
        // the task's state to Dead, clean up thread references, and clear
        // scheduler data structure links.
        unsafe {
            let task_mut = current_task.as_ptr();
            let task_ref = &*task_mut;

            // Unregister from global PID-to-Task registry
            scheduler::unregister_task(task_ref.pid.0);

            // Mark task as dead
            (*task_mut).state = ProcessState::Dead;

            // Clean up thread reference if exists
            if let Some(thread_ptr) = task_ref.thread_ref {
                // SAFETY: thread_ptr was set during task creation and points
                // to a valid Thread. We update its state and exit code.
                let thread = thread_ptr.as_ref();

                // Remove task pointer from thread
                thread.set_task_ptr(None);

                // Mark thread as dead
                thread.set_state(ThreadState::Dead);

                // Store exit code
                thread.exit_code.store(exit_code as u32, Ordering::Release);
            }

            // Clean up scheduler data structures.
            // Clear queue links -- the task is already dequeued from the
            // ready queue (it was running), so we just null the links to
            // prevent dangling references during deferred cleanup.
            (*task_mut).ready_link = None;
            (*task_mut).wait_link = None;

            // Clear current CPU assignment
            (*task_mut).current_cpu = None;

            // Mark task for deferred cleanup
            // We can't free immediately as other CPUs might have references
            #[cfg(feature = "alloc")]
            {
                // Add to cleanup queue for deferred deallocation
                static CLEANUP_QUEUE: Lazy<spin::Mutex<Vec<(CleanupTaskPtr, u64)>>> =
                    Lazy::new(|| spin::Mutex::new(Vec::new()));

                // Compute the cleanup deadline: current tick plus a grace period
                let cleanup_tick = crate::arch::timer::get_ticks() + 100; // Cleanup after 100 ticks
                CLEANUP_QUEUE
                    .lock()
                    .push((CleanupTaskPtr(current_task), cleanup_tick));
            }
        }

        // Schedule another task
        sched.schedule();
    }

    // Should not return
    loop {
        crate::arch::idle();
    }
}
/// Create a scheduler task from a process thread.
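///
/// # Example
///
/// Illustrative sketch; assumes a `Thread` already built by the process layer:
///
/// ```ignore
/// let task_ptr = create_task_from_thread(pid, tid, &thread)?;
/// // The task is leaked and registered; the scheduler now owns the pointer.
/// ```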
#[cfg(feature = "alloc")]
pub fn create_task_from_thread(
    process_id: ProcId,
    thread_id: ThrId,
    thread: &crate::process::Thread,
) -> Result<NonNull<Task>, KernelError> {
    extern crate alloc;
    use alloc::{boxed::Box, string::String};

    // Get thread context to extract entry point and stack
    let ctx = thread.context.lock();
    let entry_point = ctx.get_instruction_pointer();
    let kernel_stack_top = thread.kernel_stack.top();
    drop(ctx);

    // Create scheduler task from process thread
    let mut new_task = Box::new(Task::new(
        process_id,
        thread_id,
        String::from(&thread.name),
        entry_point,
        kernel_stack_top,
        0, // Will be set to process page table
    ));

    // Set priority based on thread priority (numeric value)
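    // (e.g., a thread priority of 65 falls in 61..=70 and maps to Priority::UserNormal)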
    new_task.priority = match thread.priority {
        0..=10 => Priority::RealTimeHigh,
        11..=20 => Priority::RealTimeNormal,
        21..=30 => Priority::RealTimeLow,
        31..=40 => Priority::SystemHigh,
        41..=50 => Priority::SystemNormal,
        51..=60 => Priority::UserHigh,
        61..=70 => Priority::UserNormal,
        71..=80 => Priority::UserLow,
        _ => Priority::Idle,
    };

    // Set scheduling class
    new_task.sched_class = if new_task.priority <= Priority::RealTimeLow {
        SchedClass::RealTime
    } else if new_task.priority == Priority::Idle {
        SchedClass::Idle
    } else {
        SchedClass::Normal
    };

    // Set CPU affinity
    new_task.cpu_affinity = CpuSet::from_mask(thread.cpu_affinity.load(Ordering::Relaxed) as u64);

    // Create the new task's context from the thread context
    let thread_ctx = thread.context.lock();
    new_task.context = TaskContext::new(entry_point, kernel_stack_top);
    new_task.tls_base = thread_ctx.tls_base();
    drop(thread_ctx);

    // Set user stack
    new_task.user_stack = thread.user_stack.top();

    // Get thread pointer
    let thread_ptr = NonNull::new(thread as *const _ as *mut _);
    new_task.thread_ref = thread_ptr;

    // Get the task pointer
    // Box::leak always returns a non-null pointer
    let task_ptr =
        NonNull::new(Box::leak(new_task) as *mut _).expect("Box::leak returned null (impossible)");

    // Register in global PID-to-Task registry for O(log n) IPC lookup
    scheduler::register_task(process_id.0, task_ptr);

    // Link thread and task bidirectionally
    thread.set_task_ptr(Some(task_ptr));

    // Return pointer to leaked task
    Ok(task_ptr)
}
/// Create a scheduler task for a process thread and schedule it on a suitable CPU.
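///
/// # Example
///
/// Illustrative sketch (identifiers assumed from the process layer):
///
/// ```ignore
/// schedule_thread(pid, tid, &thread)?; // lands on the least loaded permitted CPU
/// ```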
#[cfg(feature = "alloc")]
pub fn schedule_thread(
    process_id: ProcId,
    thread_id: ThrId,
    thread: &crate::process::Thread,
) -> Result<(), KernelError> {
    let task_ptr = create_task_from_thread(process_id, thread_id, thread)?;

    // Find best CPU for this task
    let target_cpu = if thread.cpu_affinity.load(Ordering::Relaxed) == !0usize {
        // No affinity restriction, use least loaded CPU
        smp::find_least_loaded_cpu()
    } else {
        // Find the least loaded CPU that matches the affinity mask
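        // (e.g., an affinity mask of 0b0110 restricts the search to CPUs 1 and 2)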
        let mut best_cpu = 0;
        let mut min_load = 100;
        let affinity = thread.cpu_affinity.load(Ordering::Relaxed) as u64;

        // Check the first 8 CPUs
        for cpu in 0..8 {
            if (affinity & (1 << cpu)) != 0 {
                if let Some(cpu_data) = smp::per_cpu(cpu) {
                    if cpu_data.cpu_info.is_online() {
                        let load = cpu_data.cpu_info.load.load(Ordering::Relaxed);
                        if load < min_load {
                            min_load = load;
                            best_cpu = cpu;
                        }
                    }
                }
            }
        }
        best_cpu
    };

    // Schedule on target CPU
    scheduler::schedule_on_cpu(target_cpu, task_ptr);
    Ok(())
}