// veridian_kernel/sched/task_management.rs

use core::{ptr::NonNull, sync::atomic::Ordering};

#[cfg(feature = "alloc")]
use super::task;
use super::{
    scheduler, smp,
    task::{CpuSet, Priority, SchedClass, Task, TaskContext},
};
use crate::{
    arch::context::ThreadContext,
    error::KernelError,
    process::{thread::ThreadState, ProcessId as ProcId, ProcessState, ThreadId as ThrId},
};

/// Creates a new kernel task with its own stack and enqueues it on the
/// scheduler.
///
/// `stack_size` is rounded up to whole 4 KiB pages (minimum one page). The
/// `Task` allocation is deliberately leaked: the scheduler owns it for the
/// task's lifetime and reclaims it on exit.
///
/// # Errors
/// Returns [`KernelError::ResourceExhausted`] if the stack frames cannot be
/// allocated.
#[cfg(feature = "alloc")]
pub fn create_task(
    name: &str,
    entry_point: usize,
    stack_size: usize,
    priority: Priority,
) -> Result<ProcId, KernelError> {
    extern crate alloc;
    use alloc::{boxed::Box, string::String};

    let pid = super::process_compat::alloc_pid();
    let tid = task::alloc_tid();

    // Round up to whole pages; guarantee at least one page so the
    // `frames[0]` access below cannot index into an empty allocation when
    // stack_size == 0 (the original would panic there).
    let stack_pages = stack_size.div_ceil(0x1000).max(1);
    let frames = crate::mm::allocate_pages(stack_pages, None).map_err(|_| {
        KernelError::ResourceExhausted {
            resource: "stack frames",
        }
    })?;
    // Frame number -> physical byte address (4 KiB pages).
    let stack_phys = frames[0].as_u64() * 0x1000;
    let stack_base = crate::mm::phys_to_virt_addr(stack_phys) as usize;

    let page_table = crate::mm::get_kernel_page_table();

    let mut new_task = Box::new(Task::new(
        pid,
        tid,
        String::from(name),
        entry_point,
        // Stack grows downward: the initial stack pointer is the top of the
        // requested region.
        stack_base + stack_size,
        page_table,
    ));

    new_task.priority = priority;

    // Leak the box: the scheduler owns the task from here on.
    // `NonNull::from` is infallible, unlike NonNull::new + expect.
    let task_ptr = NonNull::from(Box::leak(new_task));

    scheduler::register_task(pid.0, task_ptr);

    scheduler::SCHEDULER.lock().enqueue(task_ptr);

    Ok(pid)
}
75
/// Terminates the currently running task; never returns.
///
/// Unregisters the current task, marks it (and its backing thread, if any)
/// as dead, publishes `exit_code` for waiters, unlinks the task from the
/// scheduler queues, and defers freeing the leaked `Task` allocation until
/// ~100 timer ticks later — so the CPU has switched off this task's kernel
/// stack before the memory is reclaimed.
#[allow(unused_variables)]
pub fn exit_task(exit_code: i32) {
    #[cfg(feature = "alloc")]
    extern crate alloc;
    #[cfg(feature = "alloc")]
    use alloc::vec::Vec;

    #[cfg(feature = "alloc")]
    use spin::Lazy;

    // Wrapper so the raw task pointer can live in a `static` queue;
    // `NonNull<Task>` is neither Send nor Sync on its own.
    #[derive(Clone, Copy)]
    struct CleanupTaskPtr(core::ptr::NonNull<Task>);

    // SAFETY: the wrapped pointer is only ever consumed by the deferred
    // cleanup path after this task has stopped running; access to the queue
    // itself is serialized by its spin::Mutex.
    unsafe impl Send for CleanupTaskPtr {}
    unsafe impl Sync for CleanupTaskPtr {}

    let mut scheduler = super::SCHEDULER.lock();

    if let Some(current_task) = scheduler.current() {
        unsafe {
            let task_mut = current_task.as_ptr();
            let task_ref = &*task_mut;

            // Remove from the global task table first, so nothing else can
            // look this task up while it is being torn down.
            scheduler::unregister_task(task_ref.pid.0);

            (*task_mut).state = ProcessState::Dead;

            // Detach and kill the backing thread object, publishing the
            // exit code (Release pairs with the waiter's Acquire load).
            if let Some(thread_ptr) = task_ref.thread_ref {
                let thread = thread_ptr.as_ref();

                thread.set_task_ptr(None);

                thread.set_state(ThreadState::Dead);

                thread.exit_code.store(exit_code as u32, Ordering::Release);
            }

            // Unlink from ready/wait queues and clear the CPU binding so the
            // scheduler cannot pick this task again.
            (*task_mut).ready_link = None;
            (*task_mut).wait_link = None;

            (*task_mut).current_cpu = None;

            #[cfg(feature = "alloc")]
            {
                // Defer freeing the Task ~100 ticks so we are guaranteed to
                // have switched off its kernel stack first.
                // NOTE(review): no consumer of CLEANUP_QUEUE is visible in
                // this file — confirm a drain exists elsewhere, otherwise
                // dead Task allocations leak.
                static CLEANUP_QUEUE: Lazy<spin::Mutex<Vec<(CleanupTaskPtr, u64)>>> =
                    Lazy::new(|| spin::Mutex::new(Vec::new()));

                let cleanup_tick = crate::arch::timer::get_ticks() + 100;
                CLEANUP_QUEUE
                    .lock()
                    .push((CleanupTaskPtr(current_task), cleanup_tick));
            }
        }

        scheduler.schedule();
    }

    // schedule() should not return into a dead task; idle forever as a
    // defensive backstop in case it does.
    loop {
        crate::arch::idle();
    }
}
171
/// Builds a scheduler [`Task`] for an existing process thread, registers it,
/// and links the two objects together.
///
/// The task inherits the thread's entry point, kernel stack top, priority
/// band, scheduling class, CPU affinity, and TLS base. The `Task` allocation
/// is deliberately leaked: the scheduler owns it for the thread's lifetime.
///
/// # Errors
/// Currently always returns `Ok`; the `Result` is kept so future allocation
/// failures can be reported without an interface change.
#[cfg(feature = "alloc")]
pub fn create_task_from_thread(
    process_id: ProcId,
    thread_id: ThrId,
    thread: &crate::process::Thread,
) -> Result<NonNull<Task>, KernelError> {
    extern crate alloc;
    use alloc::{boxed::Box, string::String};

    // Take the context lock once and read everything we need. The original
    // acquired this mutex twice (once for the instruction pointer, again
    // later for tls_base), leaving a window where the context could change
    // between the two reads.
    let (entry_point, tls_base) = {
        let ctx = thread.context.lock();
        (ctx.get_instruction_pointer(), ctx.tls_base())
    };
    let kernel_stack_top = thread.kernel_stack.top();

    let mut new_task = Box::new(Task::new(
        process_id,
        thread_id,
        String::from(&thread.name),
        entry_point,
        kernel_stack_top,
        // Page-table argument is 0 here (create_task passes a real table) —
        // presumably "inherit/unset"; TODO confirm Task::new's semantics.
        0,
    ));

    // Map the thread's numeric priority onto the scheduler's priority bands
    // (lower number = more urgent).
    new_task.priority = match thread.priority {
        0..=10 => Priority::RealTimeHigh,
        11..=20 => Priority::RealTimeNormal,
        21..=30 => Priority::RealTimeLow,
        31..=40 => Priority::SystemHigh,
        41..=50 => Priority::SystemNormal,
        51..=60 => Priority::UserHigh,
        61..=70 => Priority::UserNormal,
        71..=80 => Priority::UserLow,
        _ => Priority::Idle,
    };

    // Scheduling class follows directly from the priority band.
    new_task.sched_class = if new_task.priority <= Priority::RealTimeLow {
        SchedClass::RealTime
    } else if new_task.priority == Priority::Idle {
        SchedClass::Idle
    } else {
        SchedClass::Normal
    };

    new_task.cpu_affinity = CpuSet::from_mask(thread.cpu_affinity.load(Ordering::Relaxed) as u64);

    new_task.context = TaskContext::new(entry_point, kernel_stack_top);
    new_task.tls_base = tls_base;

    new_task.user_stack = thread.user_stack.top();

    // Back-pointer to the owning thread. The const->mut cast mirrors the
    // original; Task only stores the pointer, and thread mutation goes
    // through Thread's own interior-mutability APIs.
    new_task.thread_ref = NonNull::new(thread as *const _ as *mut _);

    // Leak the box: the scheduler owns the task from here on.
    // `NonNull::from` is infallible, unlike NonNull::new + expect.
    let task_ptr = NonNull::from(Box::leak(new_task));

    scheduler::register_task(process_id.0, task_ptr);

    thread.set_task_ptr(Some(task_ptr));

    Ok(task_ptr)
}
250
/// Creates a task for `thread` and dispatches it to a CPU.
///
/// With an unrestricted affinity mask (`!0`), the globally least-loaded CPU
/// is chosen; otherwise the least-loaded *online* CPU within the affinity
/// mask is picked (falling back to CPU 0 if no masked CPU is online).
///
/// # Errors
/// Propagates any error from [`create_task_from_thread`].
#[cfg(feature = "alloc")]
pub fn schedule_thread(
    process_id: ProcId,
    thread_id: ThrId,
    thread: &crate::process::Thread,
) -> Result<(), KernelError> {
    let task_ptr = create_task_from_thread(process_id, thread_id, thread)?;

    let target_cpu = if thread.cpu_affinity.load(Ordering::Relaxed) == !0usize {
        // No affinity restriction: let SMP pick the least-loaded CPU.
        smp::find_least_loaded_cpu()
    } else {
        let mut best_cpu = 0;
        // Track the best candidate's load as an Option instead of the
        // original `min_load = 100` sentinel, which silently rejected every
        // eligible CPU with load >= 100 and fell back to CPU 0 even when
        // CPU 0 was outside the affinity mask.
        let mut min_load = None;
        let affinity = thread.cpu_affinity.load(Ordering::Relaxed) as u64;

        // NOTE(review): the scan is capped at 8 CPUs — confirm this matches
        // the SMP configuration's maximum CPU count.
        for cpu in 0..8 {
            if (affinity & (1 << cpu)) != 0 {
                if let Some(cpu_data) = smp::per_cpu(cpu) {
                    if cpu_data.cpu_info.is_online() {
                        let load = cpu_data.cpu_info.load.load(Ordering::Relaxed);
                        if min_load.map_or(true, |m| load < m) {
                            min_load = Some(load);
                            best_cpu = cpu;
                        }
                    }
                }
            }
        }
        best_cpu
    };

    scheduler::schedule_on_cpu(target_cpu, task_ptr);
    Ok(())
}