#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
use alloc::string::String;
use core::sync::atomic::{AtomicU64, Ordering};

use super::{ProcessId, ProcessState, ThreadId};

/// Task priority. Lower numeric values run first: `RealTimeHigh` (0)
/// outranks `UserNormal` (60), and `Idle` (99) runs last.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]
pub enum Priority {
    RealTimeHigh = 0,
    RealTimeNormal = 10,
    RealTimeLow = 20,
    SystemHigh = 30,
    SystemNormal = 40,
    UserHigh = 50,
    #[default]
    UserNormal = 60,
    UserLow = 70,
    Idle = 99,
}
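
// Illustrative sketch, not part of the original module: since `Priority` is a
// fieldless `repr(u8)` enum with derived `Ord`, comparisons follow the
// discriminants, so "less than" means "scheduled sooner". Assumes a host test
// harness; a kernel build may not run these tests.
#[cfg(test)]
mod priority_tests {
    use super::Priority;

    #[test]
    fn lower_value_means_higher_priority() {
        assert!(Priority::RealTimeHigh < Priority::SystemNormal);
        assert!(Priority::SystemNormal < Priority::Idle);
        assert_eq!(Priority::default(), Priority::UserNormal);
        assert_eq!(Priority::UserNormal as u8, 60);
    }
}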

/// Scheduling class, checked before numeric priority when picking a task.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SchedClass {
    RealTime,
    Normal,
    Idle,
}

/// Scheduling policy applied within a class.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SchedPolicy {
    Fifo,
    RoundRobin,
    Cfs,
    Idle,
}

/// CPU affinity set: bit N of `mask` is set when the task may run on CPU N.
#[derive(Debug, Clone)]
pub struct CpuSet {
    mask: u64,
}

impl CpuSet {
    /// Allows all 64 representable CPUs.
    pub fn all() -> Self {
        Self { mask: !0u64 }
    }

    /// Returns the raw affinity bitmask.
    pub fn mask(&self) -> u64 {
        self.mask
    }

    /// Allows exactly one CPU; `cpu` must be below 64.
    pub fn single(cpu: u8) -> Self {
        Self { mask: 1u64 << cpu }
    }

    /// Builds a set from a raw bitmask.
    pub fn from_mask(mask: u64) -> Self {
        Self { mask }
    }

    /// Returns `true` if the set allows `cpu`.
    pub fn contains(&self, cpu: u8) -> bool {
        (self.mask & (1u64 << cpu)) != 0
    }

    /// Adds `cpu` to the set.
    pub fn add(&mut self, cpu: u8) {
        self.mask |= 1u64 << cpu;
    }

    /// Removes `cpu` from the set.
    pub fn remove(&mut self, cpu: u8) {
        self.mask &= !(1u64 << cpu);
    }
}

impl Default for CpuSet {
    fn default() -> Self {
        Self::all()
    }
}
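
// Illustrative sketch, not part of the original module: exercises the
// bit-mask semantics of `CpuSet` documented above. Assumes a host test
// harness.
#[cfg(test)]
mod cpu_set_tests {
    use super::CpuSet;

    #[test]
    fn mask_tracks_membership() {
        let mut set = CpuSet::single(2);
        assert!(set.contains(2));
        assert!(!set.contains(3));

        set.add(3);
        set.remove(2);
        assert_eq!(set.mask(), 1 << 3);

        // The default set permits every CPU the 64-bit mask can express.
        assert!(CpuSet::default().contains(63));
    }
}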

/// Per-task scheduling statistics, updated with relaxed atomics so they can
/// be bumped without holding a scheduler lock.
#[derive(Debug, Default)]
pub struct TaskStats {
    pub runtime: AtomicU64,
    pub run_count: AtomicU64,
    pub voluntary_switches: AtomicU64,
    pub involuntary_switches: AtomicU64,
    pub last_run: AtomicU64,
}

/// Architecture-specific saved register state for a task.
#[derive(Debug)]
pub enum TaskContext {
    #[cfg(target_arch = "x86_64")]
    X86_64(crate::arch::x86_64::context::X86_64Context),

    #[cfg(target_arch = "aarch64")]
    AArch64(crate::arch::aarch64::context::AArch64Context),

    #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
    RiscV(crate::arch::riscv::context::RiscVContext),
}

/// A schedulable task: one thread of a process plus its scheduling state.
pub struct Task {
    pub pid: ProcessId,
    pub tid: ThreadId,
    pub parent_pid: ProcessId,
    #[cfg(feature = "alloc")]
    pub name: String,
    pub state: ProcessState,
    pub priority: Priority,
    pub sched_class: SchedClass,
    pub sched_policy: SchedPolicy,
    pub cpu_affinity: CpuSet,
    pub current_cpu: Option<u8>,
    pub time_slice: u32,
    pub vruntime: u64,
    pub stats: TaskStats,
    pub context: TaskContext,
    pub kernel_stack: usize,
    pub user_stack: usize,
    pub page_table: usize,
    pub blocked_on: Option<u64>,
    pub wait_link: Option<usize>,
    pub ready_link: Option<usize>,
    pub thread_ref: Option<core::ptr::NonNull<crate::process::Thread>>,
    pub last_cpu: Option<u8>,
    pub migrations: u32,
    pub tls_base: u64,
    pub priority_boost: Option<Priority>,
    pub ipc_regs: [u64; 7],
    pub has_user_mappings: bool,
}

impl Task {
    /// Creates a ready task with default scheduling parameters (Normal
    /// class, CFS policy, full CPU affinity, default time slice).
    #[cfg(feature = "alloc")]
    pub fn new(
        pid: ProcessId,
        tid: ThreadId,
        name: String,
        entry_point: usize,
        stack_base: usize,
        page_table: usize,
    ) -> Self {
        Self {
            pid,
            tid,
            parent_pid: ProcessId(0),
            name,
            state: ProcessState::Ready,
            priority: Priority::default(),
            sched_class: SchedClass::Normal,
            sched_policy: SchedPolicy::Cfs,
            cpu_affinity: CpuSet::default(),
            current_cpu: None,
            time_slice: DEFAULT_TIME_SLICE,
            vruntime: 0,
            stats: TaskStats::default(),
            context: TaskContext::new(entry_point, stack_base),
            kernel_stack: stack_base,
            user_stack: 0,
            page_table,
            blocked_on: None,
            wait_link: None,
            ready_link: None,
            thread_ref: None,
            last_cpu: None,
            migrations: 0,
            tls_base: 0,
            priority_boost: None,
            ipc_regs: [0; 7],
            has_user_mappings: false,
        }
    }

    /// Returns `true` if this task's affinity mask allows `cpu`.
    pub fn can_run_on(&self, cpu: u8) -> bool {
        self.cpu_affinity.contains(cpu)
    }

    /// Accounts `ticks` of CPU time and records the current tick as the
    /// last time this task ran.
    pub fn update_runtime(&self, ticks: u64) {
        self.stats.runtime.fetch_add(ticks, Ordering::Relaxed);
        self.stats
            .last_run
            .store(crate::arch::timer::get_ticks(), Ordering::Relaxed);
    }

    /// Records one scheduling event and classifies the context switch as
    /// voluntary (the task yielded) or involuntary (it was preempted).
    pub fn mark_scheduled(&self, _cpu: u8, voluntary: bool) {
        self.stats.run_count.fetch_add(1, Ordering::Relaxed);
        if voluntary {
            self.stats
                .voluntary_switches
                .fetch_add(1, Ordering::Relaxed);
        } else {
            self.stats
                .involuntary_switches
                .fetch_add(1, Ordering::Relaxed);
        }
    }

    /// Effective priority value (lower is better). A temporary boost, when
    /// set and numerically lower than the base priority, takes precedence.
    /// Otherwise Normal-class tasks gain up to 20 levels for time spent
    /// waiting since their last run, while RealTime and Idle tasks keep
    /// their static values.
    pub fn effective_priority(&self) -> u8 {
        if let Some(boosted) = self.priority_boost {
            let boosted_val = boosted as u8;
            let base_val = self.priority as u8;
            if boosted_val < base_val {
                return boosted_val;
            }
        }

        match self.sched_class {
            SchedClass::RealTime => self.priority as u8,
            SchedClass::Normal => {
                let wait_time =
                    crate::arch::timer::get_ticks() - self.stats.last_run.load(Ordering::Relaxed);
                let boost = (wait_time / PRIORITY_BOOST_INTERVAL).min(20) as u8;
                (self.priority as u8).saturating_sub(boost)
            }
            SchedClass::Idle => Priority::Idle as u8,
        }
    }
}
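
// Illustrative sketch, not part of the original module: `boosted` is a
// hypothetical helper that re-implements the Normal-class arithmetic from
// `effective_priority` as a pure function, showing one priority level gained
// per `PRIORITY_BOOST_INTERVAL` ticks of waiting, capped at 20 levels.
// Assumes a host test harness.
#[cfg(test)]
mod boost_tests {
    use super::{Priority, PRIORITY_BOOST_INTERVAL};

    // Hypothetical mirror of the Normal-class branch above.
    fn boosted(base: Priority, wait_ticks: u64) -> u8 {
        let boost = (wait_ticks / PRIORITY_BOOST_INTERVAL).min(20) as u8;
        (base as u8).saturating_sub(boost)
    }

    #[test]
    fn waiting_raises_effective_priority() {
        assert_eq!(boosted(Priority::UserNormal, 0), 60);
        assert_eq!(boosted(Priority::UserNormal, 250), 58); // two full intervals
        assert_eq!(boosted(Priority::UserNormal, 10_000), 40); // capped at 20 levels
    }
}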

/// Default time slice, in scheduler ticks.
pub const DEFAULT_TIME_SLICE: u32 = 10;

/// Ticks a Normal-class task must wait to gain one priority level.
pub const PRIORITY_BOOST_INTERVAL: u64 = 100;

impl TaskContext {
    /// Builds the initial x86_64 context for a new task.
    #[cfg(target_arch = "x86_64")]
    pub fn new(entry_point: usize, stack_base: usize) -> Self {
        TaskContext::X86_64(crate::arch::x86_64::context::X86_64Context::new(
            entry_point,
            stack_base,
        ))
    }

    /// Builds the initial AArch64 context for a new task.
    #[cfg(target_arch = "aarch64")]
    pub fn new(entry_point: usize, stack_base: usize) -> Self {
        TaskContext::AArch64(crate::arch::aarch64::context::AArch64Context::new(
            entry_point,
            stack_base,
        ))
    }

    /// Builds the initial RISC-V context for a new task.
    #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
    pub fn new(entry_point: usize, stack_base: usize) -> Self {
        TaskContext::RiscV(crate::arch::riscv::context::RiscVContext::new(
            entry_point,
            stack_base,
        ))
    }
}

/// Monotonic counter for thread IDs, starting at 1.
static NEXT_TID: AtomicU64 = AtomicU64::new(1);

/// Allocates a fresh, monotonically increasing thread ID.
pub fn alloc_tid() -> ThreadId {
    ThreadId(NEXT_TID.fetch_add(1, Ordering::Relaxed))
}
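
// Illustrative sketch, not part of the original module: `alloc_tid` draws from
// a shared atomic counter, so successive calls yield strictly increasing IDs
// even across threads. Assumes a host test harness and that `ThreadId`'s inner
// field is visible here, as it is to `alloc_tid` itself.
#[cfg(test)]
mod tid_tests {
    use super::alloc_tid;

    #[test]
    fn tids_are_strictly_increasing() {
        let a = alloc_tid();
        let b = alloc_tid();
        // The counter only moves forward, so the second ID must be larger.
        assert!(b.0 > a.0);
    }
}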