⚠️ VeridianOS Kernel Documentation - This is low-level `no_std` kernel code. Functions are safe Rust unless explicitly marked `unsafe`; treat scheduler-internal invariants with care.

veridian_kernel/sched/
task.rs

1//! Task management and task control block (TCB) implementation
2
3#[cfg(feature = "alloc")]
4extern crate alloc;
5#[cfg(feature = "alloc")]
6use alloc::string::String;
7use core::sync::atomic::{AtomicU64, Ordering};
8
9use super::{ProcessId, ProcessState, ThreadId};
10
/// Task priority levels.
///
/// Lower numeric value = higher scheduling priority (0 is most urgent,
/// 99 is idle), so the derived `Ord` sorts from highest priority
/// (`RealTimeHigh`) to lowest (`Idle`). Discriminants are spaced in
/// steps of 10 to leave room for dynamic boosting between levels.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]
pub enum Priority {
    /// Real-time highest priority
    RealTimeHigh = 0,
    /// Real-time normal priority
    RealTimeNormal = 10,
    /// Real-time low priority
    RealTimeLow = 20,
    /// System high priority
    SystemHigh = 30,
    /// System normal priority
    SystemNormal = 40,
    /// User high priority
    UserHigh = 50,
    /// User normal priority (default for new tasks)
    #[default]
    UserNormal = 60,
    /// User low priority
    UserLow = 70,
    /// Idle priority
    Idle = 99,
}
35
/// Scheduling class — coarse grouping that selects how a task's
/// effective priority is computed (see `Task::effective_priority`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SchedClass {
    /// Real-time scheduling (FIFO/RR): static priority, no wait boost
    RealTime,
    /// Normal scheduling (CFS-like): priority boosted by wait time
    Normal,
    /// Idle scheduling: always pinned to `Priority::Idle`
    Idle,
}
46
/// Task scheduling policy — the concrete algorithm within a
/// `SchedClass` (`Fifo`/`RoundRobin` are real-time, `Cfs` is normal).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum SchedPolicy {
    /// First-In-First-Out (real-time)
    Fifo,
    /// Round-Robin (real-time)
    RoundRobin,
    /// Completely Fair Scheduler
    Cfs,
    /// Idle tasks
    Idle,
}
59
/// CPU affinity mask.
///
/// Bit N of `mask` set means the task may run on CPU N. Only CPUs
/// 0..64 are representable; out-of-range CPU indices are treated as
/// "not in the set" rather than panicking on shift overflow.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct CpuSet {
    /// Bitmap of allowed CPUs (bit N = CPU N)
    mask: u64,
}

impl CpuSet {
    /// Create new CPU set with all representable CPUs allowed.
    pub fn all() -> Self {
        Self { mask: !0u64 }
    }

    /// Get the raw CPU bitmap.
    pub fn mask(&self) -> u64 {
        self.mask
    }

    /// Create new CPU set containing only `cpu`.
    ///
    /// A CPU index >= 64 yields an empty set instead of a shift-overflow
    /// panic.
    pub fn single(cpu: u8) -> Self {
        Self { mask: Self::bit(cpu) }
    }

    /// Create from raw mask.
    pub fn from_mask(mask: u64) -> Self {
        Self { mask }
    }

    /// Check if `cpu` is in the set (always false for `cpu >= 64`).
    pub fn contains(&self, cpu: u8) -> bool {
        (self.mask & Self::bit(cpu)) != 0
    }

    /// Add `cpu` to the set (no-op for `cpu >= 64`).
    pub fn add(&mut self, cpu: u8) {
        self.mask |= Self::bit(cpu);
    }

    /// Remove `cpu` from the set (no-op for `cpu >= 64`).
    pub fn remove(&mut self, cpu: u8) {
        self.mask &= !Self::bit(cpu);
    }

    /// Bit for `cpu`, or 0 when the index does not fit a 64-bit mask.
    ///
    /// Guards against `1u64 << cpu` overflowing: that is a panic in
    /// debug builds and a silently masked shift amount in release.
    fn bit(cpu: u8) -> u64 {
        1u64.checked_shl(u32::from(cpu)).unwrap_or(0)
    }
}

impl Default for CpuSet {
    /// Default affinity: all CPUs allowed.
    fn default() -> Self {
        Self::all()
    }
}
109
/// Per-task scheduling statistics.
///
/// All counters are atomics updated with `Ordering::Relaxed` (see
/// `Task::update_runtime` / `Task::mark_scheduled`): they are
/// best-effort accounting, never used for synchronization.
#[derive(Debug, Default)]
pub struct TaskStats {
    /// Total time spent running (in ticks)
    pub runtime: AtomicU64,
    /// Number of times scheduled
    pub run_count: AtomicU64,
    /// Number of voluntary context switches (task yielded or blocked)
    pub voluntary_switches: AtomicU64,
    /// Number of involuntary context switches (task was preempted)
    pub involuntary_switches: AtomicU64,
    /// Last time scheduled (in ticks)
    pub last_run: AtomicU64,
}
124
/// Architecture-specific task context (saved register state).
///
/// Exactly one variant exists per compilation target thanks to the
/// `cfg` gates; the enum form keeps the rest of the scheduler
/// architecture-agnostic.
#[derive(Debug)]
pub enum TaskContext {
    /// x86_64 task context
    #[cfg(target_arch = "x86_64")]
    X86_64(crate::arch::x86_64::context::X86_64Context),

    /// AArch64 task context
    #[cfg(target_arch = "aarch64")]
    AArch64(crate::arch::aarch64::context::AArch64Context),

    /// RISC-V task context
    #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
    RiscV(crate::arch::riscv::context::RiscVContext),
}
140
/// Task Control Block (TCB): per-thread scheduling state.
///
/// Created via [`Task::new`]; identity fields (`pid`, `tid`) are set at
/// construction, most other fields are mutated by the scheduler.
pub struct Task {
    /// Process ID
    pub pid: ProcessId,
    /// Thread ID
    pub tid: ThreadId,
    /// Parent process ID (PID 0 until set by the caller)
    pub parent_pid: ProcessId,
    /// Task name
    #[cfg(feature = "alloc")]
    pub name: String,
    /// Task state
    pub state: ProcessState,
    /// Scheduling priority (base; see `effective_priority` for dynamic)
    pub priority: Priority,
    /// Scheduling class
    pub sched_class: SchedClass,
    /// Scheduling policy
    pub sched_policy: SchedPolicy,
    /// CPU affinity
    pub cpu_affinity: CpuSet,
    /// Current CPU (if running)
    pub current_cpu: Option<u8>,
    /// Time slice remaining (in ticks)
    pub time_slice: u32,
    /// Virtual runtime (for CFS)
    pub vruntime: u64,
    /// Task statistics
    pub stats: TaskStats,
    /// Architecture-specific context
    pub context: TaskContext,
    /// Kernel stack pointer
    pub kernel_stack: usize,
    /// User stack pointer
    pub user_stack: usize,
    /// Page table base address
    pub page_table: usize,
    /// IPC endpoint blocked on (if any)
    pub blocked_on: Option<u64>,
    /// Wait queue link (for blocking)
    /// NOTE(review): stored as a raw `usize` — presumably an index or
    /// pointer into a wait queue; confirm against the queue code.
    pub wait_link: Option<usize>,
    /// Ready queue link (same encoding caveat as `wait_link`)
    pub ready_link: Option<usize>,
    /// Thread reference (for state synchronization)
    pub thread_ref: Option<core::ptr::NonNull<crate::process::Thread>>,
    /// Last CPU this task ran on
    pub last_cpu: Option<u8>,
    /// Number of times this task has been migrated
    pub migrations: u32,
    /// TLS base snapshot for context switch
    pub tls_base: u64,
    /// Priority boost from priority inheritance protocol.
    /// When a high-priority task blocks on a resource held by this task,
    /// the holder's effective priority is boosted to prevent inversion.
    pub priority_boost: Option<Priority>,
    /// IPC register set for fast-path direct message transfer.
    /// Sender copies message data here; receiver reads on wake-up.
    pub ipc_regs: [u64; 7],
    /// Whether this task has user-space address mappings.
    /// Used for lazy TLB optimization: kernel threads skip CR3 reload.
    pub has_user_mappings: bool,
}
203
204impl Task {
205    /// Create new task
206    #[cfg(feature = "alloc")]
207    pub fn new(
208        pid: ProcessId,
209        tid: ThreadId,
210        name: String,
211        entry_point: usize,
212        stack_base: usize,
213        page_table: usize,
214    ) -> Self {
215        Self {
216            pid,
217            tid,
218            parent_pid: ProcessId(0),
219            name,
220            state: ProcessState::Ready,
221            priority: Priority::default(),
222            sched_class: SchedClass::Normal,
223            sched_policy: SchedPolicy::Cfs,
224            cpu_affinity: CpuSet::default(),
225            current_cpu: None,
226            time_slice: DEFAULT_TIME_SLICE,
227            vruntime: 0,
228            stats: TaskStats::default(),
229            context: TaskContext::new(entry_point, stack_base),
230            kernel_stack: stack_base,
231            user_stack: 0,
232            page_table,
233            blocked_on: None,
234            wait_link: None,
235            ready_link: None,
236            thread_ref: None,
237            last_cpu: None,
238            migrations: 0,
239            tls_base: 0,
240            priority_boost: None,
241            ipc_regs: [0; 7],
242            has_user_mappings: false,
243        }
244    }
245
246    /// Check if task can run on given CPU
247    pub fn can_run_on(&self, cpu: u8) -> bool {
248        self.cpu_affinity.contains(cpu)
249    }
250
251    /// Update runtime statistics
252    pub fn update_runtime(&self, ticks: u64) {
253        self.stats.runtime.fetch_add(ticks, Ordering::Relaxed);
254        self.stats
255            .last_run
256            .store(crate::arch::timer::get_ticks(), Ordering::Relaxed);
257    }
258
259    /// Mark as scheduled
260    pub fn mark_scheduled(&self, _cpu: u8, voluntary: bool) {
261        self.stats.run_count.fetch_add(1, Ordering::Relaxed);
262        if voluntary {
263            self.stats
264                .voluntary_switches
265                .fetch_add(1, Ordering::Relaxed);
266        } else {
267            self.stats
268                .involuntary_switches
269                .fetch_add(1, Ordering::Relaxed);
270        }
271    }
272
273    /// Calculate dynamic priority, accounting for priority inheritance boost.
274    pub fn effective_priority(&self) -> u8 {
275        // Priority inheritance: if a high-priority task is waiting on us,
276        // use the boosted priority to prevent priority inversion.
277        if let Some(boosted) = self.priority_boost {
278            let boosted_val = boosted as u8;
279            let base_val = self.priority as u8;
280            // Lower numeric value = higher priority
281            if boosted_val < base_val {
282                return boosted_val;
283            }
284        }
285
286        match self.sched_class {
287            SchedClass::RealTime => self.priority as u8,
288            SchedClass::Normal => {
289                // Boost priority based on how long task has been waiting
290                let wait_time =
291                    crate::arch::timer::get_ticks() - self.stats.last_run.load(Ordering::Relaxed);
292                let boost = (wait_time / PRIORITY_BOOST_INTERVAL).min(20) as u8;
293                (self.priority as u8).saturating_sub(boost)
294            }
295            SchedClass::Idle => Priority::Idle as u8,
296        }
297    }
298}
299
/// Default time slice in timer ticks, granted to freshly created tasks.
pub const DEFAULT_TIME_SLICE: u32 = 10;

/// Interval for priority boosting (in ticks): each full interval a CFS
/// task waits raises its effective priority by one numeric step, capped
/// at 20 steps (see `Task::effective_priority`).
pub const PRIORITY_BOOST_INTERVAL: u64 = 100;
305
impl TaskContext {
    /// Create new task context for entry point (x86_64).
    ///
    /// Exactly one of these `new` definitions is compiled, selected by
    /// the target architecture; all share the same signature.
    #[cfg(target_arch = "x86_64")]
    pub fn new(entry_point: usize, stack_base: usize) -> Self {
        TaskContext::X86_64(crate::arch::x86_64::context::X86_64Context::new(
            entry_point,
            stack_base,
        ))
    }

    /// Create new task context for entry point (AArch64).
    #[cfg(target_arch = "aarch64")]
    pub fn new(entry_point: usize, stack_base: usize) -> Self {
        TaskContext::AArch64(crate::arch::aarch64::context::AArch64Context::new(
            entry_point,
            stack_base,
        ))
    }

    /// Create new task context for entry point (RISC-V, 32- or 64-bit).
    #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
    pub fn new(entry_point: usize, stack_base: usize) -> Self {
        TaskContext::RiscV(crate::arch::riscv::context::RiscVContext::new(
            entry_point,
            stack_base,
        ))
    }
}
332
333/// Task ID allocator
334static NEXT_TID: AtomicU64 = AtomicU64::new(1);
335
336/// Allocate new thread ID
337pub fn alloc_tid() -> ThreadId {
338    ThreadId(NEXT_TID.fetch_add(1, Ordering::Relaxed))
339}