⚠️ VeridianOS Kernel Documentation — low-level `no_std` kernel code. Treat all functions as unsafe unless they are explicitly documented otherwise.

veridian_kernel/sched/
queue.rs

1//! Ready queue management for scheduler
2
3#[cfg(feature = "alloc")]
4extern crate alloc;
5#[cfg(feature = "alloc")]
6use alloc::{collections::BTreeMap, vec::Vec};
7use core::ptr::NonNull;
8
9#[allow(unused_imports)]
10use spin::Mutex;
11
12use super::{
13    task::{Priority, SchedClass, Task},
14    task_ptr::TaskPtr,
15};
16
/// Ready queue for a single priority level
///
/// Fixed-capacity ring buffer of task pointers. `const`-constructible so it
/// can be embedded in statics (see `READY_QUEUE` / `READY_QUEUE_STATIC`)
/// without heap allocation.
pub struct PriorityQueue {
    /// Circular queue of task pointers; slots between `head` and `tail`
    /// (modulo capacity) are `Some`, the rest `None`
    tasks: [Option<TaskPtr>; MAX_TASKS_PER_QUEUE],
    /// Head index (next to dequeue)
    head: usize,
    /// Tail index (next to enqueue)
    tail: usize,
    /// Number of tasks in queue (0..=MAX_TASKS_PER_QUEUE)
    count: usize,
}
28
29impl PriorityQueue {
30    /// Create new empty priority queue
31    pub const fn new() -> Self {
32        Self {
33            tasks: [None; MAX_TASKS_PER_QUEUE],
34            head: 0,
35            tail: 0,
36            count: 0,
37        }
38    }
39
40    /// Check if queue is empty
41    pub fn is_empty(&self) -> bool {
42        self.count == 0
43    }
44
45    /// Check if queue is full
46    pub fn is_full(&self) -> bool {
47        self.count == MAX_TASKS_PER_QUEUE
48    }
49
50    /// Enqueue task
51    pub fn enqueue(&mut self, task: NonNull<Task>) -> bool {
52        if self.is_full() {
53            return false;
54        }
55
56        self.tasks[self.tail] = Some(TaskPtr::new(task));
57        self.tail = (self.tail + 1) % MAX_TASKS_PER_QUEUE;
58        self.count += 1;
59        true
60    }
61
62    /// Dequeue task
63    pub fn dequeue(&mut self) -> Option<NonNull<Task>> {
64        if self.is_empty() {
65            return None;
66        }
67
68        let task = self.tasks[self.head].take();
69        self.head = (self.head + 1) % MAX_TASKS_PER_QUEUE;
70        self.count -= 1;
71        task.map(|t| t.as_ptr())
72    }
73
74    /// Peek at next task without removing
75    pub fn peek(&self) -> Option<NonNull<Task>> {
76        if self.is_empty() {
77            None
78        } else {
79            self.tasks[self.head].map(|t| t.as_ptr())
80        }
81    }
82
83    /// Remove specific task from queue
84    pub fn remove(&mut self, target: NonNull<Task>) -> bool {
85        if self.is_empty() {
86            return false;
87        }
88
89        let mut found = false;
90        let mut new_tasks = [None; MAX_TASKS_PER_QUEUE];
91        let mut new_count = 0;
92
93        // Copy all tasks except target to new array
94        let mut idx = self.head;
95        for _ in 0..self.count {
96            if let Some(task) = self.tasks[idx] {
97                if task.as_ptr() != target {
98                    new_tasks[new_count] = Some(task);
99                    new_count += 1;
100                } else {
101                    found = true;
102                }
103            }
104            idx = (idx + 1) % MAX_TASKS_PER_QUEUE;
105        }
106
107        if found {
108            // Replace with new array
109            self.tasks = new_tasks;
110            self.head = 0;
111            self.tail = new_count;
112            self.count = new_count;
113        }
114
115        found
116    }
117}
118
/// Multi-level ready queue
///
/// Three scheduling classes in strict priority order: real-time queues,
/// then normal queues, then the idle queue. The bitmaps mirror which
/// sub-queues are non-empty so `dequeue` can find the highest-priority
/// ready task without scanning every queue.
///
/// Cache-line aligned to prevent false sharing when per-CPU ready queues
/// are stored in adjacent array slots accessed by different cores.
#[repr(align(64))]
pub struct ReadyQueue {
    /// Real-time queues by priority (index 0 dequeued first)
    rt_queues: [PriorityQueue; NUM_RT_PRIORITIES],
    /// Normal priority queues (priorities folded into buckets on enqueue)
    normal_queues: [PriorityQueue; NUM_NORMAL_PRIORITIES],
    /// Idle queue
    idle_queue: PriorityQueue,
    /// Bitmap of non-empty real-time queues (bit i == rt_queues[i] non-empty;
    /// u32 requires NUM_RT_PRIORITIES <= 32)
    rt_bitmap: u32,
    /// Bitmap of non-empty normal queues
    normal_bitmap: u32,
    /// Whether idle queue has tasks
    idle_flag: bool,
}
138
139impl ReadyQueue {
140    /// Create new ready queue
141    pub const fn new() -> Self {
142        Self {
143            rt_queues: [const { PriorityQueue::new() }; NUM_RT_PRIORITIES],
144            normal_queues: [const { PriorityQueue::new() }; NUM_NORMAL_PRIORITIES],
145            idle_queue: PriorityQueue::new(),
146            rt_bitmap: 0,
147            normal_bitmap: 0,
148            idle_flag: false,
149        }
150    }
151
152    /// Add task to appropriate queue
153    pub fn enqueue(&mut self, task: NonNull<Task>) -> bool {
154        // SAFETY: `task` is a valid NonNull<Task> provided by the scheduler.
155        // We read sched_class and priority to determine which sub-queue to
156        // use. The ReadyQueue is protected by a Mutex, ensuring exclusive
157        // access during this operation.
158        unsafe {
159            let task_ref = task.as_ref();
160            match task_ref.sched_class {
161                SchedClass::RealTime => {
162                    let idx = (task_ref.priority as usize).min(NUM_RT_PRIORITIES - 1);
163                    if self.rt_queues[idx].enqueue(task) {
164                        self.rt_bitmap |= 1 << idx;
165                        true
166                    } else {
167                        false
168                    }
169                }
170                SchedClass::Normal => {
171                    let idx = ((task_ref.priority as usize).saturating_sub(30) / 10)
172                        .min(NUM_NORMAL_PRIORITIES - 1);
173                    if self.normal_queues[idx].enqueue(task) {
174                        self.normal_bitmap |= 1 << idx;
175                        true
176                    } else {
177                        false
178                    }
179                }
180                SchedClass::Idle => {
181                    if self.idle_queue.enqueue(task) {
182                        self.idle_flag = true;
183                        true
184                    } else {
185                        false
186                    }
187                }
188            }
189        }
190    }
191
192    /// Dequeue highest priority task
193    pub fn dequeue(&mut self) -> Option<NonNull<Task>> {
194        // Check real-time queues first
195        if self.rt_bitmap != 0 {
196            let idx = self.rt_bitmap.trailing_zeros() as usize;
197            if let Some(task) = self.rt_queues[idx].dequeue() {
198                if self.rt_queues[idx].is_empty() {
199                    self.rt_bitmap &= !(1 << idx);
200                }
201                return Some(task);
202            }
203        }
204
205        // Check normal queues
206        if self.normal_bitmap != 0 {
207            let idx = self.normal_bitmap.trailing_zeros() as usize;
208            if let Some(task) = self.normal_queues[idx].dequeue() {
209                if self.normal_queues[idx].is_empty() {
210                    self.normal_bitmap &= !(1 << idx);
211                }
212                return Some(task);
213            }
214        }
215
216        // Check idle queue
217        if self.idle_flag {
218            if let Some(task) = self.idle_queue.dequeue() {
219                if self.idle_queue.is_empty() {
220                    self.idle_flag = false;
221                }
222                return Some(task);
223            }
224        }
225
226        None
227    }
228
229    /// Remove specific task from queues
230    pub fn remove(&mut self, task: NonNull<Task>) -> bool {
231        // SAFETY: `task` is a valid NonNull<Task> provided by the caller
232        // (e.g., migrate_task). We read sched_class and priority to find
233        // the correct sub-queue for removal. The ReadyQueue Mutex ensures
234        // exclusive access.
235        unsafe {
236            let task_ref = task.as_ref();
237            match task_ref.sched_class {
238                SchedClass::RealTime => {
239                    let idx = (task_ref.priority as usize).min(NUM_RT_PRIORITIES - 1);
240                    let removed = self.rt_queues[idx].remove(task);
241                    if removed && self.rt_queues[idx].is_empty() {
242                        self.rt_bitmap &= !(1 << idx);
243                    }
244                    removed
245                }
246                SchedClass::Normal => {
247                    let idx = ((task_ref.priority as usize).saturating_sub(30) / 10)
248                        .min(NUM_NORMAL_PRIORITIES - 1);
249                    let removed = self.normal_queues[idx].remove(task);
250                    if removed && self.normal_queues[idx].is_empty() {
251                        self.normal_bitmap &= !(1 << idx);
252                    }
253                    removed
254                }
255                SchedClass::Idle => {
256                    let removed = self.idle_queue.remove(task);
257                    if removed && self.idle_queue.is_empty() {
258                        self.idle_flag = false;
259                    }
260                    removed
261                }
262            }
263        }
264    }
265
266    /// Check if any tasks are ready
267    pub fn has_ready_tasks(&self) -> bool {
268        self.rt_bitmap != 0 || self.normal_bitmap != 0 || self.idle_flag
269    }
270}
271
/// CFS run queue using red-black tree
///
/// `BTreeMap` keyed by virtual runtime; each key holds a bucket of tasks
/// that share the same vruntime. The smallest key is the next task to run.
/// Only available with the `alloc` feature (heap-backed).
#[cfg(feature = "alloc")]
pub struct CfsRunQueue {
    /// Tasks sorted by virtual runtime (key: vruntime as stored at
    /// enqueue time, clamped to at least `min_vruntime`)
    tasks: BTreeMap<u64, Vec<TaskPtr>>,
    /// Minimum virtual runtime; monotonically ratcheted by dequeue and
    /// `update_min_vruntime`, used to clamp newly enqueued tasks
    min_vruntime: u64,
    /// Total weight of all tasks (sum of `priority_to_weight` values)
    total_weight: u64,
}
282
#[cfg(feature = "alloc")]
impl CfsRunQueue {
    /// Create new CFS run queue
    pub fn new() -> Self {
        Self {
            tasks: BTreeMap::new(),
            min_vruntime: 0,
            total_weight: 0,
        }
    }

    /// Add task to CFS queue
    ///
    /// The insertion key is the task's vruntime clamped up to
    /// `min_vruntime`, so a newly woken task cannot monopolize the CPU by
    /// arriving with a stale, tiny vruntime. The task's own `vruntime`
    /// field is NOT modified, so the stored key can differ from it (see
    /// `remove`).
    pub fn enqueue(&mut self, task: NonNull<Task>) {
        // SAFETY: `task` is a valid NonNull<Task>. We read vruntime and
        // priority to determine the insertion key and weight. The CFS queue
        // is protected by a Mutex ensuring exclusive access.
        unsafe {
            let task_ref = task.as_ref();
            let vruntime = task_ref.vruntime.max(self.min_vruntime);

            self.tasks
                .entry(vruntime)
                .or_default()
                .push(TaskPtr::new(task));

            self.total_weight += priority_to_weight(task_ref.priority);
        }
    }

    /// Remove task with lowest vruntime
    ///
    /// Also ratchets `min_vruntime` up to the dequeued key.
    pub fn dequeue(&mut self) -> Option<NonNull<Task>> {
        if let Some(&vruntime) = self.tasks.keys().next() {
            self.min_vruntime = vruntime;

            // Get mutable reference to remove task
            // get_mut cannot fail: vruntime was just retrieved from keys().next()
            let tasks = self
                .tasks
                .get_mut(&vruntime)
                .expect("vruntime key disappeared between lookup and access");
            let task = tasks.pop();

            if tasks.is_empty() {
                self.tasks.remove(&vruntime);
            }

            if let Some(task) = task {
                // SAFETY: task is a TaskPtr that was stored in the CFS queue.
                // We read its priority to update total_weight. The CFS queue
                // Mutex ensures exclusive access.
                unsafe {
                    let task_ref = task.as_ptr().as_ref();
                    self.total_weight = self
                        .total_weight
                        .saturating_sub(priority_to_weight(task_ref.priority));
                }
            }

            task.map(|t| t.as_ptr())
        } else {
            None
        }
    }

    /// Remove specific task
    ///
    /// Returns `true` if the task was found and removed.
    ///
    /// `enqueue` stores tasks under `vruntime.max(min_vruntime)`, so a task
    /// whose own vruntime was below `min_vruntime` at enqueue time is keyed
    /// differently from `task.vruntime`. We try the direct key first, then
    /// fall back to scanning the buckets; reporting a spurious "not found"
    /// here would strand the entry and leak `total_weight`.
    pub fn remove(&mut self, target: NonNull<Task>) -> bool {
        // SAFETY: `target` is a valid NonNull<Task> provided by the caller.
        // We only read vruntime and priority; the CFS queue Mutex ensures
        // exclusive access.
        let (vruntime, weight) = unsafe {
            let task_ref = target.as_ref();
            (task_ref.vruntime, priority_to_weight(task_ref.priority))
        };

        // Fast path: stored under its own vruntime.
        if self.remove_entry(vruntime, target) {
            self.total_weight = self.total_weight.saturating_sub(weight);
            return true;
        }

        // Slow path: the enqueue-time key was clamped; find the bucket that
        // actually holds the task.
        let key = self
            .tasks
            .iter()
            .find(|(_, bucket)| bucket.iter().any(|t| t.as_ptr() == target))
            .map(|(&k, _)| k);

        if let Some(k) = key {
            let removed = self.remove_entry(k, target);
            debug_assert!(removed, "bucket found but entry vanished");
            self.total_weight = self.total_weight.saturating_sub(weight);
            true
        } else {
            false
        }
    }

    /// Remove `target` from the bucket stored under `key`, dropping the
    /// bucket if it becomes empty. Returns whether the task was present.
    fn remove_entry(&mut self, key: u64, target: NonNull<Task>) -> bool {
        let Some(bucket) = self.tasks.get_mut(&key) else {
            return false;
        };
        let Some(pos) = bucket.iter().position(|t| t.as_ptr() == target) else {
            return false;
        };
        bucket.remove(pos);
        if bucket.is_empty() {
            self.tasks.remove(&key);
        }
        true
    }

    /// Update minimum vruntime (monotonic: never moves backwards)
    pub fn update_min_vruntime(&mut self, current_vruntime: u64) {
        self.min_vruntime = self.min_vruntime.max(current_vruntime);
    }

    /// Check if queue is empty
    pub fn is_empty(&self) -> bool {
        self.tasks.is_empty()
    }
}
385
// `Default` delegates to `new()` so these queues work with derive(Default)
// containers and `..Default::default()` struct updates.
impl Default for PriorityQueue {
    fn default() -> Self {
        Self::new()
    }
}

impl Default for ReadyQueue {
    fn default() -> Self {
        Self::new()
    }
}

// Gated like the type itself: CfsRunQueue only exists with `alloc`.
#[cfg(feature = "alloc")]
impl Default for CfsRunQueue {
    fn default() -> Self {
        Self::new()
    }
}
404
405/// Convert priority to CFS weight
406fn priority_to_weight(priority: Priority) -> u64 {
407    // Higher priority = higher weight = more CPU time
408    match priority {
409        Priority::RealTimeHigh => 0, // Not used for CFS
410        Priority::RealTimeNormal => 0,
411        Priority::RealTimeLow => 0,
412        Priority::SystemHigh => 200,
413        Priority::SystemNormal => 100,
414        Priority::UserHigh => 50,
415        Priority::UserNormal => 20,
416        Priority::UserLow => 10,
417        Priority::Idle => 1,
418    }
419}
420
/// Maximum tasks per priority queue
///
/// Capacity of each `PriorityQueue` ring; `enqueue` returns `false` once a
/// level holds this many tasks.
const MAX_TASKS_PER_QUEUE: usize = 256;

/// Number of real-time priority levels
///
/// Must stay <= 32 so every level has a bit in `ReadyQueue::rt_bitmap` (u32).
const NUM_RT_PRIORITIES: usize = 30;

/// Number of normal priority levels
///
/// Must stay <= 32 to fit `ReadyQueue::normal_bitmap` (u32).
const NUM_NORMAL_PRIORITIES: usize = 4;

/// Global ready queue protected by mutex
#[cfg(not(target_arch = "riscv64"))]
pub(crate) static READY_QUEUE: Mutex<ReadyQueue> = Mutex::new(ReadyQueue::new());

/// Global ready queue for RISC-V (avoiding spin::Mutex issues)
///
/// SAFETY JUSTIFICATION: This static mut is intentionally kept because:
/// 1. RISC-V early boot cannot use spin::Mutex without deadlocking
/// 2. Const-initialized in BSS (zero-init matches ReadyQueue::new())
/// 3. After boot, accessed only through get_ready_queue()
/// 4. Cannot use OnceLock because it may not be available when the scheduler
///    first needs to run
///
/// NOTE: Previously used Option<Box<ReadyQueue>> with lazy initialization,
/// but Box::new(ReadyQueue::new()) places ~72KB on the stack in debug mode
/// before heap-allocating, which corrupted the bump allocator on RISC-V.
/// Const-initializing directly in BSS avoids all heap/stack issues.
#[cfg(target_arch = "riscv64")]
#[allow(static_mut_refs)]
pub(crate) static mut READY_QUEUE_STATIC: ReadyQueue = ReadyQueue::new();

/// Per-CPU ready queues for SMP
///
/// `ReadyQueue` is `#[repr(align(64))]`, so adjacent slots sit on separate
/// cache lines and cores don't false-share.
#[cfg(feature = "smp")]
pub(crate) static PER_CPU_QUEUES: [Mutex<ReadyQueue>; MAX_CPUS] =
    [const { Mutex::new(ReadyQueue::new()) }; MAX_CPUS];

/// Maximum number of CPUs supported (sizes `PER_CPU_QUEUES`)
#[cfg(feature = "smp")]
pub(crate) const MAX_CPUS: usize = 64;
459
/// Get the global ready queue (architecture-specific)
///
/// Returns a `'static mut` reference into the BSS-resident queue; callers
/// must not hold two such references at once.
#[cfg(target_arch = "riscv64")]
#[allow(static_mut_refs)]
pub fn get_ready_queue() -> &'static mut ReadyQueue {
    // SAFETY: READY_QUEUE_STATIC is a const-initialized static mut in BSS.
    // On RISC-V, we use a static mut instead of spin::Mutex to avoid
    // deadlocks during early bootstrap. Single-hart boot ensures no
    // concurrent access during initialization. The 'static lifetime is
    // valid because the static lives for the kernel's lifetime.
    //
    // NOTE(review): two calls to this function yield aliased `&'static mut`
    // references, which is UB if both are live — confirm that after boot
    // all access is serialized (single hart or external locking).
    unsafe { &mut READY_QUEUE_STATIC }
}