1use core::sync::atomic::{AtomicU32, AtomicU64, Ordering};
7
/// Lock-free global scheduler performance counters.
///
/// Every field is an atomic updated with `Ordering::Relaxed`: each counter is
/// individually accurate, but a multi-field read (e.g. [`SchedulerMetrics::get_summary`])
/// is not a consistent snapshot. Intended for statistics, not for invariants.
pub struct SchedulerMetrics {
    /// Total context switches (voluntary + involuntary).
    pub context_switches: AtomicU64,
    /// Switches where the outgoing task yielded or blocked of its own accord.
    pub voluntary_switches: AtomicU64,
    /// Switches forced on the outgoing task (preemption).
    pub involuntary_switches: AtomicU64,
    /// Cumulative cycles spent inside the scheduler itself.
    pub scheduler_cycles: AtomicU64,
    /// Cumulative cycles spent performing context switches.
    pub switch_cycles: AtomicU64,
    /// Number of times the scheduler was invoked.
    pub schedule_calls: AtomicU64,
    /// Number of times the idle task was selected.
    pub idle_scheduled: AtomicU64,
    /// Running average switch latency in cycles, refreshed on every switch.
    pub avg_switch_latency: AtomicU64,
    /// Smallest observed switch latency; `u64::MAX` until the first sample.
    pub min_switch_latency: AtomicU64,
    /// Largest observed switch latency.
    pub max_switch_latency: AtomicU64,
    /// Load-balancing operations performed.
    pub load_balance_count: AtomicU64,
    /// Tasks migrated between CPUs.
    pub task_migrations: AtomicU64,
    /// Tasks that blocked waiting on IPC.
    pub ipc_blocks: AtomicU64,
    /// Tasks woken up by IPC.
    pub ipc_wakeups: AtomicU64,
}

impl SchedulerMetrics {
    /// Create a zeroed metrics block.
    ///
    /// `min_switch_latency` starts at `u64::MAX` so the first recorded sample
    /// always becomes the minimum; [`SchedulerMetrics::get_summary`] maps the
    /// untouched sentinel back to 0.
    pub const fn new() -> Self {
        Self {
            context_switches: AtomicU64::new(0),
            voluntary_switches: AtomicU64::new(0),
            involuntary_switches: AtomicU64::new(0),
            scheduler_cycles: AtomicU64::new(0),
            switch_cycles: AtomicU64::new(0),
            schedule_calls: AtomicU64::new(0),
            idle_scheduled: AtomicU64::new(0),
            avg_switch_latency: AtomicU64::new(0),
            min_switch_latency: AtomicU64::new(u64::MAX),
            max_switch_latency: AtomicU64::new(0),
            load_balance_count: AtomicU64::new(0),
            task_migrations: AtomicU64::new(0),
            ipc_blocks: AtomicU64::new(0),
            ipc_wakeups: AtomicU64::new(0),
        }
    }

    /// Record one context switch that took `latency_cycles`, classified as
    /// voluntary or involuntary, and refresh the min/max/avg latency stats.
    pub fn record_context_switch(&self, latency_cycles: u64, voluntary: bool) {
        self.context_switches.fetch_add(1, Ordering::Relaxed);

        if voluntary {
            self.voluntary_switches.fetch_add(1, Ordering::Relaxed);
        } else {
            self.involuntary_switches.fetch_add(1, Ordering::Relaxed);
        }

        self.switch_cycles
            .fetch_add(latency_cycles, Ordering::Relaxed);

        // `fetch_min`/`fetch_max` replace the previous hand-rolled
        // compare_exchange_weak loops: identical semantics, and a single RMW
        // instruction on targets that support it.
        self.min_switch_latency
            .fetch_min(latency_cycles, Ordering::Relaxed);
        self.max_switch_latency
            .fetch_max(latency_cycles, Ordering::Relaxed);

        // Recompute the running average. After the fetch_add above `count` is
        // at least 1 from this thread's view, but a concurrent `reset` could
        // zero it between the two loads, so keep the guard.
        let count = self.context_switches.load(Ordering::Relaxed);
        if count > 0 {
            let total_cycles = self.switch_cycles.load(Ordering::Relaxed);
            self.avg_switch_latency
                .store(total_cycles / count, Ordering::Relaxed);
        }
    }

    /// Account `cycles` spent inside the scheduler and count one invocation.
    pub fn record_scheduler_overhead(&self, cycles: u64) {
        self.scheduler_cycles.fetch_add(cycles, Ordering::Relaxed);
        self.schedule_calls.fetch_add(1, Ordering::Relaxed);
    }

    /// Count one selection of the idle task.
    pub fn record_idle_scheduled(&self) {
        self.idle_scheduled.fetch_add(1, Ordering::Relaxed);
    }

    /// Count one load-balancing pass.
    pub fn record_load_balance(&self) {
        self.load_balance_count.fetch_add(1, Ordering::Relaxed);
    }

    /// Count one cross-CPU task migration.
    pub fn record_migration(&self) {
        self.task_migrations.fetch_add(1, Ordering::Relaxed);
    }

    /// Count one task blocking on IPC.
    pub fn record_ipc_block(&self) {
        self.ipc_blocks.fetch_add(1, Ordering::Relaxed);
    }

    /// Count one task woken by IPC.
    pub fn record_ipc_wakeup(&self) {
        self.ipc_wakeups.fetch_add(1, Ordering::Relaxed);
    }

    /// Produce a point-in-time summary of all counters.
    ///
    /// Fields are loaded independently with relaxed ordering, so under
    /// concurrent updates the summary is approximate, never torn per-field.
    pub fn get_summary(&self) -> MetricsSummary {
        let context_switches = self.context_switches.load(Ordering::Relaxed);
        let scheduler_cycles = self.scheduler_cycles.load(Ordering::Relaxed);
        let switch_cycles = self.switch_cycles.load(Ordering::Relaxed);
        let schedule_calls = self.schedule_calls.load(Ordering::Relaxed);
        let min = self.min_switch_latency.load(Ordering::Relaxed);

        // BUGFIX: the previous code divided by `scheduler_cycles +
        // switch_cycles` whenever `context_switches > 0`. Switches recorded
        // with zero latency and no scheduler overhead made that denominator 0
        // and panicked with a divide-by-zero. Guard on the denominator itself,
        // and widen to u128 so neither the sum nor the `* 100` can overflow.
        let total_cycles = scheduler_cycles as u128 + switch_cycles as u128;
        let scheduler_overhead_pct = if total_cycles > 0 {
            // Quotient is a percentage (<= 100), so the cast back is lossless.
            ((scheduler_cycles as u128 * 100) / total_cycles) as u64
        } else {
            0
        };

        MetricsSummary {
            context_switches,
            voluntary_switches: self.voluntary_switches.load(Ordering::Relaxed),
            involuntary_switches: self.involuntary_switches.load(Ordering::Relaxed),
            avg_switch_latency: self.avg_switch_latency.load(Ordering::Relaxed),
            // Map the "no samples yet" sentinel back to 0 for consumers.
            min_switch_latency: if min == u64::MAX { 0 } else { min },
            max_switch_latency: self.max_switch_latency.load(Ordering::Relaxed),
            scheduler_overhead_pct,
            idle_percentage: if schedule_calls > 0 {
                (self.idle_scheduled.load(Ordering::Relaxed) * 100) / schedule_calls
            } else {
                0
            },
            load_balance_count: self.load_balance_count.load(Ordering::Relaxed),
            task_migrations: self.task_migrations.load(Ordering::Relaxed),
            ipc_blocks: self.ipc_blocks.load(Ordering::Relaxed),
            ipc_wakeups: self.ipc_wakeups.load(Ordering::Relaxed),
        }
    }

    /// Reset every counter to its initial state (see [`SchedulerMetrics::new`]).
    ///
    /// Not atomic as a whole: concurrent recorders may interleave with the
    /// individual stores, leaving a partially-reset view.
    pub fn reset(&self) {
        self.context_switches.store(0, Ordering::Relaxed);
        self.voluntary_switches.store(0, Ordering::Relaxed);
        self.involuntary_switches.store(0, Ordering::Relaxed);
        self.scheduler_cycles.store(0, Ordering::Relaxed);
        self.switch_cycles.store(0, Ordering::Relaxed);
        self.schedule_calls.store(0, Ordering::Relaxed);
        self.idle_scheduled.store(0, Ordering::Relaxed);
        self.avg_switch_latency.store(0, Ordering::Relaxed);
        self.min_switch_latency.store(u64::MAX, Ordering::Relaxed);
        self.max_switch_latency.store(0, Ordering::Relaxed);
        self.load_balance_count.store(0, Ordering::Relaxed);
        self.task_migrations.store(0, Ordering::Relaxed);
        self.ipc_blocks.store(0, Ordering::Relaxed);
        self.ipc_wakeups.store(0, Ordering::Relaxed);
    }
}

/// Plain-value snapshot of [`SchedulerMetrics`], produced by `get_summary`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct MetricsSummary {
    pub context_switches: u64,
    pub voluntary_switches: u64,
    pub involuntary_switches: u64,
    pub avg_switch_latency: u64,
    /// 0 when no switch has been recorded yet.
    pub min_switch_latency: u64,
    pub max_switch_latency: u64,
    /// Scheduler cycles as a percentage of scheduler + switch cycles; 0 when
    /// no cycles have been recorded at all.
    pub scheduler_overhead_pct: u64,
    /// Idle selections as a percentage of schedule calls.
    pub idle_percentage: u64,
    pub load_balance_count: u64,
    pub task_migrations: u64,
    pub ipc_blocks: u64,
    pub ipc_wakeups: u64,
}
213
/// Per-CPU scheduler statistics.
///
/// One instance is expected per CPU; counters are atomics so they can be
/// bumped from interrupt/scheduler context without locking.
pub struct PerCpuMetrics {
    /// Identifier of the CPU these counters belong to.
    pub cpu_id: u8,
    /// Tasks dispatched on this CPU.
    pub tasks_scheduled: AtomicU64,
    /// Total time spent running tasks (units defined by the caller).
    pub total_runtime: AtomicU64,
    /// Time spent in the idle task.
    pub idle_time: AtomicU64,
    /// Inter-processor interrupts received by this CPU.
    pub ipis_received: AtomicU32,
    /// Inter-processor interrupts sent from this CPU.
    pub ipis_sent: AtomicU32,
}

impl PerCpuMetrics {
    /// Build a zeroed counter set tagged with `cpu_id`.
    pub const fn new(cpu_id: u8) -> Self {
        Self {
            ipis_sent: AtomicU32::new(0),
            ipis_received: AtomicU32::new(0),
            idle_time: AtomicU64::new(0),
            total_runtime: AtomicU64::new(0),
            tasks_scheduled: AtomicU64::new(0),
            cpu_id,
        }
    }
}
243
244impl Default for SchedulerMetrics {
245 fn default() -> Self {
246 Self::new()
247 }
248}
249
/// Global scheduler metrics instance shared by the whole kernel (crate-visible).
pub(crate) static SCHEDULER_METRICS: SchedulerMetrics = SchedulerMetrics::new();
252
/// Read the current timestamp/cycle counter.
///
/// Thin wrapper over `crate::arch::entropy::read_timestamp()`; the unit is
/// whatever raw tick count the architecture layer provides — presumably TSC
/// cycles on x86_64, but confirm per-arch before comparing across CPUs.
#[inline]
pub fn read_tsc() -> u64 {
    crate::arch::entropy::read_timestamp()
}
262
/// Print a human-readable summary of [`SCHEDULER_METRICS`] to the console.
///
/// The summary is always computed; output is emitted only on x86_64 —
/// presumably the only target where `println!` is wired to a console here
/// (NOTE(review): confirm against the arch layers).
pub fn print_metrics() {
    let summary = SCHEDULER_METRICS.get_summary();

    #[cfg(target_arch = "x86_64")]
    {
        println!("[SCHED] Scheduler Metrics:");
        println!(
            "  Context switches: {} (voluntary: {}, involuntary: {})",
            summary.context_switches, summary.voluntary_switches, summary.involuntary_switches
        );
        println!(
            "  Switch latency: avg={} cycles, min={}, max={}",
            summary.avg_switch_latency, summary.min_switch_latency, summary.max_switch_latency
        );
        println!("  Scheduler overhead: {}%", summary.scheduler_overhead_pct);
        println!("  Idle time: {}%", summary.idle_percentage);
        println!(
            "  Load balancing: {} ops, {} migrations",
            summary.load_balance_count, summary.task_migrations
        );
        println!(
            "  IPC: {} blocks, {} wakeups",
            summary.ipc_blocks, summary.ipc_wakeups
        );
    }

    #[cfg(not(target_arch = "x86_64"))]
    {
        // Discard the summary to avoid an unused-variable warning on
        // non-x86_64 builds where nothing is printed.
        let _ = summary;
    }
}