//! veridian_kernel/security/memory_protection.rs
//!
//! Memory-protection mechanisms: ASLR, stack canaries, guard pages,
//! W^X policy, DEP/NX enforcement, Spectre mitigation, and KPTI.
#![allow(clippy::not_unsafe_ptr_arg_deref)]
7use core::sync::atomic::{AtomicBool, AtomicU64, Ordering};
8
9use spin::RwLock;
10
11use crate::{crypto::random::get_random, error::KernelError, sync::once_lock::OnceLock};
12
13pub struct Aslr {
15 entropy_pool: RwLock<[u64; 16]>,
17 counter: AtomicU64,
19 seeded: AtomicBool,
21}
22
23impl Aslr {
24 pub fn new() -> Result<Self, KernelError> {
30 Ok(Self {
31 entropy_pool: RwLock::new([0u64; 16]),
32 counter: AtomicU64::new(0),
33 seeded: AtomicBool::new(false),
34 })
35 }
36
37 fn ensure_seeded(&self) {
39 if self.seeded.load(Ordering::Acquire) {
40 return;
41 }
42
43 let rng = get_random();
44 let mut pool = self.entropy_pool.write();
45 if !self.seeded.load(Ordering::Relaxed) {
47 let mut i = 0;
48 while i < 16 {
49 pool[i] = rng.next_u64();
50 i += 1;
51 }
52 self.seeded.store(true, Ordering::Release);
53 }
54 }
55
56 pub fn randomize_address(&self, base: usize, region_type: RegionType) -> usize {
58 self.ensure_seeded();
59 let entropy = {
60 let pool = self.entropy_pool.read();
61 let index = (self.counter.fetch_add(1, Ordering::Relaxed) % 16) as usize;
62 pool[index]
63 };
64
65 let randomization_bits = match region_type {
66 RegionType::Stack => 28, RegionType::Heap => 28, RegionType::Executable => 24, RegionType::Library => 28, RegionType::Mmap => 28, };
72
73 let mask = (1u64 << randomization_bits) - 1;
75 let random_offset = (entropy & mask) as usize;
76
77 let aligned_offset = random_offset & !0xFFF;
79
80 base.wrapping_add(aligned_offset)
81 }
82
83 pub fn get_stack_canary(&self) -> u64 {
85 let rng = get_random();
86 rng.next_u64()
87 }
88
89 pub fn refresh_entropy(&self) {
91 let mut pool = self.entropy_pool.write();
92 let rng = get_random();
93
94 let mut i = 0;
96 while i < 16 {
97 pool[i] = rng.next_u64();
98 i += 1;
99 }
100 }
101}
102
103impl Default for Aslr {
104 fn default() -> Self {
105 Self::new().expect("Failed to create ASLR")
106 }
107}
108
/// Memory-region categories used to select ASLR entropy width and NX policy.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RegionType {
    /// Thread or process stack region.
    Stack,
    /// Heap allocations.
    Heap,
    /// Executable (text) region.
    Executable,
    /// Shared-library mappings.
    Library,
    /// Anonymous or file-backed mmap regions.
    Mmap,
}
118
119pub struct StackCanary {
121 value: u64,
123}
124
125impl StackCanary {
126 pub fn new() -> Self {
128 let rng = get_random();
129 Self {
130 value: rng.next_u64(),
131 }
132 }
133
134 pub fn value(&self) -> u64 {
136 self.value
137 }
138
139 pub fn verify(&self, observed_value: u64) -> bool {
141 self.value == observed_value
142 }
143
144 pub fn place(&self, stack_ptr: *mut u64) {
146 unsafe {
151 *stack_ptr = self.value;
152 }
153 }
154
155 pub fn check(&self, stack_ptr: *const u64) -> bool {
157 unsafe { *stack_ptr == self.value }
163 }
164}
165
166impl Default for StackCanary {
167 fn default() -> Self {
168 Self::new()
169 }
170}
171
/// Describes an unmapped page range used to catch stack/heap overruns.
pub struct GuardPage {
    // Start address of the guard region (inclusive).
    address: usize,
    // Size of the guard region in bytes.
    size: usize,
}

impl GuardPage {
    /// Creates a guard-page descriptor covering `[address, address + size)`.
    pub fn new(address: usize, size: usize) -> Self {
        Self { address, size }
    }

    /// Start address of the guard region.
    pub fn address(&self) -> usize {
        self.address
    }

    /// Size of the guard region in bytes.
    pub fn size(&self) -> usize {
        self.size
    }

    /// True when `addr` falls inside `[address, address + size)`.
    ///
    /// Written as a subtraction so a region ending at the top of the address
    /// space cannot overflow: the previous `addr < self.address + self.size`
    /// form wrapped on overflow, yielding false negatives (and a panic in
    /// debug builds) for such regions. The short-circuit on
    /// `addr >= self.address` guarantees the subtraction cannot underflow.
    pub fn contains(&self, addr: usize) -> bool {
        addr >= self.address && addr - self.address < self.size
    }
}
201
/// W^X (write-xor-execute) policy: a mapping may be writable or executable,
/// never both at once.
pub struct WxPolicy {
    // Whether the policy is enforced at all.
    enabled: bool,
    // Running count of W+X requests rejected by `check_flags`.
    violations: AtomicU64,
}

impl WxPolicy {
    /// Creates the policy in its enforcing state with a zeroed violation
    /// counter.
    pub fn new() -> Self {
        Self {
            enabled: true,
            violations: AtomicU64::new(0),
        }
    }

    /// Returns whether a mapping with the given permissions is allowed.
    ///
    /// While enforcement is enabled, a request that is simultaneously
    /// writable and executable is rejected and counted; everything else
    /// passes. With enforcement disabled, everything passes uncounted.
    pub fn check_flags(&self, writable: bool, executable: bool) -> bool {
        if !self.enabled {
            return true;
        }
        let violates = writable && executable;
        if violates {
            self.violations.fetch_add(1, Ordering::Relaxed);
        }
        !violates
    }

    /// Number of W+X violations observed so far.
    pub fn violation_count(&self) -> u64 {
        self.violations.load(Ordering::Relaxed)
    }

    /// Turns enforcement on or off.
    pub fn set_enabled(&mut self, enabled: bool) {
        self.enabled = enabled;
    }
}

impl Default for WxPolicy {
    fn default() -> Self {
        Self::new()
    }
}
248
249pub struct DepEnforcement {
254 enabled: bool,
255}
256
257impl DepEnforcement {
258 pub fn new() -> Self {
259 Self { enabled: true }
260 }
261
262 pub fn should_set_nx(&self, region: RegionType) -> bool {
266 if !self.enabled {
267 return false;
268 }
269 matches!(
270 region,
271 RegionType::Stack | RegionType::Heap | RegionType::Mmap
272 )
273 }
274
275 pub fn enforce_flags(&self, flags: u64, region: RegionType) -> u64 {
279 if self.should_set_nx(region) {
280 flags | (1u64 << 63)
282 } else {
283 flags
284 }
285 }
286}
287
288impl Default for DepEnforcement {
289 fn default() -> Self {
290 Self::new()
291 }
292}
293
/// Spectre-class speculative-execution mitigations (zero-sized helper type).
pub struct SpectreMitigation;

impl SpectreMitigation {
    /// Emits an architecture-specific speculation barrier.
    ///
    /// x86_64 uses `lfence`, aarch64 uses `csdb`, riscv uses a read-read
    /// fence; on any other architecture this compiles to a no-op.
    #[inline(always)]
    pub fn speculation_barrier() {
        #[cfg(target_arch = "x86_64")]
        // SAFETY: `lfence` only serializes pending loads; it reads/writes no
        // memory and clobbers no registers (hence nomem, nostack).
        unsafe {
            core::arch::asm!("lfence", options(nomem, nostack));
        }

        #[cfg(target_arch = "aarch64")]
        // SAFETY: `csdb` is a pure speculation barrier with no architectural
        // side effects.
        unsafe {
            core::arch::asm!("csdb", options(nomem, nostack));
        }

        #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
        // SAFETY: `fence r, r` orders reads only; no other effects.
        unsafe {
            core::arch::asm!("fence r, r", options(nomem, nostack));
        }
    }

    /// Bounds-checked array read hardened against Spectre v1.
    ///
    /// The barrier sits *after* the bounds comparison, on both branches, so
    /// neither the in-bounds load nor the default can be speculatively
    /// executed past a mispredicted check. Out-of-range indices yield
    /// `T::default()` instead of panicking.
    pub fn safe_array_access<T: Copy + Default>(arr: &[T], index: usize) -> T {
        if index < arr.len() {
            Self::speculation_barrier();
            arr[index]
        } else {
            Self::speculation_barrier();
            T::default()
        }
    }
}
339
/// Kernel Page Table Isolation (Meltdown mitigation): tracks separate
/// kernel/user CR3 values and switches between them at privilege boundaries.
pub struct Kpti {
    // Enabled only when compiled for x86_64.
    enabled: bool,
    // CR3 value for the user-mode page tables (0 = not yet configured).
    user_cr3: AtomicU64,
    // CR3 value for the kernel page tables (0 = not yet configured).
    kernel_cr3: AtomicU64,
}

impl Kpti {
    /// Creates KPTI state with both CR3 slots unset; enabled iff the build
    /// targets x86_64.
    pub fn new() -> Self {
        Self {
            enabled: cfg!(target_arch = "x86_64"),
            user_cr3: AtomicU64::new(0),
            kernel_cr3: AtomicU64::new(0),
        }
    }

    /// Records the CR3 values to use for the kernel and user address spaces.
    pub fn set_page_tables(&self, kernel_cr3: u64, user_cr3: u64) {
        self.kernel_cr3.store(kernel_cr3, Ordering::SeqCst);
        self.user_cr3.store(user_cr3, Ordering::SeqCst);
    }

    /// Whether KPTI switching is active.
    pub fn is_enabled(&self) -> bool {
        self.enabled
    }

    /// The recorded kernel CR3 value (0 if unset).
    pub fn kernel_cr3(&self) -> u64 {
        self.kernel_cr3.load(Ordering::SeqCst)
    }

    /// The recorded user CR3 value (0 if unset).
    pub fn user_cr3(&self) -> u64 {
        self.user_cr3.load(Ordering::SeqCst)
    }

    /// Loads the kernel CR3, provided KPTI is enabled and a value was set.
    #[cfg(target_arch = "x86_64")]
    pub fn switch_to_kernel(&self) {
        if !self.enabled {
            return;
        }
        let target = self.kernel_cr3.load(Ordering::SeqCst);
        if target == 0 {
            return;
        }
        // SAFETY: writing CR3 requires ring 0 and a valid page-table root;
        // callers run in kernel context with a CR3 set via `set_page_tables`.
        unsafe {
            core::arch::asm!("mov cr3, {}", in(reg) target, options(nostack));
        }
    }

    /// Loads the user CR3, provided KPTI is enabled and a value was set.
    #[cfg(target_arch = "x86_64")]
    pub fn switch_to_user(&self) {
        if !self.enabled {
            return;
        }
        let target = self.user_cr3.load(Ordering::SeqCst);
        if target == 0 {
            return;
        }
        // SAFETY: same contract as `switch_to_kernel`.
        unsafe {
            core::arch::asm!("mov cr3, {}", in(reg) target, options(nostack));
        }
    }
}

impl Default for Kpti {
    fn default() -> Self {
        Self::new()
    }
}
422
423pub struct MemoryProtection {
425 aslr: Aslr,
426 stack_canaries_enabled: bool,
427 guard_pages_enabled: bool,
428 dep_enabled: bool, wx_policy: WxPolicy,
430 dep_enforcement: DepEnforcement,
431 kpti: Kpti,
432}
433
434impl MemoryProtection {
435 pub fn new() -> Result<Self, KernelError> {
437 Ok(Self {
438 aslr: Aslr::new()?,
439 stack_canaries_enabled: true,
440 guard_pages_enabled: true,
441 dep_enabled: true,
442 wx_policy: WxPolicy::new(),
443 dep_enforcement: DepEnforcement::new(),
444 kpti: Kpti::new(),
445 })
446 }
447
448 pub fn aslr(&self) -> &Aslr {
450 &self.aslr
451 }
452
453 pub fn set_stack_canaries(&mut self, enabled: bool) {
455 self.stack_canaries_enabled = enabled;
456 }
457
458 pub fn stack_canaries_enabled(&self) -> bool {
460 self.stack_canaries_enabled
461 }
462
463 pub fn set_guard_pages(&mut self, enabled: bool) {
465 self.guard_pages_enabled = enabled;
466 }
467
468 pub fn guard_pages_enabled(&self) -> bool {
470 self.guard_pages_enabled
471 }
472
473 pub fn set_dep(&mut self, enabled: bool) {
475 self.dep_enabled = enabled;
476 }
477
478 pub fn dep_enabled(&self) -> bool {
480 self.dep_enabled
481 }
482
483 pub fn create_canary(&self) -> Option<StackCanary> {
485 if self.stack_canaries_enabled {
486 Some(StackCanary::new())
487 } else {
488 None
489 }
490 }
491
492 pub fn create_guard_page(&self, address: usize, size: usize) -> Option<GuardPage> {
494 if self.guard_pages_enabled {
495 Some(GuardPage::new(address, size))
496 } else {
497 None
498 }
499 }
500
501 pub fn wx_policy(&self) -> &WxPolicy {
503 &self.wx_policy
504 }
505
506 pub fn dep_enforcement(&self) -> &DepEnforcement {
508 &self.dep_enforcement
509 }
510
511 pub fn kpti(&self) -> &Kpti {
513 &self.kpti
514 }
515}
516
517impl Default for MemoryProtection {
518 fn default() -> Self {
519 Self::new().expect("Failed to create MemoryProtection")
520 }
521}
522
/// Global memory-protection singleton, set exactly once by `init`.
static MEMORY_PROTECTION: OnceLock<MemoryProtection> = OnceLock::new();

/// Initializes the global memory-protection subsystem.
///
/// # Errors
///
/// Returns `KernelError::AlreadyExists` if called more than once, and
/// propagates any error from `MemoryProtection::new`.
pub fn init() -> Result<(), KernelError> {
    MEMORY_PROTECTION
        .set(MemoryProtection::new()?)
        .map_err(|_| KernelError::AlreadyExists {
            resource: "memory_protection",
            id: 0,
        })?;

    crate::println!("[MEMORY-PROTECTION] ASLR, stack canaries, and guard pages enabled");
    Ok(())
}

/// Returns the global memory-protection instance.
///
/// # Panics
///
/// Panics if `init` has not completed successfully.
pub fn get_memory_protection() -> &'static MemoryProtection {
    MEMORY_PROTECTION
        .get()
        .expect("Memory protection not initialized")
}
545
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_aslr_randomization() {
        let aslr = Aslr::new().unwrap();

        let base = 0x400000;
        let addr1 = aslr.randomize_address(base, RegionType::Stack);
        let addr2 = aslr.randomize_address(base, RegionType::Stack);

        // Consecutive calls consume different entropy-pool slots; a collision
        // needs two RNG words sharing their low 28 bits, treated as
        // negligible here.
        assert_ne!(addr1, addr2);

        // Offsets are page-aligned and `base` is page-aligned, so the
        // results must be too.
        assert_eq!(addr1 & 0xFFF, 0);
        assert_eq!(addr2 & 0xFFF, 0);
    }

    #[test]
    fn test_stack_canary() {
        let canary = StackCanary::new();
        let value = canary.value();

        // The canary matches itself and rejects any perturbed value.
        assert!(canary.verify(value));
        assert!(!canary.verify(value ^ 1));
    }
}