⚠️ VeridianOS Kernel Documentation - This is low-level `no_std` kernel code. Unsafe operations are confined to explicit `unsafe` blocks with documented SAFETY comments.

veridian_kernel/security/memory_protection.rs

//! Memory Protection Features
//!
//! Implements ASLR, stack canaries, and other memory protection mechanisms.

#![allow(clippy::not_unsafe_ptr_arg_deref)]

use core::sync::atomic::{AtomicBool, AtomicU64, Ordering};

use spin::RwLock;

use crate::{crypto::random::get_random, error::KernelError, sync::once_lock::OnceLock};

/// ASLR (Address Space Layout Randomization) manager
pub struct Aslr {
    /// Base entropy for randomization
    entropy_pool: RwLock<[u64; 16]>,
    /// Counter for mixing
    counter: AtomicU64,
    /// Whether the entropy pool has been seeded
    seeded: AtomicBool,
}

impl Aslr {
    /// Create new ASLR instance (lightweight — defers CSPRNG to first use)
    ///
    /// Entropy seeding is deferred to `ensure_seeded()` to avoid deep crypto
    /// call chains during early boot, which can overflow the small x86_64
    /// kernel stack in debug mode.
    pub fn new() -> Result<Self, KernelError> {
        Ok(Self {
            entropy_pool: RwLock::new([0u64; 16]),
            counter: AtomicU64::new(0),
            seeded: AtomicBool::new(false),
        })
    }

    /// Seed the entropy pool from the CSPRNG (called lazily on first use)
    fn ensure_seeded(&self) {
        if self.seeded.load(Ordering::Acquire) {
            return;
        }

        let rng = get_random();
        let mut pool = self.entropy_pool.write();
        // Double-check after acquiring write lock
        if !self.seeded.load(Ordering::Relaxed) {
            let mut i = 0;
            while i < 16 {
                pool[i] = rng.next_u64();
                i += 1;
            }
            self.seeded.store(true, Ordering::Release);
        }
    }

    /// Randomize address for given address space region
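    ///
    /// # Example
    ///
    /// A minimal sketch; the base address below is hypothetical, and the real
    /// value comes from whatever address-space layout the caller uses.
    ///
    /// ```ignore
    /// let aslr = Aslr::new().unwrap();
    /// let base = 0x0000_7fff_0000_0000usize; // hypothetical stack base
    /// let addr = aslr.randomize_address(base, RegionType::Stack);
    /// assert_eq!(addr & 0xFFF, 0); // offset is 4 KiB page-aligned
    /// assert!(addr >= base && addr < base + (1 << 28)); // within the 28-bit stack range
    /// ```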
    pub fn randomize_address(&self, base: usize, region_type: RegionType) -> usize {
        self.ensure_seeded();
        let entropy = {
            let pool = self.entropy_pool.read();
            let index = (self.counter.fetch_add(1, Ordering::Relaxed) % 16) as usize;
            pool[index]
        };

        let randomization_bits = match region_type {
            RegionType::Stack => 28,      // 28 bits = 256MB range
            RegionType::Heap => 28,       // 28 bits = 256MB range
            RegionType::Executable => 24, // 24 bits = 16MB range
            RegionType::Library => 28,    // 28 bits = 256MB range
            RegionType::Mmap => 28,       // 28 bits = 256MB range
        };

        // Create mask for randomization
        let mask = (1u64 << randomization_bits) - 1;
        let random_offset = (entropy & mask) as usize;

        // Page-align the offset (4KB alignment)
        let aligned_offset = random_offset & !0xFFF;

        base.wrapping_add(aligned_offset)
    }

    /// Get a random stack canary value
    pub fn get_stack_canary(&self) -> u64 {
        let rng = get_random();
        rng.next_u64()
    }

    /// Refresh entropy pool
    pub fn refresh_entropy(&self) {
        let mut pool = self.entropy_pool.write();
        let rng = get_random();

        // Use index-based loop instead of iter_mut() to avoid AArch64 LLVM hang
        let mut i = 0;
        while i < 16 {
            pool[i] = rng.next_u64();
            i += 1;
        }
    }
}

impl Default for Aslr {
    fn default() -> Self {
        Self::new().expect("Failed to create ASLR")
    }
}

/// Address space region types for ASLR
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RegionType {
    Stack,
    Heap,
    Executable,
    Library,
    Mmap,
}

/// Stack canary for detecting buffer overflows
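///
/// # Example
///
/// A minimal sketch using a local `u64` to stand in for the canary slot on a
/// freshly allocated stack; in the kernel, the pointer comes from the process
/// stack setup path.
///
/// ```ignore
/// let canary = StackCanary::new();
/// let mut slot: u64 = 0; // stand-in for the canary slot on the stack
/// canary.place(&mut slot as *mut u64);
/// assert!(canary.check(&slot as *const u64));
/// slot ^= 0xdead_beef; // simulate an overflow clobbering the canary
/// assert!(!canary.check(&slot as *const u64));
/// ```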
pub struct StackCanary {
    /// Canary value
    value: u64,
}

impl StackCanary {
    /// Create new stack canary with random value
    pub fn new() -> Self {
        let rng = get_random();
        Self {
            value: rng.next_u64(),
        }
    }

    /// Get canary value
    pub fn value(&self) -> u64 {
        self.value
    }

    /// Verify canary hasn't been modified
    pub fn verify(&self, observed_value: u64) -> bool {
        self.value == observed_value
    }

    /// Place canary on stack
    pub fn place(&self, stack_ptr: *mut u64) {
        // SAFETY: The caller must ensure stack_ptr points to a valid, aligned, writable
        // u64 location within the process's stack. This is used during process creation
        // where the stack is freshly allocated and the canary location is computed from
        // the known stack base.
        unsafe {
            *stack_ptr = self.value;
        }
    }

    /// Check canary on stack
    pub fn check(&self, stack_ptr: *const u64) -> bool {
        // SAFETY: The caller must ensure stack_ptr points to a valid, aligned, readable
        // u64 location where a canary was previously placed via place(). If the canary
        // has been overwritten by a buffer overflow, this read is still safe (it
        // returns valid u64 data), but the comparison will fail indicating
        // corruption.
        unsafe { *stack_ptr == self.value }
    }
}

impl Default for StackCanary {
    fn default() -> Self {
        Self::new()
    }
}

/// Guard page for detecting stack overflow
pub struct GuardPage {
    /// Address of guard page
    address: usize,
    /// Size of guard page
    size: usize,
}

impl GuardPage {
    /// Create new guard page
    pub fn new(address: usize, size: usize) -> Self {
        Self { address, size }
    }

    /// Get guard page address
    pub fn address(&self) -> usize {
        self.address
    }

    /// Get guard page size
    pub fn size(&self) -> usize {
        self.size
    }

    /// Check if address is within guard page
    pub fn contains(&self, addr: usize) -> bool {
        addr >= self.address && addr < self.address + self.size
    }
}

/// W^X (Write XOR Execute) policy enforcement.
///
/// Ensures no memory page is both writable and executable simultaneously.
pub struct WxPolicy {
    enabled: bool,
    violations: AtomicU64,
}

impl WxPolicy {
    pub fn new() -> Self {
        Self {
            enabled: true,
            violations: AtomicU64::new(0),
        }
    }

    /// Check whether a page flags combination violates W^X.
    ///
    /// Returns `true` if the flags are safe (not both writable and executable).
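    ///
    /// # Example
    ///
    /// A minimal sketch of the intended call pattern; the mapping code that
    /// would consult this check is not shown here.
    ///
    /// ```ignore
    /// let policy = WxPolicy::new();
    /// assert!(policy.check_flags(true, false));  // writable data page: ok
    /// assert!(policy.check_flags(false, true));  // read-only code page: ok
    /// assert!(!policy.check_flags(true, true));  // W+X: violation recorded
    /// assert_eq!(policy.violation_count(), 1);
    /// ```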
    pub fn check_flags(&self, writable: bool, executable: bool) -> bool {
        if !self.enabled {
            return true;
        }
        if writable && executable {
            self.violations.fetch_add(1, Ordering::Relaxed);
            false
        } else {
            true
        }
    }

    /// Get the number of detected W^X violations.
    pub fn violation_count(&self) -> u64 {
        self.violations.load(Ordering::Relaxed)
    }

    pub fn set_enabled(&mut self, enabled: bool) {
        self.enabled = enabled;
    }
}

impl Default for WxPolicy {
    fn default() -> Self {
        Self::new()
    }
}

/// DEP (Data Execution Prevention) / NX enforcement.
///
/// Tracks pages that should have the NX bit set and provides helpers
/// for ensuring data pages are not executable.
pub struct DepEnforcement {
    enabled: bool,
}

impl DepEnforcement {
    pub fn new() -> Self {
        Self { enabled: true }
    }

    /// Determine whether a page in the given region type should have NX set.
    ///
    /// Data, heap, and stack pages should always be non-executable.
    pub fn should_set_nx(&self, region: RegionType) -> bool {
        if !self.enabled {
            return false;
        }
        matches!(
            region,
            RegionType::Stack | RegionType::Heap | RegionType::Mmap
        )
    }

    /// Apply NX bit to page table entry flags.
    ///
    /// Returns the flags with NO_EXECUTE added if the region type warrants it.
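    ///
    /// # Example
    ///
    /// A minimal sketch with made-up page table flag bits; only bit 63 (NX)
    /// is meaningful to this helper.
    ///
    /// ```ignore
    /// let dep = DepEnforcement::new();
    /// let flags = 0b011u64; // hypothetical PRESENT | WRITABLE bits
    /// assert_eq!(dep.enforce_flags(flags, RegionType::Heap) >> 63, 1); // NX set for data pages
    /// assert_eq!(dep.enforce_flags(flags, RegionType::Executable) >> 63, 0); // code stays executable
    /// ```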
    pub fn enforce_flags(&self, flags: u64, region: RegionType) -> u64 {
        if self.should_set_nx(region) {
            // NX bit is bit 63 on x86_64 page table entries
            flags | (1u64 << 63)
        } else {
            flags
        }
    }
}

impl Default for DepEnforcement {
    fn default() -> Self {
        Self::new()
    }
}

/// Spectre v1 mitigation helpers.
pub struct SpectreMitigation;

impl SpectreMitigation {
    /// Insert a speculation barrier after a bounds check.
    ///
    /// On x86_64 this emits LFENCE, on AArch64 CSDB, on RISC-V FENCE.
    #[inline(always)]
    pub fn speculation_barrier() {
        #[cfg(target_arch = "x86_64")]
        // SAFETY: LFENCE serializes instruction execution, preventing speculative
        // reads past this point. No memory or stack effects.
        unsafe {
            core::arch::asm!("lfence", options(nomem, nostack));
        }

        #[cfg(target_arch = "aarch64")]
        // SAFETY: CSDB (Consumption of Speculative Data Barrier) prevents
        // speculative data access. No memory or stack effects.
        unsafe {
            core::arch::asm!("csdb", options(nomem, nostack));
        }

        #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
        // SAFETY: FENCE R,R orders prior reads before subsequent reads,
        // serving as a speculation barrier. No memory or stack effects.
        unsafe {
            core::arch::asm!("fence r, r", options(nomem, nostack));
        }
    }

    /// Bounds-checked array access with speculation barrier.
    ///
    /// Returns the value at `index` if in bounds, otherwise returns the
    /// default value. Always inserts a speculation barrier after the check.
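    ///
    /// # Example
    ///
    /// A minimal sketch; the table contents are arbitrary.
    ///
    /// ```ignore
    /// let table = [10u32, 20, 30];
    /// assert_eq!(SpectreMitigation::safe_array_access(&table, 1), 20);
    /// assert_eq!(SpectreMitigation::safe_array_access(&table, 9), 0); // out of bounds -> default
    /// ```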
    pub fn safe_array_access<T: Copy + Default>(arr: &[T], index: usize) -> T {
        if index < arr.len() {
            Self::speculation_barrier();
            arr[index]
        } else {
            Self::speculation_barrier();
            T::default()
        }
    }
}

/// KPTI (Kernel Page Table Isolation) support for Meltdown mitigation.
///
/// On x86_64, this manages separate kernel and user page tables so that
/// kernel memory is not mapped in user-space page tables.
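///
/// # Example
///
/// A minimal sketch; the CR3 values are placeholders for the physical
/// addresses of real PML4 tables built by the paging code.
///
/// ```ignore
/// let kpti = Kpti::new();
/// if kpti.is_enabled() {
///     kpti.set_page_tables(0x1000, 0x2000); // hypothetical kernel/user PML4 roots
///     assert_eq!(kpti.kernel_cr3(), 0x1000);
///     assert_eq!(kpti.user_cr3(), 0x2000);
/// }
/// ```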
pub struct Kpti {
    /// Whether KPTI is enabled
    enabled: bool,
    /// Address of user page table (CR3 value for user mode)
    user_cr3: AtomicU64,
    /// Address of kernel page table (CR3 value for kernel mode)
    kernel_cr3: AtomicU64,
}

impl Kpti {
    pub fn new() -> Self {
        Self {
            // KPTI is only relevant on x86_64
            enabled: cfg!(target_arch = "x86_64"),
            user_cr3: AtomicU64::new(0),
            kernel_cr3: AtomicU64::new(0),
        }
    }

    /// Set page table addresses for KPTI.
    pub fn set_page_tables(&self, kernel_cr3: u64, user_cr3: u64) {
        self.kernel_cr3.store(kernel_cr3, Ordering::SeqCst);
        self.user_cr3.store(user_cr3, Ordering::SeqCst);
    }

    /// Check if KPTI is enabled.
    pub fn is_enabled(&self) -> bool {
        self.enabled
    }

    /// Get the kernel page table address.
    pub fn kernel_cr3(&self) -> u64 {
        self.kernel_cr3.load(Ordering::SeqCst)
    }

    /// Get the user page table address.
    pub fn user_cr3(&self) -> u64 {
        self.user_cr3.load(Ordering::SeqCst)
    }

    /// Switch to kernel page table (called on syscall entry / interrupt).
    #[cfg(target_arch = "x86_64")]
    pub fn switch_to_kernel(&self) {
        if !self.enabled {
            return;
        }
        let cr3 = self.kernel_cr3.load(Ordering::SeqCst);
        if cr3 != 0 {
            // SAFETY: Writing CR3 switches the page table root. cr3 was
            // previously set via set_page_tables and points to a valid PML4.
            unsafe {
                core::arch::asm!("mov cr3, {}", in(reg) cr3, options(nostack));
            }
        }
    }

    /// Switch to user page table (called on syscall exit / iret).
    #[cfg(target_arch = "x86_64")]
    pub fn switch_to_user(&self) {
        if !self.enabled {
            return;
        }
        let cr3 = self.user_cr3.load(Ordering::SeqCst);
        if cr3 != 0 {
            // SAFETY: Writing CR3 switches the page table root. cr3 was
            // previously set via set_page_tables and points to a valid PML4.
            unsafe {
                core::arch::asm!("mov cr3, {}", in(reg) cr3, options(nostack));
            }
        }
    }
}

impl Default for Kpti {
    fn default() -> Self {
        Self::new()
    }
}

/// Memory protection manager
pub struct MemoryProtection {
    aslr: Aslr,
    stack_canaries_enabled: bool,
    guard_pages_enabled: bool,
    dep_enabled: bool, // Data Execution Prevention
    wx_policy: WxPolicy,
    dep_enforcement: DepEnforcement,
    kpti: Kpti,
}

impl MemoryProtection {
    /// Create new memory protection manager
    pub fn new() -> Result<Self, KernelError> {
        Ok(Self {
            aslr: Aslr::new()?,
            stack_canaries_enabled: true,
            guard_pages_enabled: true,
            dep_enabled: true,
            wx_policy: WxPolicy::new(),
            dep_enforcement: DepEnforcement::new(),
            kpti: Kpti::new(),
        })
    }

    /// Get ASLR instance
    pub fn aslr(&self) -> &Aslr {
        &self.aslr
    }

    /// Enable/disable stack canaries
    pub fn set_stack_canaries(&mut self, enabled: bool) {
        self.stack_canaries_enabled = enabled;
    }

    /// Check if stack canaries are enabled
    pub fn stack_canaries_enabled(&self) -> bool {
        self.stack_canaries_enabled
    }

    /// Enable/disable guard pages
    pub fn set_guard_pages(&mut self, enabled: bool) {
        self.guard_pages_enabled = enabled;
    }

    /// Check if guard pages are enabled
    pub fn guard_pages_enabled(&self) -> bool {
        self.guard_pages_enabled
    }

    /// Enable/disable DEP
    pub fn set_dep(&mut self, enabled: bool) {
        self.dep_enabled = enabled;
    }

    /// Check if DEP is enabled
    pub fn dep_enabled(&self) -> bool {
        self.dep_enabled
    }

    /// Create stack canary if enabled
    pub fn create_canary(&self) -> Option<StackCanary> {
        if self.stack_canaries_enabled {
            Some(StackCanary::new())
        } else {
            None
        }
    }

    /// Create guard page if enabled
    pub fn create_guard_page(&self, address: usize, size: usize) -> Option<GuardPage> {
        if self.guard_pages_enabled {
            Some(GuardPage::new(address, size))
        } else {
            None
        }
    }

    /// Get W^X policy reference
    pub fn wx_policy(&self) -> &WxPolicy {
        &self.wx_policy
    }

    /// Get DEP enforcement reference
    pub fn dep_enforcement(&self) -> &DepEnforcement {
        &self.dep_enforcement
    }

    /// Get KPTI reference
    pub fn kpti(&self) -> &Kpti {
        &self.kpti
    }
}

impl Default for MemoryProtection {
    fn default() -> Self {
        Self::new().expect("Failed to create MemoryProtection")
    }
}

/// Global memory protection instance
static MEMORY_PROTECTION: OnceLock<MemoryProtection> = OnceLock::new();

/// Initialize memory protection
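///
/// # Example
///
/// A minimal sketch of the expected boot-time call pattern; the surrounding
/// boot sequence is not shown.
///
/// ```ignore
/// if init().is_ok() {
///     let mp = get_memory_protection();
///     assert!(mp.stack_canaries_enabled());
/// }
/// ```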
pub fn init() -> Result<(), KernelError> {
    MEMORY_PROTECTION
        .set(MemoryProtection::new()?)
        .map_err(|_| KernelError::AlreadyExists {
            resource: "memory_protection",
            id: 0,
        })?;

    crate::println!("[MEMORY-PROTECTION] ASLR, stack canaries, and guard pages enabled");
    Ok(())
}

/// Get global memory protection instance
pub fn get_memory_protection() -> &'static MemoryProtection {
    MEMORY_PROTECTION
        .get()
        .expect("Memory protection not initialized")
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_aslr_randomization() {
        let aslr = Aslr::new().unwrap();

        let base = 0x400000;
        let addr1 = aslr.randomize_address(base, RegionType::Stack);
        let addr2 = aslr.randomize_address(base, RegionType::Stack);

        // Addresses should be different
        assert_ne!(addr1, addr2);

        // Addresses should be page-aligned
        assert_eq!(addr1 & 0xFFF, 0);
        assert_eq!(addr2 & 0xFFF, 0);
    }

    #[test]
    fn test_stack_canary() {
        let canary = StackCanary::new();
        let value = canary.value();

        assert!(canary.verify(value));
        assert!(!canary.verify(value ^ 1));
    }
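
    // Illustrative additional tests for guard page containment and the
    // MemoryProtection enable/disable toggles; they use only APIs defined
    // in this module.

    #[test]
    fn test_guard_page_contains() {
        let guard = GuardPage::new(0x1000, 0x1000);
        assert!(guard.contains(0x1000));
        assert!(guard.contains(0x1FFF));
        assert!(!guard.contains(0x2000));
    }

    #[test]
    fn test_memory_protection_toggles() {
        let mut mp = MemoryProtection::new().unwrap();

        // Canaries and guard pages are on by default and can be disabled.
        assert!(mp.create_canary().is_some());
        mp.set_stack_canaries(false);
        assert!(mp.create_canary().is_none());

        assert!(mp.create_guard_page(0x1000, 0x1000).is_some());
        mp.set_guard_pages(false);
        assert!(mp.create_guard_page(0x1000, 0x1000).is_none());
    }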
}