⚠️ VeridianOS Kernel Documentation - This is low-level `no_std` kernel code. All functions are unsafe unless explicitly marked otherwise.

veridian_kernel/security/kaslr.rs

//! Kernel Address Space Layout Randomization (KASLR)
//!
//! Provides address randomization for kernel text, heap, stack, and module
//! load addresses. Uses architecture-specific hardware entropy sources
//! (RDRAND on x86_64, RNDR on AArch64) with an xorshift64 PRNG fallback
//! for RISC-V and other architectures.
//!
//! # Design
//!
//! KASLR offsets are computed once during boot and stored in a global
//! `KaslrState` protected by a `RwLock`. The state includes:
//!
//! - **text_offset**: Randomized slide for kernel text/code
//! - **heap_offset**: Randomized base offset for the kernel heap
//! - **stack_offset**: Default per-thread stack randomization quantum
//! - **module_base**: Randomized base for driver/module loading
//!
//! Runtime re-randomization can refresh offsets for long-running systems,
//! though the kernel text offset is typically fixed at boot.
//!
//! # Entropy Sources
//!
//! | Architecture | Primary Source | Fallback |
//! |-------------|---------------|----------|
//! | x86_64 | RDRAND | xorshift64 (TSC seed) |
//! | AArch64 | RNDR | xorshift64 (CNTPCT seed) |
//! | RISC-V | N/A | xorshift64 (cycle seed) |

use core::sync::atomic::{AtomicBool, AtomicU64, Ordering};

use spin::RwLock;

use crate::error::KernelError;

// ---------------------------------------------------------------------------
// Constants
// ---------------------------------------------------------------------------

/// Maximum randomization for kernel text offset (2 MB aligned, 8 MB range).
/// This keeps the kernel within a manageable range while providing meaningful
/// randomization (4 possible positions at 2 MB granularity).
const TEXT_RANDOM_BITS: u32 = 23; // 8 MB range
const TEXT_ALIGNMENT: usize = 0x20_0000; // 2 MB alignment (huge page)

/// Maximum randomization for heap base (4 KB aligned, 256 MB range).
const HEAP_RANDOM_BITS: u32 = 28; // 256 MB range
const HEAP_ALIGNMENT: usize = 0x1000; // 4 KB alignment (page)

/// Maximum per-thread stack randomization (16 bytes aligned, 16 KB range).
const STACK_RANDOM_BITS: u32 = 14; // 16 KB range
const STACK_ALIGNMENT: usize = 16; // 16-byte alignment (ABI requirement)

/// Maximum randomization for module/driver load base (4 KB aligned, 64 MB
/// range).
const MODULE_RANDOM_BITS: u32 = 26; // 64 MB range
const MODULE_ALIGNMENT: usize = 0x1000; // 4 KB alignment (page)

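// Worked example (derived directly from the constants above): each region has
// 2^RANDOM_BITS / ALIGNMENT distinct positions, i.e. the effective entropy is
// RANDOM_BITS - log2(ALIGNMENT) bits:
//
//   text:   2^23 / 2^21 = 4 positions       (2 bits of entropy)
//   heap:   2^28 / 2^12 = 65,536 positions  (16 bits of entropy)
//   stack:  2^14 / 2^4  = 1,024 positions   (10 bits of entropy)
//   module: 2^26 / 2^12 = 16,384 positions  (14 bits of entropy)
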
// ---------------------------------------------------------------------------
// Xorshift64 PRNG
// ---------------------------------------------------------------------------

/// Simple xorshift64 PRNG for generating randomness from a seed.
///
/// This is NOT cryptographically secure -- it is used only for address
/// randomization where the primary goal is making addresses unpredictable
/// to remote attackers, not resisting local analysis.
struct Xorshift64 {
    state: u64,
}

impl Xorshift64 {
    /// Create a new xorshift64 PRNG with the given seed.
    /// If seed is 0, uses a fixed non-zero value to avoid degenerate state.
    fn new(seed: u64) -> Self {
        Self {
            state: if seed == 0 {
                0xDEAD_BEEF_CAFE_BABE
            } else {
                seed
            },
        }
    }

    /// Generate the next pseudo-random u64 value.
    fn next(&mut self) -> u64 {
        let mut x = self.state;
        x ^= x << 13;
        x ^= x >> 7;
        x ^= x << 17;
        self.state = x;
        x
    }
}

// ---------------------------------------------------------------------------
// Architecture-specific entropy
// ---------------------------------------------------------------------------

/// Gather a 64-bit entropy value from the best available hardware source.
fn get_hardware_entropy() -> u64 {
    #[cfg(target_arch = "x86_64")]
    {
        // Try RDRAND first (check CPUID.01H:ECX.RDRAND[bit 30])
        if rdrand_available() {
            if let Some(val) = rdrand64() {
                return val;
            }
        }
        // Fallback: use TSC as seed for xorshift
        let tsc = read_tsc();
        let mut rng = Xorshift64::new(tsc);
        rng.next()
    }

    #[cfg(target_arch = "aarch64")]
    {
        // Try RNDR (ARMv8.5 Random Number)
        if let Some(val) = rndr64() {
            return val;
        }
        // Fallback: use CNTPCT_EL0 (physical counter) as seed
        let cnt = read_cntpct();
        let mut rng = Xorshift64::new(cnt);
        rng.next()
    }

    #[cfg(target_arch = "riscv64")]
    {
        // No hardware RNG instruction on most RISC-V cores
        // Use cycle counter as seed for xorshift
        let cycles = read_cycle();
        let mut rng = Xorshift64::new(cycles);
        rng.next()
    }

    // Fallback for targets without an architecture-specific entropy source
    // (kept so host-side CI/test builds of this module still compile).
    #[cfg(not(any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "riscv64"
    )))]
    {
        // Deterministic fallback for unsupported architectures
        let mut rng = Xorshift64::new(0x1234_5678_9ABC_DEF0);
        rng.next()
    }
}

// -- x86_64 helpers --

#[cfg(target_arch = "x86_64")]
fn rdrand_available() -> bool {
    let ecx: u32;
    // SAFETY: CPUID is a non-privileged instruction that reads CPU feature
    // flags. We request leaf 1 (basic features). CPUID clobbers eax, ebx, ecx,
    // and edx, so eax/ecx are inout operands and edx is a discarded output.
    // LLVM reserves rbx, so we preserve it in a scratch register rather than
    // on the stack (push/pop would contradict `nostack`).
    unsafe {
        core::arch::asm!(
            "mov {rbx_save}, rbx",
            "cpuid",
            "mov rbx, {rbx_save}",
            rbx_save = out(reg) _,
            inout("eax") 1u32 => _,
            inout("ecx") 0u32 => ecx,
            lateout("edx") _,
            options(nomem, nostack),
        );
    }
    (ecx & (1 << 30)) != 0
}

#[cfg(target_arch = "x86_64")]
fn rdrand64() -> Option<u64> {
    let mut val: u64;
    let success: u8;
    // SAFETY: RDRAND reads from the hardware RNG and sets CF on success.
    // No memory or stack effects.
    unsafe {
        core::arch::asm!(
            "rdrand {val}",
            "setc {success}",
            val = out(reg) val,
            success = out(reg_byte) success,
            options(nomem, nostack),
        );
    }
    if success != 0 {
        Some(val)
    } else {
        None
    }
}

#[cfg(target_arch = "x86_64")]
fn read_tsc() -> u64 {
    let lo: u32;
    let hi: u32;
    // SAFETY: RDTSC reads the timestamp counter. No memory or stack effects.
    unsafe {
        core::arch::asm!(
            "rdtsc",
            out("eax") lo,
            out("edx") hi,
            options(nomem, nostack),
        );
    }
    ((hi as u64) << 32) | (lo as u64)
}

// -- AArch64 helpers --

#[cfg(target_arch = "aarch64")]
fn rndr64() -> Option<u64> {
    // RNDR is available on ARMv8.5+ (ID_AA64ISAR0_EL1.RNDR != 0).
    // On older cores this instruction is UNDEFINED, so we check first.
    // For simplicity and safety on QEMU virt, we skip RNDR and use fallback.
    // A production implementation would check ID_AA64ISAR0_EL1 bits [63:60].
    None
}
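
// Minimal sketch of the feature-checked RNDR path mentioned above, kept as an
// unused illustration rather than wired into `get_hardware_entropy`: it reads
// ID_AA64ISAR0_EL1 bits [63:60] to confirm FEAT_RNG, then reads RNDR via its
// raw system-register encoding (S3_3_C2_C4_0). The function name and structure
// here are assumptions, not the shipped implementation.
#[cfg(target_arch = "aarch64")]
#[allow(dead_code)]
fn rndr64_checked() -> Option<u64> {
    let isar0: u64;
    // SAFETY: ID_AA64ISAR0_EL1 is readable at EL1. No memory or stack effects.
    unsafe {
        core::arch::asm!(
            "mrs {}, id_aa64isar0_el1",
            out(reg) isar0,
            options(nomem, nostack),
        );
    }
    if (isar0 >> 60) & 0xF == 0 {
        return None; // FEAT_RNG not implemented
    }
    let val: u64;
    let ok: u64;
    // SAFETY: RNDR (S3_3_C2_C4_0) is implemented per the check above. On
    // failure the CPU sets PSTATE.Z, so `cset ..., ne` yields 1 on success.
    unsafe {
        core::arch::asm!(
            "mrs {val}, s3_3_c2_c4_0",
            "cset {ok}, ne",
            val = out(reg) val,
            ok = out(reg) ok,
            options(nomem, nostack),
        );
    }
    if ok != 0 { Some(val) } else { None }
}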

#[cfg(target_arch = "aarch64")]
fn read_cntpct() -> u64 {
    let cnt: u64;
    // SAFETY: CNTPCT_EL0 reads the physical counter register.
    // Available from EL0 upward. No memory or stack effects.
    unsafe {
        core::arch::asm!(
            "mrs {}, cntpct_el0",
            out(reg) cnt,
            options(nomem, nostack),
        );
    }
    cnt
}

// -- RISC-V helpers --

#[cfg(target_arch = "riscv64")]
fn read_cycle() -> u64 {
    let cycles: u64;
    // SAFETY: Reading the `cycle` CSR via `rdcycle`. On QEMU virt this
    // may trap (SIGILL) if the CSR is not implemented; in that case the
    // SBI trap handler returns 0 and we fall through to the xorshift.
    // No memory or stack effects.
    unsafe {
        core::arch::asm!(
            "rdcycle {}",
            out(reg) cycles,
            options(nomem, nostack),
        );
    }
    cycles
}

// ---------------------------------------------------------------------------
// KASLR State
// ---------------------------------------------------------------------------

/// Current KASLR offsets and PRNG state.
pub struct KaslrState {
    /// Kernel text randomization offset (applied at boot)
    pub text_offset: usize,
    /// Kernel heap base randomization offset
    pub heap_offset: usize,
    /// Default per-thread stack randomization offset
    pub stack_offset: usize,
    /// Module/driver load base randomization offset
    pub module_base: usize,
    /// Internal PRNG for generating additional random offsets
    prng: Xorshift64,
    /// Number of re-randomizations performed
    pub rerandomize_count: u64,
}

impl KaslrState {
    /// Create a new KASLR state seeded from hardware entropy.
    fn new() -> Self {
        let seed = get_hardware_entropy();
        let mut prng = Xorshift64::new(seed);

        let text_offset = Self::aligned_random(&mut prng, TEXT_RANDOM_BITS, TEXT_ALIGNMENT);
        let heap_offset = Self::aligned_random(&mut prng, HEAP_RANDOM_BITS, HEAP_ALIGNMENT);
        let stack_offset = Self::aligned_random(&mut prng, STACK_RANDOM_BITS, STACK_ALIGNMENT);
        let module_base = Self::aligned_random(&mut prng, MODULE_RANDOM_BITS, MODULE_ALIGNMENT);

        Self {
            text_offset,
            heap_offset,
            stack_offset,
            module_base,
            prng,
            rerandomize_count: 0,
        }
    }

    /// Generate an aligned random offset within the given bit range.
    fn aligned_random(prng: &mut Xorshift64, bits: u32, alignment: usize) -> usize {
        let mask = (1u64 << bits) - 1;
        let raw = (prng.next() & mask) as usize;
        // Align down to the required alignment
        raw & !(alignment - 1)
    }

    /// Re-randomize non-text offsets for long-running systems.
    ///
    /// The text offset cannot be changed at runtime since code is already
    /// loaded, but heap, stack, and module offsets can be refreshed.
    fn rerandomize(&mut self) {
        // Mix in fresh hardware entropy
        let fresh = get_hardware_entropy();
        self.prng.state ^= fresh;
        // Advance PRNG state
        let _ = self.prng.next();

        self.heap_offset = Self::aligned_random(&mut self.prng, HEAP_RANDOM_BITS, HEAP_ALIGNMENT);
        self.stack_offset =
            Self::aligned_random(&mut self.prng, STACK_RANDOM_BITS, STACK_ALIGNMENT);
        self.module_base =
            Self::aligned_random(&mut self.prng, MODULE_RANDOM_BITS, MODULE_ALIGNMENT);
        self.rerandomize_count += 1;
    }

    /// Generate a random stack offset for a new thread.
    ///
    /// Returns a random offset in the range [0, 16 KB), aligned to 16 bytes,
    /// that should be subtracted from the thread's stack base to randomize
    /// its starting stack pointer.
    fn random_stack_offset(&mut self) -> usize {
        Self::aligned_random(&mut self.prng, STACK_RANDOM_BITS, STACK_ALIGNMENT)
    }

    /// Generate a random module load base address.
    ///
    /// Returns a page-aligned random offset to add to the default module
    /// load region base.
    fn random_module_offset(&mut self) -> usize {
        Self::aligned_random(&mut self.prng, MODULE_RANDOM_BITS, MODULE_ALIGNMENT)
    }
}

// ---------------------------------------------------------------------------
// Global State
// ---------------------------------------------------------------------------

/// Global KASLR offsets, protected by RwLock for concurrent read access.
static KASLR_OFFSETS: RwLock<Option<KaslrState>> = RwLock::new(None);

/// Whether KASLR has been initialized.
static KASLR_INITIALIZED: AtomicBool = AtomicBool::new(false);

/// Monotonic counter for re-randomization events.
static RERANDOMIZE_COUNT: AtomicU64 = AtomicU64::new(0);

// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------

/// Initialize the KASLR subsystem.
///
/// Gathers hardware entropy and computes initial randomization offsets.
/// Must be called once during boot before any offset queries.
pub fn init() -> Result<(), KernelError> {
    if KASLR_INITIALIZED.load(Ordering::Acquire) {
        return Err(KernelError::AlreadyExists {
            resource: "kaslr",
            id: 0,
        });
    }

    let state = KaslrState::new();

    crate::println!(
        "[KASLR] Text offset: {:#x}, Heap offset: {:#x}, Stack offset: {:#x}, Module base: {:#x}",
        state.text_offset,
        state.heap_offset,
        state.stack_offset,
        state.module_base,
    );

    {
        let mut offsets = KASLR_OFFSETS.write();
        *offsets = Some(state);
    }

    KASLR_INITIALIZED.store(true, Ordering::Release);
    crate::println!("[KASLR] Kernel address space layout randomization initialized");
    Ok(())
}
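
// Illustrative boot-order sketch (the call site and `kmain_early` name are
// hypothetical; the real boot path lives outside this module): `init` must run
// before the heap, thread stacks, or module loader consume any offsets, since
// the getters below silently return 0 / the unmodified base when KASLR is
// uninitialized.
//
// fn kmain_early() {
//     // ... memory map parsed, console up ...
//     if let Err(e) = crate::security::kaslr::init() {
//         crate::println!("[BOOT] KASLR init failed: {:?}", e);
//     }
//     // Heap and scheduler init may now query kaslr::get_heap_offset(), etc.
// }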

/// Get the kernel text randomization offset.
///
/// Returns 0 if KASLR is not initialized.
pub fn get_text_offset() -> usize {
    if !KASLR_INITIALIZED.load(Ordering::Acquire) {
        return 0;
    }
    let offsets = KASLR_OFFSETS.read();
    offsets.as_ref().map_or(0, |s| s.text_offset)
}

/// Get the kernel heap base randomization offset.
///
/// Returns 0 if KASLR is not initialized.
pub fn get_heap_offset() -> usize {
    if !KASLR_INITIALIZED.load(Ordering::Acquire) {
        return 0;
    }
    let offsets = KASLR_OFFSETS.read();
    offsets.as_ref().map_or(0, |s| s.heap_offset)
}
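
// How a caller would typically apply the heap offset (sketch only; the
// KERNEL_HEAP_BASE / KERNEL_HEAP_SIZE constants and `heap::init` are
// hypothetical names, not part of this module):
//
// let heap_start = KERNEL_HEAP_BASE + kaslr::get_heap_offset();
// // The offset is already HEAP_ALIGNMENT (4 KB) aligned, so heap_start stays
// // page-aligned as long as KERNEL_HEAP_BASE is page-aligned.
// unsafe { heap::init(heap_start, KERNEL_HEAP_SIZE) };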

/// Randomize a thread's stack base by subtracting a random offset.
///
/// Given a stack `base` address (top of stack), returns a new address
/// with a random downward offset applied (up to 16 KB, 16-byte aligned).
///
/// Returns the base unchanged if KASLR is not initialized.
pub fn randomize_stack(base: usize) -> usize {
    if !KASLR_INITIALIZED.load(Ordering::Acquire) {
        return base;
    }
    let mut offsets = KASLR_OFFSETS.write();
    if let Some(state) = offsets.as_mut() {
        let offset = state.random_stack_offset();
        base.saturating_sub(offset)
    } else {
        base
    }
}
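
// Sketch of the intended call site when spawning a kernel thread (the stack
// allocation and context helpers named here are hypothetical, not part of
// this module):
//
// let stack_top = stack_region_end;                // highest usable address
// let sp = kaslr::randomize_stack(stack_top);      // slide down by < 16 KB
// // The offset is 16-byte aligned, so a 16-byte-aligned stack_top keeps the
// // ABI-required alignment of the initial stack pointer.
// thread.context.set_stack_pointer(sp);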

/// Get a randomized module/driver load base address.
///
/// Returns a page-aligned random offset suitable for adding to the
/// default module load region. Each call produces a different offset
/// so different modules get different addresses.
///
/// Returns 0 if KASLR is not initialized.
pub fn get_module_base() -> usize {
    if !KASLR_INITIALIZED.load(Ordering::Acquire) {
        return 0;
    }
    let mut offsets = KASLR_OFFSETS.write();
    if let Some(state) = offsets.as_mut() {
        state.random_module_offset()
    } else {
        0
    }
}
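
// Sketch of use by a module loader (MODULE_REGION_BASE and `loader::map_module`
// are hypothetical names owned by the loader, not this module): each module
// gets a fresh page-aligned slide within the 64 MB window.
//
// let load_addr = MODULE_REGION_BASE + kaslr::get_module_base();
// loader::map_module(elf_image, load_addr)?;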

/// Re-randomize non-text KASLR offsets.
///
/// Call periodically on long-running systems to refresh randomization.
/// The kernel text offset cannot be changed since code is already mapped.
///
/// Returns the new re-randomization count, or an error if not initialized.
pub fn rerandomize() -> Result<u64, KernelError> {
    if !KASLR_INITIALIZED.load(Ordering::Acquire) {
        return Err(KernelError::NotInitialized { subsystem: "kaslr" });
    }

    let mut offsets = KASLR_OFFSETS.write();
    if let Some(state) = offsets.as_mut() {
        state.rerandomize();
        let count = state.rerandomize_count;
        RERANDOMIZE_COUNT.store(count, Ordering::Relaxed);
        Ok(count)
    } else {
        Err(KernelError::NotInitialized { subsystem: "kaslr" })
    }
}
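
// Sketch of a periodic caller (the timer API shown is hypothetical). Note that
// re-randomization only affects offsets handed out *after* the call; heaps,
// stacks, and modules that are already mapped keep their old addresses.
//
// timer::every(Duration::from_secs(3600), || {
//     if let Ok(n) = kaslr::rerandomize() {
//         crate::println!("[KASLR] re-randomized ({} total)", n);
//     }
// });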

/// Check if KASLR is initialized and active.
pub fn is_active() -> bool {
    KASLR_INITIALIZED.load(Ordering::Acquire)
}

/// Get the number of re-randomizations performed.
pub fn rerandomize_count() -> u64 {
    RERANDOMIZE_COUNT.load(Ordering::Relaxed)
}

/// Get a snapshot of current KASLR offsets for diagnostics.
///
/// Returns `(text_offset, heap_offset, stack_offset, module_base)`.
pub fn get_offsets() -> (usize, usize, usize, usize) {
    if !KASLR_INITIALIZED.load(Ordering::Acquire) {
        return (0, 0, 0, 0);
    }
    let offsets = KASLR_OFFSETS.read();
    offsets.as_ref().map_or((0, 0, 0, 0), |s| {
        (s.text_offset, s.heap_offset, s.stack_offset, s.module_base)
    })
}

// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_xorshift64_nonzero() {
        let mut rng = Xorshift64::new(42);
        let val = rng.next();
        assert_ne!(val, 0, "xorshift64 should produce non-zero output");
    }

    #[test]
    fn test_xorshift64_different_values() {
        let mut rng = Xorshift64::new(42);
        let a = rng.next();
        let b = rng.next();
        assert_ne!(a, b, "consecutive xorshift64 values should differ");
    }

    #[test]
    fn test_xorshift64_zero_seed_handled() {
        // Zero seed should be replaced with a non-zero constant
        let mut rng = Xorshift64::new(0);
        let val = rng.next();
        assert_ne!(val, 0);
    }

    #[test]
    fn test_aligned_random_page_aligned() {
        let mut rng = Xorshift64::new(0xDEAD);
        for _ in 0..100 {
            let offset = KaslrState::aligned_random(&mut rng, 28, 0x1000);
            assert_eq!(offset & 0xFFF, 0, "offset must be page-aligned");
        }
    }

    #[test]
    fn test_aligned_random_16_byte_aligned() {
        let mut rng = Xorshift64::new(0xBEEF);
        for _ in 0..100 {
            let offset = KaslrState::aligned_random(&mut rng, 14, 16);
            assert_eq!(offset & 0xF, 0, "offset must be 16-byte aligned");
        }
    }

    #[test]
    fn test_aligned_random_within_range() {
        let mut rng = Xorshift64::new(0xCAFE);
        for _ in 0..1000 {
            let offset = KaslrState::aligned_random(&mut rng, 14, 16);
            assert!(offset < (1 << 14), "offset must be within 16 KB range");
        }
    }

    #[test]
    fn test_aligned_random_2mb_aligned() {
        let mut rng = Xorshift64::new(0xFACE);
        for _ in 0..100 {
            let offset = KaslrState::aligned_random(&mut rng, 23, 0x20_0000);
            assert_eq!(offset & 0x1F_FFFF, 0, "text offset must be 2 MB aligned");
        }
    }

    #[test]
    fn test_kaslr_state_creation() {
        let state = KaslrState::new();

        // Text offset should be 2 MB aligned
        assert_eq!(state.text_offset & (TEXT_ALIGNMENT - 1), 0);
        // Heap offset should be page-aligned
        assert_eq!(state.heap_offset & (HEAP_ALIGNMENT - 1), 0);
        // Stack offset should be 16-byte aligned
        assert_eq!(state.stack_offset & (STACK_ALIGNMENT - 1), 0);
        // Module base should be page-aligned
        assert_eq!(state.module_base & (MODULE_ALIGNMENT - 1), 0);
    }

    #[test]
    fn test_kaslr_state_rerandomize() {
        let mut state = KaslrState::new();
        let old_heap = state.heap_offset;
        let old_module = state.module_base;

        state.rerandomize();

        // We deliberately do not assert that the offsets changed: with random
        // entropy a repeated value is unlikely but possible and would make the
        // test flaky. We only check the counter and that alignment still holds.
        assert_eq!(state.rerandomize_count, 1);
        assert_eq!(state.heap_offset & (HEAP_ALIGNMENT - 1), 0);
        assert_eq!(state.module_base & (MODULE_ALIGNMENT - 1), 0);

        // Kept only to document what a stricter (but flaky) check would compare.
        let _ = old_heap;
        let _ = old_module;
    }

    #[test]
    fn test_random_stack_offset_aligned() {
        let mut state = KaslrState::new();
        for _ in 0..100 {
            let offset = state.random_stack_offset();
            assert_eq!(offset & 0xF, 0, "stack offset must be 16-byte aligned");
            assert!(offset < (1 << STACK_RANDOM_BITS));
        }
    }

    #[test]
    fn test_random_module_offset_page_aligned() {
        let mut state = KaslrState::new();
        for _ in 0..100 {
            let offset = state.random_module_offset();
            assert_eq!(offset & 0xFFF, 0, "module offset must be page-aligned");
            assert!(offset < (1 << MODULE_RANDOM_BITS));
        }
    }

    #[test]
    fn test_get_text_offset_before_init() {
        // Before init, should return 0 (safe default)
        // Note: in test harness, KASLR_INITIALIZED may already be true
        // from other tests. This test just verifies the function doesn't panic.
        let _offset = get_text_offset();
    }

    #[test]
    fn test_get_heap_offset_before_init() {
        let _offset = get_heap_offset();
    }
}