veridian_kernel/mm/cache_aligned.rs

//! Cache Line Alignment Utilities
//!
//! Provides types and constants for eliminating false sharing in
//! concurrent data structures. False sharing occurs when independent
//! variables share a cache line, causing cache invalidation ping-pong
//! between CPU cores on writes.
//!
//! Modern x86_64 (Intel/AMD) and AArch64 (Cortex-A72) use 64-byte
//! cache lines. RISC-V implementations also commonly use 64 bytes.
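//!
//! As an illustrative sketch (the `HotPacked`/`HotPadded` structs are
//! hypothetical, not part of this module), compare a packed layout with
//! a padded one:
//!
//! ```rust,ignore
//! use core::sync::atomic::AtomicU64;
//! use crate::mm::cache_aligned::CacheAligned;
//!
//! // Packed: the two counters can share one 64-byte line, so a write
//! // to `a` on one core can invalidate another core's cached `b`.
//! struct HotPacked {
//!     a: AtomicU64,
//!     b: AtomicU64,
//! }
//!
//! // Padded: each counter is aligned (and padded) to its own line, so
//! // writes to `a` never touch the line holding `b`.
//! struct HotPadded {
//!     a: CacheAligned<AtomicU64>,
//!     b: CacheAligned<AtomicU64>,
//! }
//! ```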

use core::ops::{Deref, DerefMut};

/// Cache line size in bytes for the target architecture.
///
/// Intel/AMD x86_64: 64 bytes (since Pentium 4).
/// ARM Cortex-A72: 64 bytes (L1D and L2).
/// Common RISC-V implementations: 64 bytes.
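///
/// # Example
///
/// A sketch of rounding a payload up to whole cache lines (the 200-byte
/// descriptor size is hypothetical):
///
/// ```rust,ignore
/// use crate::mm::cache_aligned::CACHE_LINE_SIZE;
///
/// // ceil(200 / 64) == 4 cache lines
/// let lines = (200 + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE;
/// assert_eq!(lines, 4);
/// ```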
pub const CACHE_LINE_SIZE: usize = 64;

/// Cache-line-aligned wrapper type for eliminating false sharing.
///
/// Wraps a value `T` and ensures it is aligned to a full cache line
/// boundary (64 bytes). When placed in arrays or adjacent to other
/// per-CPU data, this prevents false sharing between cores.
///
/// # Example
///
/// ```rust,ignore
/// use crate::mm::cache_aligned::CacheAligned;
/// use core::sync::atomic::{AtomicU64, Ordering};
///
/// // Each counter occupies its own cache line
/// static COUNTERS: [CacheAligned<AtomicU64>; 4] = [
///     CacheAligned::new(AtomicU64::new(0)),
///     CacheAligned::new(AtomicU64::new(0)),
///     CacheAligned::new(AtomicU64::new(0)),
///     CacheAligned::new(AtomicU64::new(0)),
/// ];
/// ```
#[repr(C, align(64))]
pub struct CacheAligned<T> {
    value: T,
}
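
// `#[repr(align(N))]` only accepts an integer literal, so the attribute on
// `CacheAligned` cannot name `CACHE_LINE_SIZE` directly. A small defensive
// guard (an addition sketched here): fail the build if the constant and
// the attribute ever drift apart.
const _: () = assert!(core::mem::align_of::<CacheAligned<u8>>() == CACHE_LINE_SIZE);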

impl<T> CacheAligned<T> {
    /// Create a new cache-line-aligned wrapper.
    pub const fn new(value: T) -> Self {
        Self { value }
    }

    /// Consume the wrapper and return the inner value.
    pub fn into_inner(self) -> T {
        self.value
    }
}

impl<T> Deref for CacheAligned<T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.value
    }
}

impl<T> DerefMut for CacheAligned<T> {
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}

// Safety: `CacheAligned<T>` is a plain wrapper around a single `T` field;
// the alignment attribute adds padding but no shared state, so it inherits
// `T`'s thread-safety exactly. These impls match what the compiler would
// auto-derive and are spelled out for documentation.
unsafe impl<T: Send> Send for CacheAligned<T> {}
unsafe impl<T: Sync> Sync for CacheAligned<T> {}

impl<T: Default> Default for CacheAligned<T> {
    fn default() -> Self {
        Self {
            value: T::default(),
        }
    }
}

impl<T: Clone> Clone for CacheAligned<T> {
    fn clone(&self) -> Self {
        Self {
            value: self.value.clone(),
        }
    }
}

impl<T: Copy> Copy for CacheAligned<T> {}

impl<T: core::fmt::Debug> core::fmt::Debug for CacheAligned<T> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("CacheAligned")
            .field("value", &self.value)
            .finish()
    }
}

#[cfg(test)]
mod tests {
    use core::{
        mem,
        sync::atomic::{AtomicU64, Ordering},
    };

    use super::*;

    #[test]
    fn test_cache_line_alignment() {
        // CacheAligned<u64> must be 64-byte aligned; its size rounds up
        // to at least one full cache line.
        assert_eq!(mem::align_of::<CacheAligned<u64>>(), 64);
        assert!(mem::size_of::<CacheAligned<u64>>() >= 64);
    }

    #[test]
    fn test_deref() {
        let aligned = CacheAligned::new(42u64);
        assert_eq!(*aligned, 42);
    }

    #[test]
    fn test_deref_mut() {
        let mut aligned = CacheAligned::new(42u64);
        *aligned = 100;
        assert_eq!(*aligned, 100);
    }

    #[test]
    fn test_into_inner() {
        let aligned = CacheAligned::new(42u64);
        assert_eq!(aligned.into_inner(), 42);
    }

    #[test]
    fn test_atomic_usage() {
        let counter = CacheAligned::new(AtomicU64::new(0));
        counter.fetch_add(1, Ordering::Relaxed);
        assert_eq!(counter.load(Ordering::Relaxed), 1);
    }

    #[test]
    fn test_array_no_false_sharing() {
        // Each element in the array should be on its own cache line
        let arr: [CacheAligned<AtomicU64>; 4] = [
            CacheAligned::new(AtomicU64::new(0)),
            CacheAligned::new(AtomicU64::new(0)),
            CacheAligned::new(AtomicU64::new(0)),
            CacheAligned::new(AtomicU64::new(0)),
        ];

        // Addresses should be at least 64 bytes apart
        let addr0 = &arr[0] as *const _ as usize;
        let addr1 = &arr[1] as *const _ as usize;
        assert!(addr1 - addr0 >= CACHE_LINE_SIZE);

        // Verify each element works independently
        arr[0].fetch_add(10, Ordering::Relaxed);
        arr[1].fetch_add(20, Ordering::Relaxed);
        arr[2].fetch_add(30, Ordering::Relaxed);
        arr[3].fetch_add(40, Ordering::Relaxed);
        assert_eq!(arr[0].load(Ordering::Relaxed), 10);
        assert_eq!(arr[1].load(Ordering::Relaxed), 20);
        assert_eq!(arr[2].load(Ordering::Relaxed), 30);
        assert_eq!(arr[3].load(Ordering::Relaxed), 40);
    }

    #[test]
    fn test_const_new() {
        // Verify const construction works (needed for static initialization)
        static COUNTER: CacheAligned<AtomicU64> = CacheAligned::new(AtomicU64::new(0));
        COUNTER.fetch_add(1, Ordering::Relaxed);
        assert_eq!(COUNTER.load(Ordering::Relaxed), 1);
    }

    #[test]
    fn test_default() {
        let aligned: CacheAligned<u64> = CacheAligned::default();
        assert_eq!(*aligned, 0);
    }
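
    // Illustrative addition: size is rounded up to the alignment, so a
    // payload one byte larger than a line (65 bytes) pads out to two
    // full 64-byte lines.
    #[test]
    fn test_padding_rounds_up_to_whole_lines() {
        assert_eq!(
            mem::size_of::<CacheAligned<[u8; 65]>>(),
            2 * CACHE_LINE_SIZE
        );
    }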
}