//! veridian_kernel/mm/cache_aligned.rs — cache-line-aligned wrapper type.

use core::ops::{Deref, DerefMut};
/// Size of one CPU cache line, in bytes.
///
/// Kept in sync with the `align(64)` attribute on [`CacheAligned`]
/// (the attribute takes a literal and cannot reference this const).
pub const CACHE_LINE_SIZE: usize = 64;

/// Wrapper that forces its contents onto a dedicated 64-byte cache line.
///
/// Giving each independently-updated value (e.g. a per-CPU counter) its own
/// line prevents false sharing between cores. Access the payload through
/// `Deref`/`DerefMut` or take it back with `into_inner`.
#[repr(C, align(64))]
pub struct CacheAligned<T> {
    // Payload; the `align(64)` on the struct pads it out to a full line.
    value: T,
}

45impl<T> CacheAligned<T> {
46 pub const fn new(value: T) -> Self {
48 Self { value }
49 }
50
51 pub fn into_inner(self) -> T {
53 self.value
54 }
55}
56
57impl<T> Deref for CacheAligned<T> {
58 type Target = T;
59
60 fn deref(&self) -> &T {
61 &self.value
62 }
63}
64
65impl<T> DerefMut for CacheAligned<T> {
66 fn deref_mut(&mut self) -> &mut T {
67 &mut self.value
68 }
69}
70
// NOTE(review): the manual `unsafe impl<T: Send> Send` and
// `unsafe impl<T: Sync> Sync` that used to live here were redundant.
// `Send` and `Sync` are auto traits, and `CacheAligned<T>` holds only a `T`,
// so the compiler already implements them exactly when `T: Send` / `T: Sync`
// — the same bounds the manual impls declared. Removing them drops two
// unjustified `unsafe` blocks without changing which types are Send/Sync.

76impl<T: Default> Default for CacheAligned<T> {
77 fn default() -> Self {
78 Self {
79 value: T::default(),
80 }
81 }
82}
83
84impl<T: Clone> Clone for CacheAligned<T> {
85 fn clone(&self) -> Self {
86 Self {
87 value: self.value.clone(),
88 }
89 }
90}
91
92impl<T: Copy> Copy for CacheAligned<T> {}
93
94impl<T: core::fmt::Debug> core::fmt::Debug for CacheAligned<T> {
95 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
96 f.debug_struct("CacheAligned")
97 .field("value", &self.value)
98 .finish()
99 }
100}
101
#[cfg(test)]
mod tests {
    use core::{
        mem,
        sync::atomic::{AtomicU64, Ordering},
    };

    use super::*;

    /// The wrapper must be 64-byte aligned and occupy at least a full line.
    #[test]
    fn test_cache_line_alignment() {
        assert_eq!(mem::align_of::<CacheAligned<u64>>(), 64);
        assert!(mem::size_of::<CacheAligned<u64>>() >= 64);
    }

    /// `Deref` exposes the wrapped value for reads.
    #[test]
    fn test_deref() {
        let cell = CacheAligned::new(42u64);
        assert_eq!(*cell, 42);
    }

    /// `DerefMut` exposes the wrapped value for writes.
    #[test]
    fn test_deref_mut() {
        let mut cell = CacheAligned::new(42u64);
        *cell = 100;
        assert_eq!(*cell, 100);
    }

    /// `into_inner` hands back ownership of the payload.
    #[test]
    fn test_into_inner() {
        let cell = CacheAligned::new(42u64);
        assert_eq!(cell.into_inner(), 42);
    }

    /// Atomic methods are reachable directly through `Deref`.
    #[test]
    fn test_atomic_usage() {
        let counter = CacheAligned::new(AtomicU64::new(0));
        counter.fetch_add(1, Ordering::Relaxed);
        assert_eq!(counter.load(Ordering::Relaxed), 1);
    }

    /// Adjacent array slots land at least a cache line apart, and updating
    /// one slot never disturbs its neighbours.
    #[test]
    fn test_array_no_false_sharing() {
        let counters: [CacheAligned<AtomicU64>; 4] = [
            CacheAligned::new(AtomicU64::new(0)),
            CacheAligned::new(AtomicU64::new(0)),
            CacheAligned::new(AtomicU64::new(0)),
            CacheAligned::new(AtomicU64::new(0)),
        ];

        let first = &counters[0] as *const _ as usize;
        let second = &counters[1] as *const _ as usize;
        assert!(second - first >= CACHE_LINE_SIZE);

        for (slot, delta) in counters.iter().zip([10u64, 20, 30, 40]) {
            slot.fetch_add(delta, Ordering::Relaxed);
        }
        for (slot, expected) in counters.iter().zip([10u64, 20, 30, 40]) {
            assert_eq!(slot.load(Ordering::Relaxed), expected);
        }
    }

    /// `new` is `const`, so it can initialize a `static`.
    #[test]
    fn test_const_new() {
        static COUNTER: CacheAligned<AtomicU64> = CacheAligned::new(AtomicU64::new(0));
        COUNTER.fetch_add(1, Ordering::Relaxed);
        assert_eq!(COUNTER.load(Ordering::Relaxed), 1);
    }

    /// `Default` forwards to the payload's `Default`.
    #[test]
    fn test_default() {
        let cell: CacheAligned<u64> = CacheAligned::default();
        assert_eq!(*cell, 0);
    }
}