⚠️ VeridianOS Kernel Documentation - This is low-level, no_std kernel code. Unsafe operations are confined to explicit `unsafe` blocks and documented with SAFETY comments.

veridian_kernel/sync/once_lock.rs

//! Safe Global Initialization (Rust 2024 Compatible)
//!
//! Provides safe alternatives to `static mut` for global state management.
//! Uses atomic operations and proper synchronization for Rust 2024 edition.

#![allow(clippy::needless_lifetimes, mismatched_lifetime_syntaxes)]

use core::sync::atomic::{AtomicPtr, Ordering};

use spin::Mutex;

/// A cell that can be written to only once (Rust 2024 compatible)
///
/// Similar to std::sync::OnceLock but works in no_std environments.
pub struct OnceLock<T> {
    inner: AtomicPtr<T>,
}

impl<T> Default for OnceLock<T> {
    fn default() -> Self {
        Self::new()
    }
}

impl<T> OnceLock<T> {
    /// Create a new empty OnceLock
    pub const fn new() -> Self {
        Self {
            inner: AtomicPtr::new(core::ptr::null_mut()),
        }
    }

    /// Get the value if initialized
    pub fn get(&self) -> Option<&'static T> {
        let ptr = self.inner.load(Ordering::Acquire);
        if ptr.is_null() {
            None
        } else {
            // SAFETY: The pointer is non-null, meaning `set()` or `get_or_init()`
            // has previously stored a valid, heap-allocated `T` via `Box::into_raw()`.
            // The Acquire ordering on the load synchronizes-with the Release in
            // `set()`, ensuring the pointed-to data is fully initialized before we
            // read it. The 'static lifetime is sound because the OnceLock is expected
            // to live for the rest of the kernel's lifetime (declared as a `static`);
            // the heap allocation is freed only by the OnceLock's `Drop` impl.
            Some(unsafe { &*ptr })
        }
    }

    /// Get mutable reference if initialized (unsafe)
    ///
    /// # Safety
    /// Caller must ensure exclusive access to the contained value for the
    /// duration of the returned reference's lifetime. No other references
    /// (mutable or immutable) to the inner value may exist concurrently.
    /// Violating this invariant causes undefined behavior due to aliased
    /// mutable references.
    pub unsafe fn get_mut(&self) -> Option<&'static mut T> {
        let ptr = self.inner.load(Ordering::Acquire);
        if ptr.is_null() {
            None
        } else {
            // SAFETY: The pointer is non-null, so `set()` has previously stored
            // a valid, heap-allocated `T`. The caller guarantees exclusive access,
            // so creating a mutable reference does not alias any existing references.
            // The Acquire load ensures we see the fully initialized data.
            // Rust 2024 warns on unsafe operations inside an `unsafe fn` unless
            // they are wrapped in an explicit `unsafe` block (unsafe_op_in_unsafe_fn).
            Some(unsafe { &mut *ptr })
        }
    }

    /// Initialize the cell with a value
    ///
    /// Returns Ok(()) if initialization succeeds, Err(value) if already
    /// initialized
    pub fn set(&self, value: T) -> Result<(), T> {
        let boxed = alloc::boxed::Box::new(value);
        let ptr = alloc::boxed::Box::into_raw(boxed);

        match self.inner.compare_exchange(
            core::ptr::null_mut(),
            ptr,
            Ordering::Release,
            Ordering::Acquire,
        ) {
            Ok(_) => Ok(()),
            Err(_) => {
                // Already initialized, reclaim our allocation and return the value.
                // SAFETY: `ptr` was obtained from `Box::into_raw()` above, so it
                // points to a valid, properly aligned, heap-allocated `T`. The
                // compare_exchange failed, meaning no one else has taken ownership of
                // this pointer, so we must reclaim it to avoid a memory leak.
                // We dereference the Box to extract the owned value before the Box
                // is dropped, avoiding a use-after-free.
                let boxed = unsafe { alloc::boxed::Box::from_raw(ptr) };
                Err(*boxed)
            }
        }
    }

    /// Get or initialize the value
    pub fn get_or_init<F>(&self, f: F) -> &'static T
    where
        F: FnOnce() -> T,
    {
        if let Some(val) = self.get() {
            return val;
        }

        let value = f();
        match self.set(value) {
            // After set() succeeds or detects prior init, get() is guaranteed Some
            Ok(()) => self
                .get()
                .expect("OnceLock get failed after successful set"),
            Err(_) => self
                .get()
                .expect("OnceLock get failed after concurrent init"),
        }
    }
}
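
// Usage sketch (illustrative, not part of this module's API): because `get()`
// and `get_or_init()` hand out `&'static T`, a OnceLock is normally declared
// as a `static`. The names `UART_BASE`, `early_set_uart`, and `uart_base`
// below are hypothetical.
//
//     static UART_BASE: OnceLock<usize> = OnceLock::new();
//
//     fn early_set_uart(base: usize) {
//         // The first caller wins; later callers get their value back in Err.
//         let _ = UART_BASE.set(base);
//     }
//
//     fn uart_base() -> usize {
//         // Falls back to a default if nothing was recorded during early boot.
//         *UART_BASE.get_or_init(|| 0x0900_0000)
//     }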

// SAFETY: OnceLock<T> can be sent across threads if T: Send because the inner
// value is heap-allocated and accessed through an AtomicPtr with proper memory
// ordering. Ownership transfer is safe when T itself is safe to transfer.
unsafe impl<T: Send> Send for OnceLock<T> {}
// SAFETY: OnceLock<T> can be shared across threads if T: Send + Sync. The
// AtomicPtr with Acquire/Release ordering ensures that concurrent `get()` calls
// observe a fully initialized T. The `set()` method uses compare_exchange to
// ensure at most one successful initialization. T must be Sync because multiple
// threads may hold shared references to the inner value simultaneously.
unsafe impl<T: Send + Sync> Sync for OnceLock<T> {}

impl<T> Drop for OnceLock<T> {
    fn drop(&mut self) {
        let ptr = self.inner.load(Ordering::Acquire);
        if !ptr.is_null() {
            // SAFETY: The pointer was originally created by `Box::into_raw()` in
            // `set()`. Since we are in `drop(&mut self)`, we have exclusive access
            // to the OnceLock, guaranteeing no other thread is concurrently reading
            // or writing the pointer. Reconstructing the Box reclaims the heap
            // allocation and drops the contained T.
            unsafe {
                let _ = alloc::boxed::Box::from_raw(ptr);
            }
        }
    }
}

/// Lazy initialization with function (Rust 2024 compatible)
///
/// Similar to std::sync::LazyLock but for no_std. The init function is kept
/// behind a spin Mutex so that racing first accesses cannot run it twice or
/// alias it.
pub struct LazyLock<T, F = fn() -> T> {
    cell: OnceLock<T>,
    init: Mutex<Option<F>>,
}

impl<T: 'static, F: FnOnce() -> T> LazyLock<T, F> {
    /// Create a new LazyLock with initialization function
    pub const fn new(init: F) -> Self {
        Self {
            cell: OnceLock::new(),
            init: Mutex::new(Some(init)),
        }
    }

    /// Force initialization and get reference
    pub fn force(&self) -> &T {
        // Fast path: the value is already initialized.
        if let Some(val) = self.cell.get() {
            return val;
        }

        // Slow path: serialize initialization through the spin lock. The lock
        // guarantees the init function is taken and executed by at most one
        // thread, even if several threads race on the first access. The racy
        // `OnceLock::get_or_init` is deliberately not used here: it may invoke
        // its closure on several threads at once, which would alias the stored
        // init function.
        let mut init = self.init.lock();
        if self.cell.get().is_none() {
            match init.take() {
                Some(f) => {
                    let _ = self.cell.set(f());
                }
                // Only reachable if a previous initialization attempt panicked
                // after taking the function; treat it as a fatal logic error.
                None => panic!("LazyLock initialization function called twice"),
            }
        }
        drop(init);

        self.cell.get().expect("LazyLock value must be initialized")
    }
}

impl<T: 'static, F: FnOnce() -> T> core::ops::Deref for LazyLock<T, F> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.force()
    }
}
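
// Usage sketch (illustrative): a `static` LazyLock computes its value on
// first dereference. `SYSCALL_TABLE_BYTES` and `map_syscall_table` below are
// hypothetical names.
//
//     static SYSCALL_TABLE_BYTES: LazyLock<usize> = LazyLock::new(|| 64 * 8);
//
//     fn map_syscall_table() {
//         // Deref calls `force()`, running the init closure exactly once.
//         let bytes: usize = *SYSCALL_TABLE_BYTES;
//         let _ = bytes;
//     }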

// SAFETY: LazyLock<T, F> can be sent across threads if both T and F are Send.
// The inner OnceLock handles synchronization for the value, and the init
// function F is only accessed once (consumed via take()) so transferring
// ownership is safe.
unsafe impl<T: Send, F: Send> Send for LazyLock<T, F> {}
// SAFETY: LazyLock<T, F> can be shared across threads if T: Sync and F: Send.
// The OnceLock provides the synchronization for concurrent access to T. F must
// be Send (not Sync) because it is only ever touched while the spin Mutex is
// held and is consumed exactly once; the lock ensures at most one thread
// executes the init function.
unsafe impl<T: Sync, F: Send> Sync for LazyLock<T, F> {}

/// Safe global state with mutex (Rust 2024 compatible)
pub struct GlobalState<T> {
    inner: Mutex<Option<T>>,
}

impl<T> GlobalState<T> {
    /// Create new uninitialized global state
    pub const fn new() -> Self {
        Self {
            inner: Mutex::new(None),
        }
    }

    /// Initialize the global state
    pub fn init(&self, value: T) -> Result<(), T> {
        let mut lock = self.inner.lock();
        if lock.is_some() {
            Err(value)
        } else {
            *lock = Some(value);
            Ok(())
        }
    }

    /// Get reference with closure
    pub fn with<R, F: FnOnce(&T) -> R>(&self, f: F) -> Option<R> {
        let lock = self.inner.lock();
        lock.as_ref().map(f)
    }

    /// Get mutable reference with closure
    pub fn with_mut<R, F: FnOnce(&mut T) -> R>(&self, f: F) -> Option<R> {
        let mut lock = self.inner.lock();
        lock.as_mut().map(f)
    }

    /// Try to take the lock, returning the guard only if the state is initialized
    pub fn try_get(&self) -> Option<spin::MutexGuard<Option<T>>> {
        let lock = self.inner.lock();
        if lock.is_some() {
            Some(lock)
        } else {
            None
        }
    }
}
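
// Usage sketch (illustrative): GlobalState suits globals that need mutation
// after boot. `SCHEDULER`, `Scheduler`, `bring_up`, and `tick` below are
// hypothetical names.
//
//     static SCHEDULER: GlobalState<Scheduler> = GlobalState::new();
//
//     fn bring_up(sched: Scheduler) {
//         // Fails (handing the value back) if already initialized.
//         assert!(SCHEDULER.init(sched).is_ok(), "scheduler already initialized");
//     }
//
//     fn tick() {
//         // The closure runs with the spinlock held, so keep it short.
//         let _ = SCHEDULER.with_mut(|s| s.on_timer_tick());
//     }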

impl<T> Default for GlobalState<T> {
    fn default() -> Self {
        Self::new()
    }
}

// SAFETY: GlobalState<T> can be sent across threads if T: Send. The inner
// spin::Mutex provides mutual exclusion, so the contained Option<T> is only
// accessed by one thread at a time. Transferring ownership is safe when T
// itself supports cross-thread transfer.
unsafe impl<T: Send> Send for GlobalState<T> {}
// SAFETY: GlobalState<T> can be shared across threads if T: Send. The
// spin::Mutex serializes all access to the inner Option<T>, preventing data
// races. T only needs to be Send (not Sync) because the Mutex ensures no
// concurrent access -- each caller gets exclusive access through the lock
// guard.
unsafe impl<T: Send> Sync for GlobalState<T> {}

#[cfg(test)]
mod tests {
    use alloc::string::String;

    use super::*;

    #[test]
    fn test_once_lock() {
        let lock = OnceLock::new();
        assert!(lock.get().is_none());

        assert!(lock.set(42).is_ok());
        assert_eq!(*lock.get().unwrap(), 42);

        // Second set should fail
        assert!(lock.set(100).is_err());
    }
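
    // Additional coverage sketch (not in the original test suite): exercises
    // `get_or_init`, which the tests above do not touch.
    #[test]
    fn test_once_lock_get_or_init() {
        let lock = OnceLock::new();
        // The first call runs the closure and stores the result.
        assert_eq!(*lock.get_or_init(|| 7), 7);
        // Later calls ignore the closure and return the stored value.
        assert_eq!(*lock.get_or_init(|| 99), 7);
    }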

    #[test]
    fn test_lazy_lock() {
        let lazy = LazyLock::new(|| 42);
        assert_eq!(*lazy, 42);
    }

    #[test]
    fn test_global_state() {
        let state = GlobalState::new();
        assert!(state.init(String::from("hello")).is_ok());

        state.with(|s| {
            assert_eq!(s, "hello");
        });

        state.with_mut(|s| {
            s.push_str(" world");
        });

        state.with(|s| {
            assert_eq!(s, "hello world");
        });
    }
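
    // Additional coverage sketch (not in the original test suite): `try_get`
    // only hands back the lock guard once the state has been initialized.
    #[test]
    fn test_global_state_try_get() {
        let state: GlobalState<u32> = GlobalState::new();
        // Uninitialized: no guard is returned.
        assert!(state.try_get().is_none());

        assert!(state.init(7).is_ok());
        // Initialized: the guard wraps Some(7).
        let guard = state.try_get().expect("state was initialized");
        assert_eq!(*guard, Some(7));
    }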
}