⚠️ VeridianOS Kernel Documentation — This is low-level `no_std` kernel code. Treat all functions as unsafe unless they are explicitly documented otherwise.

veridian_kernel/
raii.rs

1//! RAII (Resource Acquisition Is Initialization) patterns for kernel resources
2//!
3//! This module provides RAII wrappers for various kernel resources to ensure
4//! proper cleanup when resources go out of scope.
5
6use core::{
7    mem::ManuallyDrop,
8    ops::{Deref, DerefMut},
9};
10
11#[cfg(feature = "alloc")]
12extern crate alloc;
13
14#[cfg(feature = "alloc")]
15use alloc::sync::Arc;
16#[cfg(feature = "alloc")]
17use alloc::vec::Vec;
18
// Zero-size stand-in for `alloc::vec::Vec` so the guard types below
// (e.g. `FramesGuard`) still compile when the `alloc` feature is off.
// It can never hold elements; `PhantomData` keeps `T` "used".
#[cfg(not(feature = "alloc"))]
struct Vec<T> {
    // Marker only — no storage exists in a no-alloc build.
    _phantom: core::marker::PhantomData<T>,
}
24
25#[cfg(not(feature = "alloc"))]
26impl<T> Vec<T> {
27    fn len(&self) -> usize {
28        0
29    }
30    fn clone(&self) -> Self {
31        Self {
32            _phantom: core::marker::PhantomData,
33        }
34    }
35}
36
37use spin::{Mutex, MutexGuard};
38
39#[allow(unused_imports)]
40use crate::{
41    cap::{CapabilityId, CapabilitySpace},
42    mm::{frame_allocator::FrameAllocator, PhysicalFrame},
43    println,
44    process::ProcessId,
45};
46
/// RAII wrapper for physical frames
///
/// Automatically returns frames to the allocator when dropped.
/// Call [`FrameGuard::leak`] to keep the frame alive past the guard.
pub struct FrameGuard {
    // The owned frame; returned to `allocator` on drop.
    frame: PhysicalFrame,
    // Allocator the frame was obtained from; `'static` so the guard
    // can be stored anywhere without lifetime plumbing.
    allocator: &'static FrameAllocator,
}
54
55impl FrameGuard {
56    /// Create a new frame guard
57    pub fn new(frame: PhysicalFrame, allocator: &'static FrameAllocator) -> Self {
58        Self { frame, allocator }
59    }
60
61    /// Get the physical frame address
62    pub fn addr(&self) -> usize {
63        self.frame.addr()
64    }
65
66    /// Release ownership of the frame without deallocating
67    pub fn leak(self) -> PhysicalFrame {
68        let frame = self.frame;
69        core::mem::forget(self);
70        frame
71    }
72}
73
impl Drop for FrameGuard {
    /// Returns the frame to its allocator when the guard goes out of
    /// scope (skipped entirely if `leak` was called).
    fn drop(&mut self) {
        // Return the frame to the allocator
        // SAFETY: The frame was allocated from this allocator when the
        // FrameGuard was created. The guard has exclusive ownership, so
        // the frame has not been freed elsewhere. free_frame returns
        // the frame to the allocator's free pool.
        unsafe {
            self.allocator.free_frame(self.frame);
        }
        // Logging after the free only reads the frame value held in the
        // guard (passed by value above), not the freed memory itself.
        println!("[RAII] Released frame at {:#x}", self.frame.addr());
    }
}
87
88impl Deref for FrameGuard {
89    type Target = PhysicalFrame;
90
91    fn deref(&self) -> &Self::Target {
92        &self.frame
93    }
94}
95
/// RAII wrapper for multiple frames
///
/// Every frame is returned to the allocator when the guard drops.
pub struct FramesGuard {
    // Frames owned by this guard.
    frames: Vec<PhysicalFrame>,
    #[allow(dead_code)] // Cached count for diagnostics/debugging
    count: usize,
    // Allocator all of `frames` were obtained from.
    allocator: &'static FrameAllocator,
}
103
104impl FramesGuard {
105    /// Create a new frames guard
106    pub fn new(frames: Vec<PhysicalFrame>, allocator: &'static FrameAllocator) -> Self {
107        let count = frames.len();
108        Self {
109            frames,
110            count,
111            allocator,
112        }
113    }
114
115    /// Release ownership of the frames without deallocating
116    pub fn leak(self) -> Vec<PhysicalFrame> {
117        let frames = self.frames.clone();
118        core::mem::forget(self);
119        frames
120    }
121}
122
impl Drop for FramesGuard {
    /// Frees every guarded frame back to the originating allocator.
    fn drop(&mut self) {
        // Return all frames to the allocator
        for frame in &self.frames {
            // SAFETY: Each frame was allocated from this allocator when the
            // FramesGuard was created. The guard has exclusive ownership of
            // all frames. free_frame returns each frame to the free pool.
            unsafe {
                self.allocator.free_frame(*frame);
            }
        }
        println!("[RAII] Released {} frames", self.count);
    }
}
137
/// RAII wrapper for mapped memory regions
///
/// On drop the region is unmapped from the owning process's address
/// space (best effort: failures are logged, not propagated).
pub struct MappedRegion {
    // Starting virtual address of the mapping.
    virt_addr: usize,
    // Size of the mapping in bytes.
    size: usize,
    // Process whose address space holds the mapping.
    process_id: ProcessId,
}
144
145impl MappedRegion {
146    /// Create a new mapped region guard
147    pub fn new(virt_addr: usize, size: usize, process_id: ProcessId) -> Self {
148        Self {
149            virt_addr,
150            size,
151            process_id,
152        }
153    }
154
155    /// Get the virtual address
156    pub fn addr(&self) -> usize {
157        self.virt_addr
158    }
159
160    /// Get the size
161    pub fn size(&self) -> usize {
162        self.size
163    }
164}
165
impl Drop for MappedRegion {
    /// Best-effort unmap of the region from the owning process.
    fn drop(&mut self) {
        // Unmap the region from the process's address space.
        // If the process no longer exists there is nothing to unmap.
        if let Some(process) = crate::process::find_process(self.process_id) {
            let memory_space = process.memory_space.lock();
            if let Err(_e) = memory_space.unmap(self.virt_addr, self.size) {
                // Drop cannot propagate errors, so log and continue.
                println!(
                    "[RAII] Warning: Failed to unmap region at {:#x}: {:?}",
                    self.virt_addr, _e
                );
            } else {
                println!(
                    "[RAII] Unmapped region at {:#x} (size: {:#x})",
                    self.virt_addr, self.size
                );
            }
        }
    }
}
185
/// RAII wrapper for capability space operations
///
/// Revokes the capability from its space when dropped; use
/// [`CapabilityGuard::leak`] to keep it alive.
pub struct CapabilityGuard {
    // Capability to revoke on drop.
    cap_id: CapabilityId,
    // Shared handle to the capability space the id lives in.
    space: Arc<Mutex<CapabilitySpace>>,
}
191
192impl CapabilityGuard {
193    /// Create a new capability guard
194    pub fn new(cap_id: CapabilityId, space: Arc<Mutex<CapabilitySpace>>) -> Self {
195        Self { cap_id, space }
196    }
197
198    /// Get the capability ID
199    pub fn id(&self) -> CapabilityId {
200        self.cap_id
201    }
202
203    /// Release ownership without revoking
204    pub fn leak(self) -> CapabilityId {
205        let id = self.cap_id;
206        core::mem::forget(self);
207        id
208    }
209}
210
impl Drop for CapabilityGuard {
    /// Revokes the capability (best effort; failures are only logged,
    /// since Drop cannot propagate errors).
    fn drop(&mut self) {
        // Revoke the capability
        let mut space = self.space.lock();
        if let Err(_e) = space.revoke(self.cap_id) {
            println!(
                "[RAII] Warning: Failed to revoke capability {}: {:?}",
                self.cap_id, _e
            );
        } else {
            println!("[RAII] Revoked capability {}", self.cap_id);
        }
    }
}
225
/// RAII wrapper for process resources
///
/// Ensures all process resources are cleaned up when the process exits
#[cfg(feature = "alloc")]
pub struct ProcessResources {
    pid: ProcessId,
    // We use ManuallyDrop to control the order of cleanup:
    // threads first, then capabilities, then the memory space
    // (see the Drop impl).
    threads: ManuallyDrop<Vec<crate::process::ThreadId>>,
    capabilities: ManuallyDrop<Arc<Mutex<CapabilitySpace>>>,
    memory_space: ManuallyDrop<Arc<Mutex<crate::mm::VirtualAddressSpace>>>,
}
237
#[cfg(feature = "alloc")]
impl ProcessResources {
    /// Bundle a process's threads, capability space and memory space so
    /// they are torn down together, in a fixed order, on drop.
    pub fn new(
        pid: ProcessId,
        threads: Vec<crate::process::ThreadId>,
        capabilities: Arc<Mutex<CapabilitySpace>>,
        memory_space: Arc<Mutex<crate::mm::VirtualAddressSpace>>,
    ) -> Self {
        // Wrap each resource so the Drop impl controls cleanup order.
        let threads = ManuallyDrop::new(threads);
        let capabilities = ManuallyDrop::new(capabilities);
        let memory_space = ManuallyDrop::new(memory_space);
        ProcessResources {
            pid,
            threads,
            capabilities,
            memory_space,
        }
    }
}
255
#[cfg(feature = "alloc")]
impl Drop for ProcessResources {
    /// Tears down process resources in a fixed order:
    /// threads, then capabilities, then the memory space.
    fn drop(&mut self) {
        println!("[RAII] Cleaning up resources for process {}", self.pid);

        // 1. First terminate all threads
        for &thread_id in self.threads.iter() {
            if let Err(_e) = crate::process::terminate_thread(self.pid, thread_id) {
                println!(
                    "[RAII] Warning: Failed to terminate thread {:?}: {:?}",
                    thread_id, _e
                );
            }
        }
        // BUG FIX: the thread-id vector was previously never dropped —
        // ManuallyDrop suppresses the automatic drop, so its heap buffer
        // leaked on every process teardown.
        // SAFETY: We are in the Drop impl and `self.threads` is not
        // accessed again after this point, so it is dropped exactly once.
        unsafe {
            ManuallyDrop::drop(&mut self.threads);
        }

        // 2. Then revoke all capabilities
        // SAFETY: ManuallyDrop::take moves the Arc out of the ManuallyDrop
        // wrapper. This is safe because we are in the Drop impl and will
        // not access self.capabilities again. The Arc's refcount ensures
        // the CapabilitySpace is valid until the last reference is dropped.
        unsafe {
            let capabilities = ManuallyDrop::take(&mut self.capabilities);
            let mut cap_space = capabilities.lock();
            cap_space.revoke_all();
        }

        // 3. Finally clean up memory space
        // SAFETY: ManuallyDrop::take moves the Arc out of the ManuallyDrop
        // wrapper. This is safe because we are in the Drop impl and will
        // not access self.memory_space again. The Arc's refcount ensures
        // the VirtualAddressSpace is valid until the last reference drops.
        unsafe {
            let memory_space = ManuallyDrop::take(&mut self.memory_space);
            let mut mem_space = memory_space.lock();
            mem_space.destroy();
        }

        println!("[RAII] Process {} resources cleaned up", self.pid);
    }
}
296
/// RAII lock guard that logs acquisition and release
///
/// Wraps a `spin::MutexGuard`; the lock itself is still released by
/// the inner guard's own drop — this wrapper only adds logging.
pub struct TrackedMutexGuard<'a, T> {
    guard: MutexGuard<'a, T>,
    #[allow(dead_code)] // Used in Drop impl for logging
    name: &'static str,
}
303
304impl<'a, T> TrackedMutexGuard<'a, T> {
305    /// Create a new tracked mutex guard
306    pub fn new(guard: MutexGuard<'a, T>, name: &'static str) -> Self {
307        println!("[RAII] Acquired lock: {}", name);
308        Self { guard, name }
309    }
310}
311
impl<T> Drop for TrackedMutexGuard<'_, T> {
    /// Logs the release; the wrapped guard's own drop (which runs after
    /// this body, per Rust's field drop order) actually unlocks.
    fn drop(&mut self) {
        println!("[RAII] Released lock: {}", self.name);
    }
}
317
318impl<T> Deref for TrackedMutexGuard<'_, T> {
319    type Target = T;
320
321    fn deref(&self) -> &Self::Target {
322        &self.guard
323    }
324}
325
326impl<T> DerefMut for TrackedMutexGuard<'_, T> {
327    fn deref_mut(&mut self) -> &mut Self::Target {
328        &mut self.guard
329    }
330}
331
/// RAII wrapper for IPC channel cleanup
///
/// Removes the channel from the global IPC registry when dropped.
pub struct ChannelGuard {
    // Registry key of the guarded channel.
    channel_id: u64,
}
336
337impl ChannelGuard {
338    /// Create a new channel guard
339    pub fn new(channel_id: u64) -> Self {
340        Self { channel_id }
341    }
342
343    /// Get the channel ID
344    pub fn id(&self) -> u64 {
345        self.channel_id
346    }
347
348    /// Release ownership without cleanup
349    pub fn leak(self) -> u64 {
350        let id = self.channel_id;
351        core::mem::forget(self);
352        id
353    }
354}
355
impl Drop for ChannelGuard {
    /// Best-effort removal of the channel from the global registry
    /// (failures are logged, not propagated).
    fn drop(&mut self) {
        // Remove from global registry
        if let Err(_e) = crate::ipc::registry::remove_channel(self.channel_id) {
            println!(
                "[RAII] Warning: Failed to remove channel {}: {:?}",
                self.channel_id, _e
            );
        } else {
            println!("[RAII] Removed channel {} from registry", self.channel_id);
        }
    }
}
369
/// Macro to create RAII scope guards
///
/// Usage: `defer!(cleanup_expr);` — `cleanup_expr` runs when the
/// enclosing scope ends, via a hidden [`crate::raii::ScopeGuard`].
#[macro_export]
macro_rules! defer {
    ($e:expr) => {
        // Bound to a local so the guard lives to the end of the scope
        // instead of being dropped immediately.
        let _guard = $crate::raii::ScopeGuard::new(|| $e);
    };
}
377
/// Generic scope guard that runs cleanup code on drop
///
/// The closure runs exactly once when the guard is dropped, unless
/// [`ScopeGuard::cancel`] is called first. Marked `#[must_use]`
/// because an unbound guard (`ScopeGuard::new(..);`) would be dropped
/// immediately, running the cleanup at the wrong time.
#[must_use = "binding the guard to a variable defers the cleanup; an unbound guard runs it immediately"]
pub struct ScopeGuard<F: FnOnce()> {
    // `Some` until the cleanup has run or been cancelled.
    cleanup: Option<F>,
}

impl<F: FnOnce()> ScopeGuard<F> {
    /// Create a new scope guard that runs `cleanup` on drop.
    pub fn new(cleanup: F) -> Self {
        Self {
            cleanup: Some(cleanup),
        }
    }

    /// Cancel the cleanup
    ///
    /// Consumes the guard; the closure will never run.
    pub fn cancel(mut self) {
        self.cleanup = None;
    }
}
396
397impl<F: FnOnce()> Drop for ScopeGuard<F> {
398    fn drop(&mut self) {
399        if let Some(cleanup) = self.cleanup.take() {
400            cleanup();
401        }
402    }
403}
404
#[cfg(test)]
mod tests {
    use super::*;

    /// Dropping an armed guard must run its cleanup closure.
    #[test]
    fn test_scope_guard() {
        let mut fired = false;
        {
            let _guard = ScopeGuard::new(|| fired = true);
        }
        assert!(fired);
    }

    /// A cancelled guard must never run its cleanup closure.
    #[test]
    fn test_scope_guard_cancel() {
        let mut fired = false;
        {
            let guard = ScopeGuard::new(|| fired = true);
            guard.cancel();
        }
        assert!(!fired);
    }
}