⚠️ VeridianOS Kernel Documentation - This is low-level kernel code. All functions are unsafe unless explicitly marked otherwise. no_std

veridian_kernel/net/
dma_pool.rs

1//! DMA Buffer Pool for Zero-Copy Networking
2//!
3//! Provides pre-allocated DMA-capable buffers for network packet transmission
4//! and reception, enabling zero-copy operation with minimal allocation
5//! overhead.
6//!
7//! Buffers are allocated from physical frames below 4GB for 32-bit DMA
8//! compatibility. Each buffer gets one 4KB frame; the usable portion is
9//! `DMA_BUFFER_SIZE` (2048 bytes) to accommodate network MTU + headers.
10
11use alloc::vec::Vec;
12use core::sync::atomic::{AtomicU64, Ordering};
13
14use spin::Mutex;
15
16use crate::{
17    error::KernelError,
18    mm::{
19        frame_allocator::MemoryZone, phys_to_virt_addr, FrameNumber, PhysicalAddress,
20        FRAME_ALLOCATOR, FRAME_SIZE,
21    },
22};
23
/// Standard network buffer size (1500 MTU + headers + alignment).
/// Each buffer occupies one 4KB frame; only this many bytes are usable.
pub const DMA_BUFFER_SIZE: usize = 2048;

/// Maximum number of buffers a single pool may hold.
pub const MAX_BUFFERS: usize = 512;

/// Maximum physical address for 32-bit DMA compatibility (4GB).
/// Frames at or above this address are rejected during pool creation.
const DMA_PHYS_LIMIT: u64 = 0x1_0000_0000;
32
/// A single DMA-capable network buffer backed by one physical frame.
///
/// The buffer is addressable both through the kernel's direct map
/// (`virt_addr`) and by DMA engines (`phys_addr`, below 4GB).
pub struct DmaBuffer {
    /// Virtual address of the buffer (kernel direct physical map)
    virt_addr: usize,

    /// Physical address handed to DMA engines
    phys_addr: PhysicalAddress,

    /// Usable buffer size in bytes (`DMA_BUFFER_SIZE`)
    size: usize,

    /// Reference count for buffer ownership; 0 means free
    refcount: AtomicU64,

    /// Buffer's slot index within its owning pool
    index: u16,

    /// Frame number backing this buffer (for deallocation)
    #[allow(dead_code)] // Needed for future pool teardown / frame reclamation
    frame: FrameNumber,
}
54
55impl DmaBuffer {
56    /// Create a new DMA buffer with explicit addresses
57    #[allow(dead_code)] // Used in tests for direct construction
58    fn new(virt_addr: usize, phys_addr: PhysicalAddress, size: usize, index: u16) -> Self {
59        Self {
60            virt_addr,
61            phys_addr,
62            size,
63            refcount: AtomicU64::new(0),
64            index,
65            frame: FrameNumber::new(phys_addr.as_u64() / FRAME_SIZE as u64),
66        }
67    }
68
69    /// Create a DMA buffer from an allocated physical frame.
70    ///
71    /// Converts the frame number to physical and virtual addresses using the
72    /// kernel's direct physical memory mapping.
73    pub fn from_frame(frame: FrameNumber, index: u16) -> Self {
74        let phys_addr = PhysicalAddress::new(frame.as_u64() * FRAME_SIZE as u64);
75        let virt_addr = phys_to_virt_addr(phys_addr.as_u64()) as usize;
76
77        Self {
78            virt_addr,
79            phys_addr,
80            size: DMA_BUFFER_SIZE,
81            refcount: AtomicU64::new(0),
82            index,
83            frame,
84        }
85    }
86
87    /// Get virtual address
88    pub fn virt_addr(&self) -> usize {
89        self.virt_addr
90    }
91
92    /// Get physical address for DMA
93    pub fn phys_addr(&self) -> PhysicalAddress {
94        self.phys_addr
95    }
96
97    /// Get buffer size
98    pub fn size(&self) -> usize {
99        self.size
100    }
101
102    /// Get buffer index in pool
103    pub fn index(&self) -> u16 {
104        self.index
105    }
106
107    /// Get buffer as slice
108    pub fn as_slice(&self) -> &[u8] {
109        // SAFETY: virt_addr points to a DMA buffer of exactly `size` bytes allocated
110        // during pool creation from the frame allocator. The buffer remains valid for
111        // the lifetime of the pool. We hold &self so no mutable alias exists.
112        unsafe { core::slice::from_raw_parts(self.virt_addr as *const u8, self.size) }
113    }
114
115    /// Get buffer as mutable slice
116    pub fn as_mut_slice(&mut self) -> &mut [u8] {
117        // SAFETY: virt_addr points to a DMA buffer of exactly `size` bytes allocated
118        // during pool creation from the frame allocator. We hold &mut self so no other
119        // reference to this buffer exists, making the mutable slice safe.
120        unsafe { core::slice::from_raw_parts_mut(self.virt_addr as *mut u8, self.size) }
121    }
122
123    /// Increment reference count
124    pub fn acquire(&self) -> u64 {
125        self.refcount.fetch_add(1, Ordering::Relaxed)
126    }
127
128    /// Decrement reference count
129    pub fn release(&self) -> u64 {
130        self.refcount.fetch_sub(1, Ordering::Release)
131    }
132
133    /// Check if buffer is free (refcount == 0)
134    pub fn is_free(&self) -> bool {
135        self.refcount.load(Ordering::Acquire) == 0
136    }
137}
138
/// Pool of pre-allocated DMA buffers with a LIFO free list.
///
/// Buffers live in `buffers` for the lifetime of the pool; allocation and
/// deallocation only move indices on and off `free_list`.
pub struct DmaBufferPool {
    /// Pool of DMA buffers; a buffer's `index` is its position here
    buffers: Vec<DmaBuffer>,

    /// Free list (indices of available buffers, used as a LIFO stack)
    free_list: Vec<u16>,

    /// Total buffers successfully allocated at creation time
    total_buffers: usize,

    /// Statistics counters (monotonically increasing)
    allocations: AtomicU64,
    deallocations: AtomicU64,
    allocation_failures: AtomicU64,
}
155
156impl DmaBufferPool {
157    /// Create a new DMA buffer pool with physically contiguous frames.
158    ///
159    /// Allocates `num_buffers` frames from the frame allocator for DMA use.
160    /// Frames are filtered to be below 4GB for 32-bit DMA engine compatibility.
161    /// Each frame provides one `DMA_BUFFER_SIZE` buffer. If allocation fails
162    /// for some frames, the pool is created with however many were successful.
163    pub fn new(num_buffers: usize) -> Result<Self, KernelError> {
164        if num_buffers > MAX_BUFFERS {
165            return Err(KernelError::InvalidArgument {
166                name: "num_buffers",
167                value: "exceeds_max",
168            });
169        }
170
171        let mut buffers = Vec::with_capacity(num_buffers);
172        let mut free_list = Vec::with_capacity(num_buffers);
173        let mut allocated = 0usize;
174
175        let allocator = FRAME_ALLOCATOR.lock();
176
177        for i in 0..num_buffers {
178            // Allocate from the Normal zone (16MB-MAX on 64-bit) and then filter
179            // for <4GB. The DMA zone only covers 0-16MB which is often reserved.
180            let frame = match allocator.allocate_frames_in_zone(1, None, Some(MemoryZone::Normal)) {
181                Ok(f) => f,
182                Err(_) => {
183                    // Try without zone constraint as fallback
184                    match allocator.allocate_frames(1, None) {
185                        Ok(f) => f,
186                        Err(_) => break, // No more frames available
187                    }
188                }
189            };
190
191            let phys_addr = frame.as_u64() * FRAME_SIZE as u64;
192
193            // Filter: DMA buffers must be below 4GB for 32-bit DMA engines
194            if phys_addr >= DMA_PHYS_LIMIT {
195                // Frame is above 4GB -- free it and continue trying.
196                // On systems with limited low memory this may exhaust quickly.
197                let _ = allocator.free_frames(frame, 1);
198                continue;
199            }
200
201            // Zero-initialize the buffer memory for safety
202            let virt = phys_to_virt_addr(phys_addr) as *mut u8;
203            // SAFETY: virt points to a freshly allocated frame of FRAME_SIZE bytes.
204            // The frame allocator guarantees this memory is not in use. We zero it
205            // before handing it out to prevent information leaks.
206            unsafe {
207                core::ptr::write_bytes(virt, 0, FRAME_SIZE);
208            }
209
210            let buffer = DmaBuffer::from_frame(frame, i as u16);
211            free_list.push(i as u16);
212            buffers.push(buffer);
213            allocated += 1;
214        }
215
216        drop(allocator);
217
218        if allocated == 0 && num_buffers > 0 {
219            return Err(KernelError::OutOfMemory {
220                requested: num_buffers * FRAME_SIZE,
221                available: 0,
222            });
223        }
224
225        println!(
226            "[DMA-POOL] Allocated {}/{} DMA buffers ({}KB, all below 4GB)",
227            allocated,
228            num_buffers,
229            allocated * DMA_BUFFER_SIZE / 1024,
230        );
231
232        Ok(Self {
233            buffers,
234            free_list,
235            total_buffers: allocated,
236            allocations: AtomicU64::new(0),
237            deallocations: AtomicU64::new(0),
238            allocation_failures: AtomicU64::new(0),
239        })
240    }
241
242    /// Allocate a buffer from the pool
243    pub fn alloc(&mut self) -> Result<&mut DmaBuffer, KernelError> {
244        if let Some(index) = self.free_list.pop() {
245            let buffer = &mut self.buffers[index as usize];
246            buffer.acquire();
247            self.allocations.fetch_add(1, Ordering::Relaxed);
248            Ok(buffer)
249        } else {
250            self.allocation_failures.fetch_add(1, Ordering::Relaxed);
251            Err(KernelError::ResourceExhausted {
252                resource: "dma_buffers",
253            })
254        }
255    }
256
257    /// Free a buffer back to the pool
258    pub fn free(&mut self, buffer_index: u16) -> Result<(), KernelError> {
259        if buffer_index as usize >= self.buffers.len() {
260            return Err(KernelError::InvalidArgument {
261                name: "buffer_index",
262                value: "out_of_range",
263            });
264        }
265
266        let buffer = &self.buffers[buffer_index as usize];
267        let prev_count = buffer.release();
268
269        // Only return to free list if refcount reaches 0
270        if prev_count == 1 {
271            self.free_list.push(buffer_index);
272            self.deallocations.fetch_add(1, Ordering::Relaxed);
273        }
274
275        Ok(())
276    }
277
278    /// Get number of free buffers
279    pub fn free_count(&self) -> usize {
280        self.free_list.len()
281    }
282
283    /// Get total number of buffers
284    pub fn total_count(&self) -> usize {
285        self.total_buffers
286    }
287
288    /// Get allocation statistics
289    pub fn stats(&self) -> DmaPoolStats {
290        DmaPoolStats {
291            total_buffers: self.total_buffers,
292            free_buffers: self.free_list.len(),
293            allocations: self.allocations.load(Ordering::Relaxed),
294            deallocations: self.deallocations.load(Ordering::Relaxed),
295            allocation_failures: self.allocation_failures.load(Ordering::Relaxed),
296        }
297    }
298}
299
/// Point-in-time snapshot of DMA pool statistics.
#[derive(Debug, Clone, Copy)]
pub struct DmaPoolStats {
    /// Total buffers allocated at pool creation
    pub total_buffers: usize,
    /// Buffers currently available on the free list
    pub free_buffers: usize,
    /// Cumulative successful allocations
    pub allocations: u64,
    /// Cumulative returns to the free list
    pub deallocations: u64,
    /// Cumulative failed allocation attempts (pool empty)
    pub allocation_failures: u64,
}
309
/// Global DMA buffer pool for network operations.
/// `None` until `init_network_pool` runs; guarded by a spinlock.
static NETWORK_DMA_POOL: Mutex<Option<DmaBufferPool>> = Mutex::new(None);
312
313/// Initialize the global network DMA pool
314pub fn init_network_pool(num_buffers: usize) -> Result<(), KernelError> {
315    let mut pool_lock = NETWORK_DMA_POOL.lock();
316    if pool_lock.is_some() {
317        return Ok(());
318    }
319
320    let pool = DmaBufferPool::new(num_buffers)?;
321    let stats = pool.stats();
322    *pool_lock = Some(pool);
323
324    println!(
325        "[DMA-POOL] Network pool: {} buffers, {} free",
326        stats.total_buffers, stats.free_buffers,
327    );
328    Ok(())
329}
330
331/// Execute a closure with the global network DMA pool (mutable access)
332pub fn with_network_pool<R, F: FnOnce(&mut DmaBufferPool) -> R>(f: F) -> Result<R, KernelError> {
333    let mut pool_lock = NETWORK_DMA_POOL.lock();
334    pool_lock.as_mut().map(f).ok_or(KernelError::InvalidState {
335        expected: "initialized",
336        actual: "uninitialized",
337    })
338}
339
#[cfg(test)]
mod tests {
    use super::*;

    // Sanity-check the pool's compile-time constants.
    #[test]
    fn test_dma_pool_constants() {
        assert_eq!(DMA_BUFFER_SIZE, 2048);
        assert!(MAX_BUFFERS >= 512);
        assert!(DMA_PHYS_LIMIT == 0x1_0000_0000);
    }

    // Requesting more than MAX_BUFFERS must be rejected up front.
    #[test]
    fn test_dma_pool_exceeds_max() {
        let pool = DmaBufferPool::new(MAX_BUFFERS + 1);
        assert!(pool.is_err());
    }

    // Acquire/release must toggle is_free(); accessors return what was
    // passed to the constructor. Addresses are dummies -- the buffer's
    // memory is never dereferenced here.
    #[test]
    fn test_buffer_reference_counting() {
        let buffer = DmaBuffer::new(0x1000, PhysicalAddress(0x2000), 2048, 0);
        assert!(buffer.is_free());
        assert_eq!(buffer.index(), 0);
        assert_eq!(buffer.size(), 2048);
        assert_eq!(buffer.phys_addr().as_u64(), 0x2000);
        assert_eq!(buffer.virt_addr(), 0x1000);

        buffer.acquire();
        assert!(!buffer.is_free());

        buffer.release();
        assert!(buffer.is_free());
    }

    // from_frame derives the physical address from the frame number and
    // defaults the size to DMA_BUFFER_SIZE.
    #[test]
    fn test_buffer_from_frame() {
        let frame = FrameNumber::new(0x100); // Frame 256 = physical 0x100000
        let buffer = DmaBuffer::from_frame(frame, 5);

        assert_eq!(buffer.index(), 5);
        assert_eq!(buffer.size(), DMA_BUFFER_SIZE);
        assert_eq!(buffer.phys_addr().as_u64(), 0x100 * FRAME_SIZE as u64);
        assert!(buffer.is_free());
    }
}