1use alloc::vec::Vec;
12use core::sync::atomic::{AtomicU64, Ordering};
13
14use spin::Mutex;
15
16use crate::{
17 error::KernelError,
18 mm::{
19 frame_allocator::MemoryZone, phys_to_virt_addr, FrameNumber, PhysicalAddress,
20 FRAME_ALLOCATOR, FRAME_SIZE,
21 },
22};
23
/// Usable size of each DMA buffer in bytes.
/// NOTE(review): each buffer is backed by a whole frame (`FRAME_SIZE`), so if
/// `FRAME_SIZE > DMA_BUFFER_SIZE` the tail of every frame is unused — confirm.
pub const DMA_BUFFER_SIZE: usize = 2048;

/// Upper bound on the number of buffers a single pool may hold.
pub const MAX_BUFFERS: usize = 512;

/// Physical addresses at or above this limit (4 GiB) are rejected, keeping
/// buffers reachable by 32-bit DMA engines.
const DMA_PHYS_LIMIT: u64 = 0x1_0000_0000;
32
/// A fixed-size, frame-backed buffer suitable for device DMA.
///
/// Both the kernel-virtual mapping and the physical address are cached so a
/// driver can program the physical address into hardware while the kernel
/// reads/writes the same memory through `virt_addr`.
pub struct DmaBuffer {
    // Kernel-virtual address of the buffer memory.
    virt_addr: usize,

    // Physical address handed to devices for DMA.
    phys_addr: PhysicalAddress,

    // Usable size of the buffer in bytes.
    size: usize,

    // Number of outstanding users; 0 means the buffer is free.
    refcount: AtomicU64,

    // Position of this buffer within its owning pool.
    index: u16,

    // Backing frame, retained so it can later be returned to the allocator.
    #[allow(dead_code)] frame: FrameNumber,
}
54
55impl DmaBuffer {
56 #[allow(dead_code)] fn new(virt_addr: usize, phys_addr: PhysicalAddress, size: usize, index: u16) -> Self {
59 Self {
60 virt_addr,
61 phys_addr,
62 size,
63 refcount: AtomicU64::new(0),
64 index,
65 frame: FrameNumber::new(phys_addr.as_u64() / FRAME_SIZE as u64),
66 }
67 }
68
69 pub fn from_frame(frame: FrameNumber, index: u16) -> Self {
74 let phys_addr = PhysicalAddress::new(frame.as_u64() * FRAME_SIZE as u64);
75 let virt_addr = phys_to_virt_addr(phys_addr.as_u64()) as usize;
76
77 Self {
78 virt_addr,
79 phys_addr,
80 size: DMA_BUFFER_SIZE,
81 refcount: AtomicU64::new(0),
82 index,
83 frame,
84 }
85 }
86
87 pub fn virt_addr(&self) -> usize {
89 self.virt_addr
90 }
91
92 pub fn phys_addr(&self) -> PhysicalAddress {
94 self.phys_addr
95 }
96
97 pub fn size(&self) -> usize {
99 self.size
100 }
101
102 pub fn index(&self) -> u16 {
104 self.index
105 }
106
107 pub fn as_slice(&self) -> &[u8] {
109 unsafe { core::slice::from_raw_parts(self.virt_addr as *const u8, self.size) }
113 }
114
115 pub fn as_mut_slice(&mut self) -> &mut [u8] {
117 unsafe { core::slice::from_raw_parts_mut(self.virt_addr as *mut u8, self.size) }
121 }
122
123 pub fn acquire(&self) -> u64 {
125 self.refcount.fetch_add(1, Ordering::Relaxed)
126 }
127
128 pub fn release(&self) -> u64 {
130 self.refcount.fetch_sub(1, Ordering::Release)
131 }
132
133 pub fn is_free(&self) -> bool {
135 self.refcount.load(Ordering::Acquire) == 0
136 }
137}
138
/// A pool of pre-allocated, pre-zeroed DMA buffers with O(1) alloc/free.
pub struct DmaBufferPool {
    // All buffers owned by the pool; a buffer's `index` is its position here.
    buffers: Vec<DmaBuffer>,

    // Indices into `buffers` that are currently available (LIFO).
    free_list: Vec<u16>,

    // Number of buffers actually allocated (may be < requested).
    total_buffers: usize,

    // Lifetime statistics, readable without exclusive access.
    allocations: AtomicU64,
    deallocations: AtomicU64,
    allocation_failures: AtomicU64,
}
155
156impl DmaBufferPool {
157 pub fn new(num_buffers: usize) -> Result<Self, KernelError> {
164 if num_buffers > MAX_BUFFERS {
165 return Err(KernelError::InvalidArgument {
166 name: "num_buffers",
167 value: "exceeds_max",
168 });
169 }
170
171 let mut buffers = Vec::with_capacity(num_buffers);
172 let mut free_list = Vec::with_capacity(num_buffers);
173 let mut allocated = 0usize;
174
175 let allocator = FRAME_ALLOCATOR.lock();
176
177 for i in 0..num_buffers {
178 let frame = match allocator.allocate_frames_in_zone(1, None, Some(MemoryZone::Normal)) {
181 Ok(f) => f,
182 Err(_) => {
183 match allocator.allocate_frames(1, None) {
185 Ok(f) => f,
186 Err(_) => break, }
188 }
189 };
190
191 let phys_addr = frame.as_u64() * FRAME_SIZE as u64;
192
193 if phys_addr >= DMA_PHYS_LIMIT {
195 let _ = allocator.free_frames(frame, 1);
198 continue;
199 }
200
201 let virt = phys_to_virt_addr(phys_addr) as *mut u8;
203 unsafe {
207 core::ptr::write_bytes(virt, 0, FRAME_SIZE);
208 }
209
210 let buffer = DmaBuffer::from_frame(frame, i as u16);
211 free_list.push(i as u16);
212 buffers.push(buffer);
213 allocated += 1;
214 }
215
216 drop(allocator);
217
218 if allocated == 0 && num_buffers > 0 {
219 return Err(KernelError::OutOfMemory {
220 requested: num_buffers * FRAME_SIZE,
221 available: 0,
222 });
223 }
224
225 println!(
226 "[DMA-POOL] Allocated {}/{} DMA buffers ({}KB, all below 4GB)",
227 allocated,
228 num_buffers,
229 allocated * DMA_BUFFER_SIZE / 1024,
230 );
231
232 Ok(Self {
233 buffers,
234 free_list,
235 total_buffers: allocated,
236 allocations: AtomicU64::new(0),
237 deallocations: AtomicU64::new(0),
238 allocation_failures: AtomicU64::new(0),
239 })
240 }
241
242 pub fn alloc(&mut self) -> Result<&mut DmaBuffer, KernelError> {
244 if let Some(index) = self.free_list.pop() {
245 let buffer = &mut self.buffers[index as usize];
246 buffer.acquire();
247 self.allocations.fetch_add(1, Ordering::Relaxed);
248 Ok(buffer)
249 } else {
250 self.allocation_failures.fetch_add(1, Ordering::Relaxed);
251 Err(KernelError::ResourceExhausted {
252 resource: "dma_buffers",
253 })
254 }
255 }
256
257 pub fn free(&mut self, buffer_index: u16) -> Result<(), KernelError> {
259 if buffer_index as usize >= self.buffers.len() {
260 return Err(KernelError::InvalidArgument {
261 name: "buffer_index",
262 value: "out_of_range",
263 });
264 }
265
266 let buffer = &self.buffers[buffer_index as usize];
267 let prev_count = buffer.release();
268
269 if prev_count == 1 {
271 self.free_list.push(buffer_index);
272 self.deallocations.fetch_add(1, Ordering::Relaxed);
273 }
274
275 Ok(())
276 }
277
278 pub fn free_count(&self) -> usize {
280 self.free_list.len()
281 }
282
283 pub fn total_count(&self) -> usize {
285 self.total_buffers
286 }
287
288 pub fn stats(&self) -> DmaPoolStats {
290 DmaPoolStats {
291 total_buffers: self.total_buffers,
292 free_buffers: self.free_list.len(),
293 allocations: self.allocations.load(Ordering::Relaxed),
294 deallocations: self.deallocations.load(Ordering::Relaxed),
295 allocation_failures: self.allocation_failures.load(Ordering::Relaxed),
296 }
297 }
298}
299
/// Point-in-time snapshot of a `DmaBufferPool`'s occupancy and counters.
#[derive(Debug, Clone, Copy)]
pub struct DmaPoolStats {
    /// Buffers the pool owns in total.
    pub total_buffers: usize,
    /// Buffers currently available.
    pub free_buffers: usize,
    /// Lifetime successful `alloc` calls.
    pub allocations: u64,
    /// Lifetime buffers returned to the free list.
    pub deallocations: u64,
    /// Lifetime `alloc` calls that found the pool empty.
    pub allocation_failures: u64,
}
309
/// Global DMA pool shared by network drivers; `None` until
/// `init_network_pool` has run.
static NETWORK_DMA_POOL: Mutex<Option<DmaBufferPool>> = Mutex::new(None);
312
313pub fn init_network_pool(num_buffers: usize) -> Result<(), KernelError> {
315 let mut pool_lock = NETWORK_DMA_POOL.lock();
316 if pool_lock.is_some() {
317 return Ok(());
318 }
319
320 let pool = DmaBufferPool::new(num_buffers)?;
321 let stats = pool.stats();
322 *pool_lock = Some(pool);
323
324 println!(
325 "[DMA-POOL] Network pool: {} buffers, {} free",
326 stats.total_buffers, stats.free_buffers,
327 );
328 Ok(())
329}
330
331pub fn with_network_pool<R, F: FnOnce(&mut DmaBufferPool) -> R>(f: F) -> Result<R, KernelError> {
333 let mut pool_lock = NETWORK_DMA_POOL.lock();
334 pool_lock.as_mut().map(f).ok_or(KernelError::InvalidState {
335 expected: "initialized",
336 actual: "uninitialized",
337 })
338}
339
#[cfg(test)]
mod tests {
    use super::*;

    // Sanity-check the pool's compile-time configuration.
    #[test]
    fn test_dma_pool_constants() {
        assert_eq!(DMA_BUFFER_SIZE, 2048);
        assert!(MAX_BUFFERS >= 512);
        assert!(DMA_PHYS_LIMIT == 0x1_0000_0000);
    }

    // Requesting more than MAX_BUFFERS must be rejected before touching the
    // frame allocator.
    #[test]
    fn test_dma_pool_exceeds_max() {
        let pool = DmaBufferPool::new(MAX_BUFFERS + 1);
        assert!(pool.is_err());
    }

    // acquire/release should toggle the buffer between busy and free, and
    // the simple accessors should echo the constructor arguments.
    #[test]
    fn test_buffer_reference_counting() {
        let buffer = DmaBuffer::new(0x1000, PhysicalAddress(0x2000), 2048, 0);
        assert!(buffer.is_free());
        assert_eq!(buffer.index(), 0);
        assert_eq!(buffer.size(), 2048);
        assert_eq!(buffer.phys_addr().as_u64(), 0x2000);
        assert_eq!(buffer.virt_addr(), 0x1000);

        buffer.acquire();
        assert!(!buffer.is_free());

        buffer.release();
        assert!(buffer.is_free());
    }

    // from_frame should derive the physical address from the frame number
    // and start with a free, DMA_BUFFER_SIZE-sized buffer.
    #[test]
    fn test_buffer_from_frame() {
        let frame = FrameNumber::new(0x100); let buffer = DmaBuffer::from_frame(frame, 5);

        assert_eq!(buffer.index(), 5);
        assert_eq!(buffer.size(), DMA_BUFFER_SIZE);
        assert_eq!(buffer.phys_addr().as_u64(), 0x100 * FRAME_SIZE as u64);
        assert!(buffer.is_free());
    }
}
383}