1#![allow(dead_code)]
12use alloc::{collections::VecDeque, vec, vec::Vec};
17use core::sync::atomic::{AtomicU64, Ordering};
18
19use spin::Mutex;
20
21use crate::{
22 error::KernelError,
23 mm::{phys_to_virt_addr, FRAME_ALLOCATOR, FRAME_SIZE},
24};
25
/// Pool of reusable, fixed-size DMA buffers.
pub struct DmaBufferPool {
    /// Buffers available for reuse (FIFO recycling).
    free_buffers: Mutex<VecDeque<DmaBuffer>>,
    /// Total buffers ever created by this pool (free + in use).
    total_buffers: AtomicU64,
    /// Buffers currently handed out by `alloc` and not yet returned.
    in_use: AtomicU64,
    /// Size in bytes requested for each buffer.
    buffer_size: usize,
}
37
/// A physically-contiguous, zero-initialized buffer suitable for device DMA.
pub struct DmaBuffer {
    /// Physical start address (the address handed to devices).
    pub physical_addr: u64,
    /// Kernel-virtual alias of `physical_addr` (via the direct map).
    pub virtual_addr: u64,
    /// Usable length in bytes; the underlying allocation is rounded up
    /// to whole frames.
    pub size: usize,
}
47
impl DmaBuffer {
    /// Allocate a zeroed, physically-contiguous buffer of at least `size`
    /// bytes, rounded up to whole frames.
    ///
    /// # Errors
    /// Returns `KernelError::OutOfMemory` when the frame allocator cannot
    /// satisfy the request.
    ///
    /// NOTE(review): no `Drop` impl is visible in this file, so a buffer
    /// that is not recycled through `DmaBufferPool` appears to leak its
    /// frames — confirm whether frames are reclaimed elsewhere.
    pub fn new(size: usize) -> Result<Self, KernelError> {
        let frames_needed = size.div_ceil(FRAME_SIZE);

        let frame = FRAME_ALLOCATOR
            .lock()
            .allocate_frames(frames_needed, None)
            .map_err(|_| KernelError::OutOfMemory {
                requested: size,
                available: 0,
            })?;

        // `frame` is treated as a frame number here; scaling by the frame
        // size yields the physical byte address — TODO confirm against the
        // allocator's contract.
        let phys_addr = frame.as_u64() * FRAME_SIZE as u64;
        let virt_addr = phys_to_virt_addr(phys_addr);

        // Zero the whole allocation (all frames, not just `size` bytes) so
        // no stale data can leak to devices or userspace.
        // SAFETY: the direct-map range covers the freshly allocated frames
        // and nothing else references them yet.
        unsafe {
            core::ptr::write_bytes(virt_addr as *mut u8, 0, frames_needed * FRAME_SIZE);
        }

        Ok(Self {
            physical_addr: phys_addr,
            virtual_addr: virt_addr,
            size,
        })
    }

    /// Mutable byte view of the buffer; empty when there is no mapping.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        if self.virtual_addr == 0 {
            return &mut [];
        }
        // SAFETY: `virtual_addr` points at `size` mapped bytes owned by
        // this buffer, borrowed exclusively for the lifetime of `&mut self`.
        unsafe { core::slice::from_raw_parts_mut(self.virtual_addr as *mut u8, self.size) }
    }

    /// Shared byte view of the buffer; empty when there is no mapping.
    pub fn as_slice(&self) -> &[u8] {
        if self.virtual_addr == 0 {
            return &[];
        }
        // SAFETY: `virtual_addr` points at `size` mapped bytes owned by
        // this buffer for the lifetime of `&self`.
        unsafe { core::slice::from_raw_parts(self.virtual_addr as *const u8, self.size) }
    }
}
105
106impl DmaBufferPool {
107 pub fn new(buffer_size: usize, initial_count: usize) -> Self {
109 let pool = Self {
110 free_buffers: Mutex::new(VecDeque::new()),
111 total_buffers: AtomicU64::new(0),
112 in_use: AtomicU64::new(0),
113 buffer_size,
114 };
115
116 for _ in 0..initial_count {
118 if let Ok(buf) = DmaBuffer::new(buffer_size) {
119 pool.free_buffers.lock().push_back(buf);
120 pool.total_buffers.fetch_add(1, Ordering::Relaxed);
121 }
122 }
123
124 pool
125 }
126
127 pub fn alloc(&self) -> Option<DmaBuffer> {
129 let mut free = self.free_buffers.lock();
130
131 if let Some(buf) = free.pop_front() {
132 self.in_use.fetch_add(1, Ordering::Relaxed);
133 Some(buf)
134 } else {
135 drop(free);
137
138 if let Ok(buf) = DmaBuffer::new(self.buffer_size) {
139 self.total_buffers.fetch_add(1, Ordering::Relaxed);
140 self.in_use.fetch_add(1, Ordering::Relaxed);
141 Some(buf)
142 } else {
143 None
144 }
145 }
146 }
147
148 pub fn free(&self, buf: DmaBuffer) {
150 self.free_buffers.lock().push_back(buf);
151 self.in_use.fetch_sub(1, Ordering::Relaxed);
152 }
153
154 pub fn stats(&self) -> DmaPoolStats {
156 DmaPoolStats {
157 total_buffers: self.total_buffers.load(Ordering::Relaxed),
158 in_use: self.in_use.load(Ordering::Relaxed),
159 buffer_size: self.buffer_size,
160 }
161 }
162}
163
/// Point-in-time snapshot of a `DmaBufferPool`'s counters.
#[derive(Debug, Clone, Copy)]
pub struct DmaPoolStats {
    /// Total buffers created by the pool (free + in use).
    pub total_buffers: u64,
    /// Buffers currently handed out.
    pub in_use: u64,
    /// Size in bytes of each buffer in the pool.
    pub buffer_size: usize,
}
170
/// Ordered list of physical-memory segments describing one logically
/// contiguous payload.
pub struct ScatterGatherList {
    /// Segments in payload order.
    segments: Vec<ScatterGatherSegment>,
}
176
/// One contiguous run of physical memory within a scatter-gather list.
#[derive(Debug, Clone)]
pub struct ScatterGatherSegment {
    /// Physical start address of the run.
    pub physical_addr: u64,
    /// Length of the run in bytes.
    pub length: usize,
}
184
185impl ScatterGatherList {
186 pub fn new() -> Self {
188 Self {
189 segments: Vec::new(),
190 }
191 }
192
193 pub fn add_segment(&mut self, addr: u64, length: usize) {
195 self.segments.push(ScatterGatherSegment {
196 physical_addr: addr,
197 length,
198 });
199 }
200
201 pub fn total_length(&self) -> usize {
203 self.segments.iter().map(|s| s.length).sum()
204 }
205
206 pub fn segments(&self) -> &[ScatterGatherSegment] {
208 &self.segments
209 }
210
211 pub fn segment_count(&self) -> usize {
213 self.segments.len()
214 }
215
216 pub fn is_empty(&self) -> bool {
218 self.segments.is_empty()
219 }
220
221 pub fn copy_to_buffer(&self, buf: &mut [u8]) -> Result<usize, KernelError> {
226 let mut offset = 0;
227
228 for segment in &self.segments {
229 if offset + segment.length > buf.len() {
230 return Err(KernelError::OutOfMemory {
231 requested: offset + segment.length,
232 available: buf.len(),
233 });
234 }
235
236 let src_virt = phys_to_virt_addr(segment.physical_addr) as *const u8;
239
240 unsafe {
246 core::ptr::copy_nonoverlapping(
247 src_virt,
248 buf.as_mut_ptr().add(offset),
249 segment.length,
250 );
251 }
252
253 offset += segment.length;
254 }
255
256 Ok(offset)
257 }
258
259 pub fn assemble(&self) -> Result<Vec<u8>, KernelError> {
263 let total = self.total_length();
264 let mut buf = vec![0u8; total];
265 self.copy_to_buffer(&mut buf)?;
266 Ok(buf)
267 }
268}
269
impl Default for ScatterGatherList {
    /// Equivalent to [`ScatterGatherList::new`]: an empty list.
    fn default() -> Self {
        Self::new()
    }
}
275
/// Builder for a network send described by a scatter-gather list.
pub struct ZeroCopySend {
    /// Segments to transmit, in payload order.
    sg_list: ScatterGatherList,
    /// Optional callback invoked after a successful `execute`.
    completion: Option<fn()>,
}
286
impl ZeroCopySend {
    /// Create an empty send with no completion callback.
    pub fn new() -> Self {
        Self {
            sg_list: ScatterGatherList::new(),
            completion: None,
        }
    }

    /// Queue a kernel-owned physical range for transmission.
    pub fn add_kernel_buffer(&mut self, phys_addr: u64, length: usize) {
        self.sg_list.add_segment(phys_addr, length);
    }

    /// Queue a userspace virtual range by translating it, page by page,
    /// into physical segments. No data is copied here.
    ///
    /// # Errors
    /// - `InvalidAddress` when the range overflows or lies outside the
    ///   valid user address space.
    /// - `UnmappedMemory` when any page in the range has no mapping.
    pub fn add_user_buffer(&mut self, user_addr: u64, length: usize) -> Result<(), KernelError> {
        if length == 0 {
            return Ok(());
        }

        // Reject ranges that wrap the 64-bit address space.
        let end_addr = user_addr
            .checked_add(length as u64)
            .ok_or(KernelError::InvalidAddress {
                addr: user_addr as usize,
            })?;
        // Validate both endpoints (`end_addr` is exclusive, hence -1).
        if !crate::mm::user_validation::is_user_addr_valid(user_addr as usize)
            || !crate::mm::user_validation::is_user_addr_valid((end_addr - 1) as usize)
        {
            return Err(KernelError::InvalidAddress {
                addr: user_addr as usize,
            });
        }

        let page_size = FRAME_SIZE as u64;
        let mut remaining = length;
        let mut vaddr = user_addr;

        // Walk the range one page at a time: each page may map to a
        // different frame, so each becomes its own segment. (The offset
        // mask assumes FRAME_SIZE is a power of two — TODO confirm.)
        while remaining > 0 {
            let page_offset = vaddr & (page_size - 1);
            let bytes_in_page = core::cmp::min(remaining, (page_size - page_offset) as usize);

            if let Some(pte) = crate::mm::translate_user_address(vaddr as usize) {
                if let Some(frame_phys) = pte.addr() {
                    let phys = frame_phys.as_u64() + page_offset;
                    self.sg_list.add_segment(phys, bytes_in_page);
                } else {
                    // PTE exists but holds no frame (e.g. not present).
                    return Err(KernelError::UnmappedMemory {
                        addr: vaddr as usize,
                    });
                }
            } else {
                return Err(KernelError::UnmappedMemory {
                    addr: vaddr as usize,
                });
            }

            vaddr += bytes_in_page as u64;
            remaining -= bytes_in_page;
        }

        // NOTE(review): counted as zero-copy at queue time even though
        // `execute` still copies during assembly — confirm intent.
        ZERO_COPY_STATS.record_zero_copy(length as u64);
        Ok(())
    }

    /// Register a callback to run after a successful `execute`.
    pub fn on_complete(&mut self, callback: fn()) {
        self.completion = Some(callback);
    }

    /// Borrow the queued scatter-gather list.
    pub fn sg_list(&self) -> &ScatterGatherList {
        &self.sg_list
    }

    /// Transmit the queued segments as a single packet on "eth0", falling
    /// back to "lo0". A no-op `Ok(())` when nothing is queued.
    ///
    /// NOTE(review): the segments are assembled (copied) into one linear
    /// buffer before transmission, so this path is not zero-copy end to
    /// end — confirm whether the device layer can consume the SG list.
    pub fn execute(&self) -> Result<(), KernelError> {
        if self.sg_list.is_empty() {
            return Ok(());
        }

        let assembled = self.sg_list.assemble()?;
        let packet = crate::net::Packet::from_bytes(&assembled);

        // Try the primary NIC, then loopback; `None` means neither device
        // exists.
        let sent = crate::net::device::with_device_mut("eth0", |dev| dev.transmit(&packet))
            .or_else(|| crate::net::device::with_device_mut("lo0", |dev| dev.transmit(&packet)));

        match sent {
            Some(Ok(())) => {
                crate::net::update_stats_tx(assembled.len());
            }
            Some(Err(e)) => return Err(e),
            None => {
                // No device available: nothing was sent, but the assembly
                // copy still happened; account for it.
                ZERO_COPY_STATS.record_copy(assembled.len() as u64);
            }
        }

        // The completion callback also runs on the no-device path; only a
        // transmit error (early return above) skips it.
        if let Some(cb) = self.completion {
            cb();
        }

        Ok(())
    }
}
409
impl Default for ZeroCopySend {
    /// Equivalent to [`ZeroCopySend::new`].
    fn default() -> Self {
        Self::new()
    }
}
415
/// A sendfile(2)-style in-kernel transfer from one descriptor of the
/// current process to another.
pub struct SendFile {
    /// Source descriptor (read side).
    source_fd: u32,
    /// Destination descriptor (write side, typically a socket).
    dest_socket: u32,
    /// Requested starting offset in the source.
    /// NOTE(review): currently ignored by `execute` — see note there.
    offset: u64,
    /// Number of bytes to transfer.
    count: usize,
}
427
428impl SendFile {
429 pub fn new(source_fd: u32, dest_socket: u32, offset: u64, count: usize) -> Self {
431 Self {
432 source_fd,
433 dest_socket,
434 offset,
435 count,
436 }
437 }
438
439 pub fn execute(&self) -> Result<usize, KernelError> {
446 let proc = crate::process::current_process().ok_or(KernelError::InvalidState {
447 expected: "running process",
448 actual: "no current process",
449 })?;
450 let ft = proc.file_table.lock();
451 let source_file = ft
452 .get(self.source_fd as usize)
453 .ok_or(KernelError::FsError(crate::error::FsError::NotFound))?;
454 let dest_file = ft
455 .get(self.dest_socket as usize)
456 .ok_or(KernelError::FsError(crate::error::FsError::NotFound))?;
457
458 let _ = self.offset;
460
461 if self.count >= 65536 {
463 if let Ok(transferred) = self.execute_sg(&source_file, &dest_file) {
464 ZERO_COPY_STATS.record_zero_copy(transferred as u64);
465 return Ok(transferred);
466 }
467 }
469
470 let mut transferred = 0usize;
472 let mut buf = [0u8; 4096];
473
474 while transferred < self.count {
475 let chunk = core::cmp::min(buf.len(), self.count - transferred);
476 let n = source_file.read(&mut buf[..chunk])?;
477 if n == 0 {
478 break; }
480 let written = dest_file.write(&buf[..n])?;
481 transferred += written;
482 if written == 0 {
483 break;
484 }
485 }
486
487 ZERO_COPY_STATS.record_copy(transferred as u64);
488 Ok(transferred)
489 }
490
491 fn execute_sg(
498 &self,
499 source: &crate::fs::file::File,
500 dest: &crate::fs::file::File,
501 ) -> Result<usize, KernelError> {
502 let mut sg = ScatterGatherList::new();
503 let mut dma_buffers: Vec<DmaBuffer> = Vec::new();
504 let mut total_read = 0usize;
505
506 while total_read < self.count {
508 let mut dma_buf = DmaBuffer::new(FRAME_SIZE)?;
509 let to_read = core::cmp::min(FRAME_SIZE, self.count - total_read);
510 let n = source.read(&mut dma_buf.as_mut_slice()[..to_read])?;
511 if n == 0 {
512 break; }
514
515 sg.add_segment(dma_buf.physical_addr, n);
516 total_read += n;
517 dma_buffers.push(dma_buf);
518 }
519
520 if total_read == 0 {
521 return Ok(0);
522 }
523
524 let assembled = sg.assemble()?;
526 let mut written_total = 0usize;
527 let mut write_offset = 0usize;
528
529 while write_offset < assembled.len() {
530 let n = dest.write(&assembled[write_offset..])?;
531 if n == 0 {
532 break;
533 }
534 write_offset += n;
535 written_total += n;
536 }
537
538 Ok(written_total)
543 }
544}
545
/// Output coalescing in the spirit of TCP_CORK: small writes accumulate
/// and are transmitted in one batch on flush.
pub struct TcpCork {
    /// Bytes buffered but not yet transmitted.
    pending: Vec<u8>,
    /// Threshold at which `write` triggers an automatic flush.
    max_pending: usize,
    /// Socket to transmit on; `None` means flushed data is discarded.
    socket_id: Option<usize>,
    /// Peer address used by `flush`.
    remote: Option<crate::net::SocketAddr>,
}
560
561impl TcpCork {
562 pub fn new(max_pending: usize) -> Self {
564 Self {
565 pending: Vec::new(),
566 max_pending,
567 socket_id: None,
568 remote: None,
569 }
570 }
571
572 pub fn with_socket(
574 max_pending: usize,
575 socket_id: usize,
576 remote: crate::net::SocketAddr,
577 ) -> Self {
578 Self {
579 pending: Vec::new(),
580 max_pending,
581 socket_id: Some(socket_id),
582 remote: Some(remote),
583 }
584 }
585
586 pub fn write(&mut self, data: &[u8]) -> Result<(), KernelError> {
588 self.pending.extend_from_slice(data);
589
590 if self.pending.len() >= self.max_pending {
591 self.flush()?;
592 }
593
594 Ok(())
595 }
596
597 pub fn pending_len(&self) -> usize {
599 self.pending.len()
600 }
601
602 pub fn flush(&mut self) -> Result<(), KernelError> {
608 if self.pending.is_empty() {
609 return Ok(());
610 }
611
612 if let (Some(socket_id), Some(remote)) = (self.socket_id, self.remote) {
613 crate::net::tcp::transmit_data(socket_id, &self.pending, remote);
615 ZERO_COPY_STATS.record_copy(self.pending.len() as u64);
616 }
617 self.pending.clear();
620 Ok(())
621 }
622}
623
/// Scatter-gather send targeted at an established TCP socket.
pub struct TcpZeroCopySend {
    /// Segments queued for transmission, in payload order.
    sg_list: ScatterGatherList,
    /// Identifier of the TCP socket to send on.
    socket_id: usize,
    /// Peer address.
    remote: crate::net::SocketAddr,
    /// Maximum segment size. NOTE(review): stored but not consulted by
    /// `execute` in this file — presumably the TCP layer segments; confirm.
    mss: usize,
}
638
impl TcpZeroCopySend {
    /// Default TCP maximum segment size (1500-byte MTU minus 40 bytes of
    /// IPv4+TCP headers).
    const DEFAULT_MSS: usize = 1460;

    /// Create a send bound to `socket_id` / `remote` with the default MSS.
    pub fn new(socket_id: usize, remote: crate::net::SocketAddr) -> Self {
        Self {
            sg_list: ScatterGatherList::new(),
            socket_id,
            remote,
            mss: Self::DEFAULT_MSS,
        }
    }

    /// Override the maximum segment size.
    ///
    /// NOTE(review): `mss` is never read by `execute` here — segmentation
    /// presumably happens in `transmit_data`; TODO confirm.
    pub fn set_mss(&mut self, mss: usize) {
        self.mss = mss;
    }

    /// Queue a kernel physical range for transmission.
    pub fn add_buffer(&mut self, phys_addr: u64, length: usize) {
        self.sg_list.add_segment(phys_addr, length);
    }

    /// Queue a userspace range by translating it into physical segments.
    ///
    /// Delegates validation and page-walking to
    /// `ZeroCopySend::add_user_buffer`, then copies the resulting segment
    /// descriptors into this list.
    pub fn add_user_buffer(&mut self, user_addr: u64, length: usize) -> Result<(), KernelError> {
        let mut zc_send = ZeroCopySend::new();
        zc_send.add_user_buffer(user_addr, length)?;

        for seg in zc_send.sg_list.segments() {
            self.sg_list.add_segment(seg.physical_addr, seg.length);
        }
        Ok(())
    }

    /// Assemble the queued segments and hand them to the TCP layer,
    /// returning the number of bytes submitted (0 when nothing is queued).
    ///
    /// NOTE(review): `assemble()` copies the data, yet the bytes are
    /// recorded as zero-copy — confirm the intended accounting.
    pub fn execute(&self) -> Result<usize, KernelError> {
        if self.sg_list.is_empty() {
            return Ok(0);
        }

        let total_len = self.sg_list.total_length();

        let data = self.sg_list.assemble()?;

        crate::net::tcp::transmit_data(self.socket_id, &data, self.remote);

        ZERO_COPY_STATS.record_zero_copy(total_len as u64);
        Ok(total_len)
    }

    /// Total queued bytes.
    pub fn total_length(&self) -> usize {
        self.sg_list.total_length()
    }

    /// Number of queued segments.
    pub fn segment_count(&self) -> usize {
        self.sg_list.segment_count()
    }
}
708
/// Counters tracking how many bytes moved with and without an
/// intermediate copy.
pub struct ZeroCopyStats {
    /// Bytes transferred without an intermediate copy.
    pub zero_copy_bytes: AtomicU64,
    /// Bytes transferred through a bounce/assembly copy.
    pub copied_bytes: AtomicU64,
    /// Number of zero-copy operations recorded.
    pub zero_copy_ops: AtomicU64,
    /// Number of copying operations recorded.
    pub copy_ops: AtomicU64,
}

impl ZeroCopyStats {
    /// All counters start at zero; `const` so it can back a `static`.
    pub const fn new() -> Self {
        Self {
            zero_copy_bytes: AtomicU64::new(0),
            copied_bytes: AtomicU64::new(0),
            zero_copy_ops: AtomicU64::new(0),
            copy_ops: AtomicU64::new(0),
        }
    }

    /// Account one zero-copy transfer of `bytes` bytes.
    pub fn record_zero_copy(&self, bytes: u64) {
        self.zero_copy_ops.fetch_add(1, Ordering::Relaxed);
        self.zero_copy_bytes.fetch_add(bytes, Ordering::Relaxed);
    }

    /// Account one copying transfer of `bytes` bytes.
    pub fn record_copy(&self, bytes: u64) {
        self.copy_ops.fetch_add(1, Ordering::Relaxed);
        self.copied_bytes.fetch_add(bytes, Ordering::Relaxed);
    }

    /// Percentage (0.0–100.0) of transferred bytes that avoided a copy;
    /// 0.0 when no traffic has been recorded yet.
    pub fn get_efficiency(&self) -> f64 {
        let zero_copied = self.zero_copy_bytes.load(Ordering::Relaxed) as f64;
        let copied = self.copied_bytes.load(Ordering::Relaxed) as f64;
        let total = zero_copied + copied;

        if total == 0.0 {
            return 0.0;
        }

        zero_copied / total * 100.0
    }
}
752
impl Default for ZeroCopyStats {
    /// Equivalent to [`ZeroCopyStats::new`]: all counters zero.
    fn default() -> Self {
        Self::new()
    }
}
758
/// Global accounting shared by all zero-copy / fallback-copy paths in this module.
pub(crate) static ZERO_COPY_STATS: ZeroCopyStats = ZeroCopyStats::new();
761
#[cfg(test)]
mod tests {
    use super::*;

    // Allocation-dependent assertions only run on the bare-metal target,
    // where the frame allocator is presumably functional — TODO confirm.
    #[test]
    fn test_dma_buffer_pool() {
        let pool = DmaBufferPool::new(2048, 10);
        let stats = pool.stats();

        #[cfg(target_os = "none")]
        {
            assert_eq!(stats.total_buffers, 10);
            assert_eq!(stats.in_use, 0);

            let buf = pool.alloc();
            assert!(buf.is_some());

            let stats = pool.stats();
            assert_eq!(stats.in_use, 1);
        }
        #[cfg(not(target_os = "none"))]
        {
            // Hosted build: pre-fill may have failed, but nothing can be
            // in use yet.
            assert_eq!(stats.in_use, 0);
        }
    }

    // Pure bookkeeping: segments are only recorded, never dereferenced.
    #[test]
    fn test_scatter_gather() {
        let mut sg = ScatterGatherList::new();
        assert!(sg.is_empty());
        sg.add_segment(0x1000, 512);
        sg.add_segment(0x2000, 1024);

        assert!(!sg.is_empty());
        assert_eq!(sg.total_length(), 1536);
        assert_eq!(sg.segments().len(), 2);
        assert_eq!(sg.segment_count(), 2);
    }

    // 1000 zero-copy bytes vs 100 copied bytes => ~90.9% efficiency.
    #[test]
    fn test_zero_copy_stats() {
        let stats = ZeroCopyStats::new();
        stats.record_zero_copy(1000);
        stats.record_copy(100);

        let efficiency = stats.get_efficiency();
        assert!(efficiency > 90.0); }

    // With no socket attached, flush discards pending data but must still
    // empty the buffer.
    #[test]
    fn test_tcp_cork_basic() {
        let mut cork = TcpCork::new(100);
        assert_eq!(cork.pending_len(), 0);

        cork.write(b"hello").unwrap();
        assert_eq!(cork.pending_len(), 5);

        cork.flush().unwrap();
        assert_eq!(cork.pending_len(), 0);
    }

    #[test]
    fn test_zero_copy_send_empty() {
        let send = ZeroCopySend::new();
        assert!(send.sg_list().is_empty());
    }
}